From 9bc0d5161edb00140addc1d06271baf01ede4c54 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 1 Apr 2025 18:56:06 +1100 Subject: [PATCH 01/35] Disable LevelDB snappy feature (#7235) Disable the `snappy` feature of LevelDB to prevent compilation issues with CMake 4.0, e.g. https://github.com/sigp/lighthouse/actions/runs/14182783816/job/39732457274?pr=7231 We do not use Snappy compression in LevelDB, and do not need to compile this. This might also shave a few seconds off compilation! --- beacon_node/store/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index d2f3a5c562..57330eb583 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -22,7 +22,7 @@ directory = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } itertools = { workspace = true } -leveldb = { version = "0.8.6", optional = true } +leveldb = { version = "0.8.6", optional = true, default-features = false } logging = { workspace = true } lru = { workspace = true } metrics = { workspace = true } From 4839ed620fa952959a93da6eb54f06c2c0805136 Mon Sep 17 00:00:00 2001 From: Mac L Date: Tue, 1 Apr 2025 21:51:09 +1100 Subject: [PATCH 02/35] Tracing cleanup (#7168) #7153 #7146 #7147 #7148 -> Thanks to @ackintosh This PR does the following: 1. Disable logging to file when using either `--logfile-max-number 0` or `--logfile-max-size 0`. Note that disabling the log file in this way will also disable `discv5` and `libp2p` logging. 1. `discv5` and `libp2p` logging will be disabled by default unless running `beacon_node` or `boot_node`. This also should fix the VC panic we were seeing. 1. Removes log rotation and compression from `libp2p` and `discv5` logs. It is now limited to 1 file and will rotate based on the value of the `--logfile-max-size` flag. 
We could potentially add flags specifically to control the size/number of these, however I felt a single log file was sufficient. Perhaps @AgeManning has opinions about this? 1. Removes all dependency logging and references to `dep_log`. 1. Introduces workspace filtering to file and stdout. This explicitly allows logs from members of the Lighthouse workspace, disallowing all others. It uses a proc macro which pulls the member list from cargo metadata at compile time. This might be over-engineered but my hope is that this list will not require maintenance. 1. Unifies file and stdout JSON format. With slog, the formats were slightly different. @threehrsleep worked to maintain that format difference, to ensure there was no breaking changes. If these format differences are actually problematic we can restore it, however I felt the added complexity wasn't worth it. 1. General code improvements and cleanup. --- Cargo.lock | 29 +++- Cargo.toml | 3 + common/logging/Cargo.toml | 3 +- common/logging/src/lib.rs | 142 +----------------- .../tracing_libp2p_discv5_logging_layer.rs | 113 ++++++++++++++ common/logging/src/tracing_logging_layer.rs | 116 +++----------- common/logging/src/utils.rs | 31 ++++ common/workspace_members/Cargo.toml | 11 ++ common/workspace_members/src/lib.rs | 39 +++++ lighthouse/environment/src/lib.rs | 86 ++++------- lighthouse/environment/src/tracing_common.rs | 78 +++++----- lighthouse/src/main.rs | 70 ++++++--- testing/simulator/src/basic_sim.rs | 12 +- testing/simulator/src/fallback_sim.rs | 14 +- 14 files changed, 382 insertions(+), 365 deletions(-) create mode 100644 common/logging/src/tracing_libp2p_discv5_logging_layer.rs create mode 100644 common/logging/src/utils.rs create mode 100644 common/workspace_members/Cargo.toml create mode 100644 common/workspace_members/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index b3b4069e8c..e6fca4c052 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1187,6 +1187,20 @@ dependencies = [ "thiserror 1.0.69", ] 
+[[package]] +name = "cargo_metadata" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" +dependencies = [ + "camino", + "cargo-platform", + "semver 1.0.26", + "serde", + "serde_json", + "thiserror 2.0.12", +] + [[package]] name = "cast" version = "0.3.0" @@ -1915,7 +1929,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "18e4fdb82bd54a12e42fb58a800dcae6b9e13982238ce2296dc3570b92148e1f" dependencies = [ "data-encoding", - "syn 1.0.109", + "syn 2.0.100", ] [[package]] @@ -2853,7 +2867,7 @@ checksum = "ade3e9c97727343984e1ceada4fdab11142d2ee3472d2c67027d56b1251d4f15" dependencies = [ "arrayvec", "bytes", - "cargo_metadata", + "cargo_metadata 0.15.4", "chrono", "elliptic-curve 0.12.3", "ethabi 18.0.0", @@ -4761,7 +4775,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -5458,6 +5472,7 @@ dependencies = [ "tracing-core", "tracing-log", "tracing-subscriber", + "workspace_members", ] [[package]] @@ -10152,6 +10167,14 @@ dependencies = [ "bitflags 2.9.0", ] +[[package]] +name = "workspace_members" +version = "0.1.0" +dependencies = [ + "cargo_metadata 0.19.2", + "quote", +] + [[package]] name = "write16" version = "1.0.0" diff --git a/Cargo.toml b/Cargo.toml index 5284713fc2..de5d6b541e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,6 +51,7 @@ members = [ "common/unused_port", "common/validator_dir", "common/warp_utils", + "common/workspace_members", "consensus/fixed_bytes", "consensus/fork_choice", @@ -120,6 +121,7 @@ bincode = "1" bitvec = "1" byteorder = "1" bytes = "1" +cargo_metadata = "0.19" clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } # Turn off c-kzg's default features which include `blst/portable`. 
We can turn on blst's portable # feature ourselves when desired. @@ -246,6 +248,7 @@ kzg = { path = "crypto/kzg" } metrics = { path = "common/metrics" } lighthouse_network = { path = "beacon_node/lighthouse_network" } lighthouse_version = { path = "common/lighthouse_version" } +workspace_members = { path = "common/workspace_members" } lockfile = { path = "common/lockfile" } logging = { path = "common/logging" } lru_cache = { path = "common/lru_cache" } diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index a69bc6ab23..6975e04505 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -16,8 +16,9 @@ parking_lot = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true, features = [ "time" ] } -tracing = "0.1" +tracing = { workspace = true } tracing-appender = { workspace = true } tracing-core = { workspace = true } tracing-log = { workspace = true } tracing-subscriber = { workspace = true } +workspace_members = { workspace = true } diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 403f682a06..5c4de1fd61 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -1,24 +1,24 @@ -use chrono::Local; -use logroller::{Compression, LogRollerBuilder, Rotation, RotationSize}; use metrics::{try_create_int_counter, IntCounter, Result as MetricsResult}; -use std::io::Write; -use std::path::PathBuf; use std::sync::LazyLock; use std::time::{Duration, Instant}; -use tracing::Subscriber; -use tracing_appender::non_blocking::{NonBlocking, WorkerGuard}; -use tracing_subscriber::layer::Context; -use tracing_subscriber::{EnvFilter, Layer}; +use tracing_subscriber::EnvFilter; pub const MAX_MESSAGE_WIDTH: usize = 40; pub mod macros; mod sse_logging_components; +mod tracing_libp2p_discv5_logging_layer; pub mod tracing_logging_layer; mod tracing_metrics_layer; +mod utils; pub use sse_logging_components::SSELoggingComponents; +pub use 
tracing_libp2p_discv5_logging_layer::{ + create_libp2p_discv5_tracing_layer, Libp2pDiscv5TracingLayer, +}; +pub use tracing_logging_layer::LoggingLayer; pub use tracing_metrics_layer::MetricsLayer; +pub use utils::build_workspace_filter; /// The minimum interval between log messages indicating that a queue is full. const LOG_DEBOUNCE_INTERVAL: Duration = Duration::from_secs(30); @@ -51,132 +51,6 @@ impl TimeLatch { } } -pub struct Libp2pDiscv5TracingLayer { - pub libp2p_non_blocking_writer: NonBlocking, - pub _libp2p_guard: WorkerGuard, - pub discv5_non_blocking_writer: NonBlocking, - pub _discv5_guard: WorkerGuard, -} - -impl Layer for Libp2pDiscv5TracingLayer -where - S: Subscriber, -{ - fn on_event(&self, event: &tracing::Event<'_>, _ctx: Context) { - let meta = event.metadata(); - let log_level = meta.level(); - let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S").to_string(); - - let target = match meta.target().split_once("::") { - Some((crate_name, _)) => crate_name, - None => "unknown", - }; - - let mut writer = match target { - "gossipsub" => self.libp2p_non_blocking_writer.clone(), - "discv5" => self.discv5_non_blocking_writer.clone(), - _ => return, - }; - - let mut visitor = LogMessageExtractor { - message: String::default(), - }; - - event.record(&mut visitor); - let message = format!("{} {} {}\n", timestamp, log_level, visitor.message); - - if let Err(e) = writer.write_all(message.as_bytes()) { - eprintln!("Failed to write log: {}", e); - } - } -} - -struct LogMessageExtractor { - message: String, -} - -impl tracing_core::field::Visit for LogMessageExtractor { - fn record_debug(&mut self, _: &tracing_core::Field, value: &dyn std::fmt::Debug) { - self.message = format!("{} {:?}", self.message, value); - } -} - -pub fn create_libp2p_discv5_tracing_layer( - base_tracing_log_path: Option, - max_log_size: u64, - compression: bool, - max_log_number: usize, -) -> Libp2pDiscv5TracingLayer { - if let Some(mut tracing_log_path) = base_tracing_log_path { - 
// Ensure that `tracing_log_path` only contains directories. - for p in tracing_log_path.clone().iter() { - tracing_log_path = tracing_log_path.join(p); - if let Ok(metadata) = tracing_log_path.metadata() { - if !metadata.is_dir() { - tracing_log_path.pop(); - break; - } - } - } - - let mut libp2p_writer = - LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("libp2p.log")) - .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size))) - .max_keep_files(max_log_number.try_into().unwrap_or_else(|e| { - eprintln!("Failed to convert max_log_number to u64: {}", e); - 10 - })); - - let mut discv5_writer = - LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("discv5.log")) - .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size))) - .max_keep_files(max_log_number.try_into().unwrap_or_else(|e| { - eprintln!("Failed to convert max_log_number to u64: {}", e); - 10 - })); - - if compression { - libp2p_writer = libp2p_writer.compression(Compression::Gzip); - discv5_writer = discv5_writer.compression(Compression::Gzip); - } - - let libp2p_writer = match libp2p_writer.build() { - Ok(writer) => writer, - Err(e) => { - eprintln!("Failed to initialize libp2p rolling file appender: {e}"); - std::process::exit(1); - } - }; - - let discv5_writer = match discv5_writer.build() { - Ok(writer) => writer, - Err(e) => { - eprintln!("Failed to initialize discv5 rolling file appender: {e}"); - std::process::exit(1); - } - }; - - let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(libp2p_writer); - let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(discv5_writer); - - Libp2pDiscv5TracingLayer { - libp2p_non_blocking_writer, - _libp2p_guard, - discv5_non_blocking_writer, - _discv5_guard, - } - } else { - let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(std::io::sink()); - let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(std::io::sink()); - Libp2pDiscv5TracingLayer { - libp2p_non_blocking_writer, - 
_libp2p_guard, - discv5_non_blocking_writer, - _discv5_guard, - } - } -} - /// Return a tracing subscriber suitable for test usage. /// /// By default no logs will be printed, but they can be enabled via diff --git a/common/logging/src/tracing_libp2p_discv5_logging_layer.rs b/common/logging/src/tracing_libp2p_discv5_logging_layer.rs new file mode 100644 index 0000000000..90033d11ad --- /dev/null +++ b/common/logging/src/tracing_libp2p_discv5_logging_layer.rs @@ -0,0 +1,113 @@ +use chrono::Local; +use logroller::{LogRollerBuilder, Rotation, RotationSize}; +use std::io::Write; +use std::path::PathBuf; +use tracing::Subscriber; +use tracing_appender::non_blocking::{NonBlocking, WorkerGuard}; +use tracing_subscriber::{layer::Context, Layer}; + +pub struct Libp2pDiscv5TracingLayer { + pub libp2p_non_blocking_writer: NonBlocking, + _libp2p_guard: WorkerGuard, + pub discv5_non_blocking_writer: NonBlocking, + _discv5_guard: WorkerGuard, +} + +impl Layer for Libp2pDiscv5TracingLayer +where + S: Subscriber, +{ + fn on_event(&self, event: &tracing::Event<'_>, _ctx: Context) { + let meta = event.metadata(); + let log_level = meta.level(); + let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S").to_string(); + + let target = match meta.target().split_once("::") { + Some((crate_name, _)) => crate_name, + None => "unknown", + }; + + let mut writer = match target { + "libp2p_gossipsub" => self.libp2p_non_blocking_writer.clone(), + "discv5" => self.discv5_non_blocking_writer.clone(), + _ => return, + }; + + let mut visitor = LogMessageExtractor { + message: String::default(), + }; + + event.record(&mut visitor); + let message = format!("{} {} {}\n", timestamp, log_level, visitor.message); + + if let Err(e) = writer.write_all(message.as_bytes()) { + eprintln!("Failed to write log: {}", e); + } + } +} + +struct LogMessageExtractor { + message: String, +} + +impl tracing_core::field::Visit for LogMessageExtractor { + fn record_debug(&mut self, _: &tracing_core::Field, value: &dyn 
std::fmt::Debug) { + self.message = format!("{} {:?}", self.message, value); + } +} + +pub fn create_libp2p_discv5_tracing_layer( + base_tracing_log_path: Option, + max_log_size: u64, +) -> Option { + if let Some(mut tracing_log_path) = base_tracing_log_path { + // Ensure that `tracing_log_path` only contains directories. + for p in tracing_log_path.clone().iter() { + tracing_log_path = tracing_log_path.join(p); + if let Ok(metadata) = tracing_log_path.metadata() { + if !metadata.is_dir() { + tracing_log_path.pop(); + break; + } + } + } + + let libp2p_writer = + LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("libp2p.log")) + .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size))) + .max_keep_files(1); + + let discv5_writer = + LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("discv5.log")) + .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size))) + .max_keep_files(1); + + let libp2p_writer = match libp2p_writer.build() { + Ok(writer) => writer, + Err(e) => { + eprintln!("Failed to initialize libp2p rolling file appender: {e}"); + std::process::exit(1); + } + }; + + let discv5_writer = match discv5_writer.build() { + Ok(writer) => writer, + Err(e) => { + eprintln!("Failed to initialize discv5 rolling file appender: {e}"); + std::process::exit(1); + } + }; + + let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(libp2p_writer); + let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(discv5_writer); + + Some(Libp2pDiscv5TracingLayer { + libp2p_non_blocking_writer, + _libp2p_guard, + discv5_non_blocking_writer, + _discv5_guard, + }) + } else { + None + } +} diff --git a/common/logging/src/tracing_logging_layer.rs b/common/logging/src/tracing_logging_layer.rs index 4478e1facb..810f7e960e 100644 --- a/common/logging/src/tracing_logging_layer.rs +++ b/common/logging/src/tracing_logging_layer.rs @@ -1,3 +1,5 @@ +use crate::utils::is_ascii_control; + use chrono::prelude::*; use serde_json::{Map, Value}; 
use std::collections::HashMap; @@ -13,14 +15,11 @@ use tracing_subscriber::Layer; pub struct LoggingLayer { pub non_blocking_writer: NonBlocking, - pub guard: WorkerGuard, + _guard: WorkerGuard, pub disable_log_timestamp: bool, pub log_color: bool, - pub logfile_color: bool, pub log_format: Option, - pub logfile_format: Option, pub extra_info: bool, - pub dep_logs: bool, span_fields: Arc>>, } @@ -28,25 +27,19 @@ impl LoggingLayer { #[allow(clippy::too_many_arguments)] pub fn new( non_blocking_writer: NonBlocking, - guard: WorkerGuard, + _guard: WorkerGuard, disable_log_timestamp: bool, log_color: bool, - logfile_color: bool, log_format: Option, - logfile_format: Option, extra_info: bool, - dep_logs: bool, ) -> Self { Self { non_blocking_writer, - guard, + _guard, disable_log_timestamp, log_color, - logfile_color, log_format, - logfile_format, extra_info, - dep_logs, span_fields: Arc::new(Mutex::new(HashMap::new())), } } @@ -84,16 +77,6 @@ where String::new() }; - if !self.dep_logs { - if let Some(file) = meta.file() { - if file.contains("/.cargo/") { - return; - } - } else { - return; - } - } - let mut writer = self.non_blocking_writer.clone(); let mut visitor = LogMessageExtractor { @@ -122,16 +105,10 @@ where None => "".to_string(), }; - if module.contains("discv5") { - visitor - .fields - .push(("service".to_string(), "\"discv5\"".to_string())); - } - let gray = "\x1b[90m"; let reset = "\x1b[0m"; let location = if self.extra_info { - if self.logfile_color { + if self.log_color { format!("{}{}::{}:{}{}", gray, module, file, line, reset) } else { format!("{}::{}:{}", module, file, line) @@ -164,33 +141,16 @@ where } }; - if self.dep_logs { - if self.logfile_format.as_deref() == Some("JSON") { - build_json_log_file( - &visitor, - plain_level_str, - meta, - &ctx, - &self.span_fields, - event, - &mut writer, - ); - } else { - build_log_text( - &visitor, - plain_level_str, - ×tamp, - &ctx, - &self.span_fields, - event, - &location, - color_level_str, - 
self.logfile_color, - &mut writer, - ); - } - } else if self.log_format.as_deref() == Some("JSON") { - build_json_log_stdout(&visitor, plain_level_str, ×tamp, &mut writer); + if self.log_format.as_deref() == Some("JSON") { + build_log_json( + &visitor, + plain_level_str, + meta, + &ctx, + &self.span_fields, + event, + &mut writer, + ); } else { build_log_text( &visitor, @@ -300,49 +260,7 @@ impl tracing_core::field::Visit for LogMessageExtractor { } } -/// Function to filter out ascii control codes. -/// -/// This helps to keep log formatting consistent. -/// Whitespace and padding control codes are excluded. -fn is_ascii_control(character: &u8) -> bool { - matches!( - character, - b'\x00'..=b'\x08' | - b'\x0b'..=b'\x0c' | - b'\x0e'..=b'\x1f' | - b'\x7f' | - b'\x81'..=b'\x9f' - ) -} - -fn build_json_log_stdout( - visitor: &LogMessageExtractor, - plain_level_str: &str, - timestamp: &str, - writer: &mut impl Write, -) { - let mut log_map = Map::new(); - log_map.insert("msg".to_string(), Value::String(visitor.message.clone())); - log_map.insert( - "level".to_string(), - Value::String(plain_level_str.to_string()), - ); - log_map.insert("ts".to_string(), Value::String(timestamp.to_string())); - - for (key, val) in visitor.fields.clone().into_iter() { - let parsed_val = parse_field(&val); - log_map.insert(key, parsed_val); - } - - let json_obj = Value::Object(log_map); - let output = format!("{}\n", json_obj); - - if let Err(e) = writer.write_all(output.as_bytes()) { - eprintln!("Failed to write log: {}", e); - } -} - -fn build_json_log_file<'a, S>( +fn build_log_json<'a, S>( visitor: &LogMessageExtractor, plain_level_str: &str, meta: &tracing::Metadata<'_>, diff --git a/common/logging/src/utils.rs b/common/logging/src/utils.rs new file mode 100644 index 0000000000..784cd5ca70 --- /dev/null +++ b/common/logging/src/utils.rs @@ -0,0 +1,31 @@ +use std::collections::HashSet; +use tracing_subscriber::filter::FilterFn; +use workspace_members::workspace_crates; + +const 
WORKSPACE_CRATES: &[&str] = workspace_crates!(); + +/// Constructs a filter which only permits logging from crates which are members of the workspace. +pub fn build_workspace_filter( +) -> Result bool + Clone>, String> { + let workspace_crates: HashSet<&str> = WORKSPACE_CRATES.iter().copied().collect(); + + Ok(tracing_subscriber::filter::FilterFn::new(move |metadata| { + let target_crate = metadata.target().split("::").next().unwrap_or(""); + workspace_crates.contains(target_crate) + })) +} + +/// Function to filter out ascii control codes. +/// +/// This helps to keep log formatting consistent. +/// Whitespace and padding control codes are excluded. +pub fn is_ascii_control(character: &u8) -> bool { + matches!( + character, + b'\x00'..=b'\x08' | + b'\x0b'..=b'\x0c' | + b'\x0e'..=b'\x1f' | + b'\x7f' | + b'\x81'..=b'\x9f' + ) +} diff --git a/common/workspace_members/Cargo.toml b/common/workspace_members/Cargo.toml new file mode 100644 index 0000000000..05924590e3 --- /dev/null +++ b/common/workspace_members/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "workspace_members" +version = "0.1.0" +edition = { workspace = true } + +[lib] +proc-macro = true + +[dependencies] +cargo_metadata = { workspace = true } +quote = { workspace = true } diff --git a/common/workspace_members/src/lib.rs b/common/workspace_members/src/lib.rs new file mode 100644 index 0000000000..1eea0e60e2 --- /dev/null +++ b/common/workspace_members/src/lib.rs @@ -0,0 +1,39 @@ +use cargo_metadata::MetadataCommand; +use proc_macro::TokenStream; +use quote::quote; +use std::error::Error; + +fn get_workspace_crates() -> Result, Box> { + let metadata = MetadataCommand::new().no_deps().exec()?; + + Ok(metadata + .workspace_members + .iter() + .filter_map(|member_id| { + metadata + .packages + .iter() + .find(|package| &package.id == member_id) + .map(|package| package.name.clone()) + }) + .collect()) +} + +#[proc_macro] +pub fn workspace_crates(_input: TokenStream) -> TokenStream { + match 
get_workspace_crates() { + Ok(crate_names) => { + let crate_strs = crate_names.iter().map(|s| s.as_str()); + quote! { + &[#(#crate_strs),*] + } + } + Err(e) => { + let msg = format!("Failed to get workspace crates: {e}"); + quote! { + compile_error!(#msg); + } + } + } + .into() +} diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index f427836751..9b0284e06d 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -197,6 +197,13 @@ impl EnvironmentBuilder { Ok(self) } + /// Initialize the Lighthouse-specific tracing logging components from + /// the provided config. + /// + /// This consists of 3 tracing `Layers`: + /// - A `Layer` which logs to `stdout` + /// - An `Option` which logs to a log file + /// - An `Option` which emits logs to an SSE stream pub fn init_tracing( mut self, config: LoggerConfig, @@ -204,7 +211,7 @@ impl EnvironmentBuilder { ) -> ( Self, LoggingLayer, - LoggingLayer, + Option, Option, ) { let filename_prefix = match logfile_prefix { @@ -216,72 +223,48 @@ impl EnvironmentBuilder { #[cfg(target_family = "unix")] let file_mode = if config.is_restricted { 0o600 } else { 0o644 }; - let file_logging_layer = { - if let Some(path) = config.path { - let mut appender = LogRollerBuilder::new( - path.clone(), - PathBuf::from(format!("{}.log", filename_prefix)), - ) - .rotation(Rotation::SizeBased(RotationSize::MB(config.max_log_size))) - .max_keep_files(config.max_log_number.try_into().unwrap_or_else(|e| { - eprintln!("Failed to convert max_log_number to u64: {}", e); - 10 - })); + let file_logging_layer = match config.path { + None => { + eprintln!("No logfile path provided, logging to file is disabled"); + None + } + Some(_) if config.max_log_number == 0 || config.max_log_size == 0 => { + // User has explicitly disabled logging to file, so don't emit a message. 
+ None + } + Some(path) => { + let log_filename = PathBuf::from(format!("{}.log", filename_prefix)); + let mut appender = LogRollerBuilder::new(path.clone(), log_filename) + .rotation(Rotation::SizeBased(RotationSize::MB(config.max_log_size))) + .max_keep_files(config.max_log_number.try_into().unwrap_or_else(|e| { + eprintln!("Failed to convert max_log_number to u64: {}", e); + 10 + })); if config.compression { appender = appender.compression(Compression::Gzip); } + match appender.build() { Ok(file_appender) => { #[cfg(target_family = "unix")] set_logfile_permissions(&path, filename_prefix, file_mode); - let (file_non_blocking_writer, file_guard) = - tracing_appender::non_blocking(file_appender); - - LoggingLayer::new( - file_non_blocking_writer, - file_guard, + let (writer, guard) = tracing_appender::non_blocking(file_appender); + Some(LoggingLayer::new( + writer, + guard, config.disable_log_timestamp, - false, config.logfile_color, - config.log_format.clone(), config.logfile_format.clone(), config.extra_info, - false, - ) + )) } Err(e) => { eprintln!("Failed to initialize rolling file appender: {}", e); - let (sink_writer, sink_guard) = - tracing_appender::non_blocking(std::io::sink()); - LoggingLayer::new( - sink_writer, - sink_guard, - config.disable_log_timestamp, - false, - config.logfile_color, - config.log_format.clone(), - config.logfile_format.clone(), - config.extra_info, - false, - ) + None } } - } else { - eprintln!("No path provided. 
File logging is disabled."); - let (sink_writer, sink_guard) = tracing_appender::non_blocking(std::io::sink()); - LoggingLayer::new( - sink_writer, - sink_guard, - config.disable_log_timestamp, - false, - true, - config.log_format.clone(), - config.logfile_format.clone(), - config.extra_info, - false, - ) } }; @@ -293,11 +276,8 @@ impl EnvironmentBuilder { stdout_guard, config.disable_log_timestamp, config.log_color, - true, config.log_format, - config.logfile_format, config.extra_info, - false, ); let sse_logging_layer_opt = if config.sse_logging { @@ -310,8 +290,8 @@ impl EnvironmentBuilder { ( self, - file_logging_layer, stdout_logging_layer, + file_logging_layer, sse_logging_layer_opt, ) } diff --git a/lighthouse/environment/src/tracing_common.rs b/lighthouse/environment/src/tracing_common.rs index 893f50dae5..dd9fe45cad 100644 --- a/lighthouse/environment/src/tracing_common.rs +++ b/lighthouse/environment/src/tracing_common.rs @@ -1,47 +1,67 @@ use crate::{EnvironmentBuilder, LoggerConfig}; use clap::ArgMatches; use logging::Libp2pDiscv5TracingLayer; -use logging::{tracing_logging_layer::LoggingLayer, SSELoggingComponents}; +use logging::{ + create_libp2p_discv5_tracing_layer, tracing_logging_layer::LoggingLayer, SSELoggingComponents, +}; use std::process; -use tracing_subscriber::filter::{FilterFn, LevelFilter}; + +use tracing_subscriber::filter::LevelFilter; use types::EthSpec; +/// Constructs all logging layers including both Lighthouse-specific and +/// dependency logging. +/// +/// The `Layer`s are as follows: +/// - A `Layer` which logs to `stdout` +/// - An `Option` which logs to a log file +/// - An `Option` which emits logs to an SSE stream +/// - An `Option` which logs relevant dependencies to their +/// own log files. 
(Currently only `libp2p` and `discv5`) pub fn construct_logger( logger_config: LoggerConfig, matches: &ArgMatches, environment_builder: EnvironmentBuilder, ) -> ( EnvironmentBuilder, - Libp2pDiscv5TracingLayer, - LoggingLayer, - LoggingLayer, - Option, LoggerConfig, - FilterFn, + LoggingLayer, + Option, + Option, + Option, ) { - let libp2p_discv5_layer = logging::create_libp2p_discv5_tracing_layer( - logger_config.path.clone(), - logger_config.max_log_size, - logger_config.compression, - logger_config.max_log_number, - ); + let subcommand_name = matches.subcommand_name(); + let logfile_prefix = subcommand_name.unwrap_or("lighthouse"); - let logfile_prefix = matches.subcommand_name().unwrap_or("lighthouse"); - - let (builder, file_logging_layer, stdout_logging_layer, sse_logging_layer_opt) = + let (builder, stdout_logging_layer, file_logging_layer, sse_logging_layer_opt) = environment_builder.init_tracing(logger_config.clone(), logfile_prefix); - let dependency_log_filter = - FilterFn::new(filter_dependency_log as fn(&tracing::Metadata<'_>) -> bool); + let libp2p_discv5_layer = if let Some(subcommand_name) = subcommand_name { + if subcommand_name == "beacon_node" || subcommand_name == "boot_node" { + if logger_config.max_log_size == 0 || logger_config.max_log_number == 0 { + // User has explicitly disabled logging to file. + None + } else { + create_libp2p_discv5_tracing_layer( + logger_config.path.clone(), + logger_config.max_log_size, + ) + } + } else { + // Disable libp2p and discv5 logs when running other subcommands. 
+ None + } + } else { + None + }; ( builder, - libp2p_discv5_layer, - file_logging_layer, - stdout_logging_layer, - sse_logging_layer_opt, logger_config, - dependency_log_filter, + stdout_logging_layer, + file_logging_layer, + sse_logging_layer_opt, + libp2p_discv5_layer, ) } @@ -58,15 +78,3 @@ pub fn parse_level(level: &str) -> LevelFilter { } } } - -fn filter_dependency_log(meta: &tracing::Metadata<'_>) -> bool { - if let Some(file) = meta.file() { - let target = meta.target(); - if file.contains("/.cargo/") { - return target.contains("discv5") || target.contains("libp2p"); - } else { - return !file.contains("gossipsub") && !target.contains("hyper"); - } - } - true -} diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index a6ab1cfb6b..60e65e6470 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -17,17 +17,15 @@ use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK, HARDCODE use ethereum_hashing::have_sha_extensions; use futures::TryFutureExt; use lighthouse_version::VERSION; -use logging::crit; -use logging::MetricsLayer; +use logging::{build_workspace_filter, crit, MetricsLayer}; use malloc_utils::configure_memory_allocator; use std::backtrace::Backtrace; use std::path::PathBuf; use std::process::exit; use std::sync::LazyLock; use task_executor::ShutdownReason; -use tracing::{info, warn}; -use tracing_subscriber::prelude::*; -use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; +use tracing::{info, warn, Level}; +use tracing_subscriber::{filter::EnvFilter, layer::SubscriberExt, util::SubscriberInitExt, Layer}; use types::{EthSpec, EthSpecId}; use validator_client::ProductionValidatorClient; @@ -592,12 +590,11 @@ fn run( let ( builder, - libp2p_discv5_layer, - file_logging_layer, - stdout_logging_layer, - sse_logging_layer_opt, logger_config, - dependency_log_filter, + stdout_logging_layer, + file_logging_layer, + sse_logging_layer_opt, + libp2p_discv5_layer, ) = tracing_common::construct_logger( 
LoggerConfig { path: log_path.clone(), @@ -619,21 +616,50 @@ fn run( environment_builder, ); - let logging = tracing_subscriber::registry() - .with(dependency_log_filter) - .with(file_logging_layer.with_filter(logger_config.logfile_debug_level)) - .with(stdout_logging_layer.with_filter(logger_config.debug_level)) - .with(MetricsLayer) - .with(libp2p_discv5_layer); + let workspace_filter = build_workspace_filter()?; - let logging_result = if let Some(sse_logging_layer) = sse_logging_layer_opt { - logging.with(sse_logging_layer).try_init() - } else { - logging.try_init() - }; + let mut logging_layers = Vec::new(); + + logging_layers.push( + stdout_logging_layer + .with_filter(logger_config.debug_level) + .with_filter(workspace_filter.clone()) + .boxed(), + ); + + if let Some(file_logging_layer) = file_logging_layer { + logging_layers.push( + file_logging_layer + .with_filter(logger_config.logfile_debug_level) + .with_filter(workspace_filter) + .boxed(), + ); + } + + if let Some(sse_logging_layer) = sse_logging_layer_opt { + logging_layers.push(sse_logging_layer.boxed()); + } + + if let Some(libp2p_discv5_layer) = libp2p_discv5_layer { + logging_layers.push( + libp2p_discv5_layer + .with_filter( + EnvFilter::builder() + .with_default_directive(Level::DEBUG.into()) + .from_env_lossy(), + ) + .boxed(), + ); + } + + logging_layers.push(MetricsLayer.boxed()); + + let logging_result = tracing_subscriber::registry() + .with(logging_layers) + .try_init(); if let Err(e) = logging_result { - eprintln!("Failed to initialize dependency logging: {e}"); + eprintln!("Failed to initialize logger: {e}"); } let mut environment = builder diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index 4cd599f845..6afc7771d4 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -15,7 +15,6 @@ use std::sync::Arc; use std::time::Duration; use environment::tracing_common; -use logging::MetricsLayer; use 
tracing_subscriber::prelude::*; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; @@ -90,12 +89,11 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { let ( env_builder, - _libp2p_discv5_layer, - file_logging_layer, - stdout_logging_layer, - _sse_logging_layer_opt, logger_config, - _dependency_log_filter, + stdout_logging_layer, + _file_logging_layer, + _sse_logging_layer_opt, + _libp2p_discv5_layer, ) = tracing_common::construct_logger( LoggerConfig { path: None, @@ -118,9 +116,7 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { ); if let Err(e) = tracing_subscriber::registry() - .with(file_logging_layer.with_filter(logger_config.logfile_debug_level)) .with(stdout_logging_layer.with_filter(logger_config.debug_level)) - .with(MetricsLayer) .try_init() { eprintln!("Failed to initialize dependency logging: {e}"); diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index 384699c64c..f4e0d20f38 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -5,7 +5,6 @@ use clap::ArgMatches; use crate::retry::with_retry; use environment::tracing_common; use futures::prelude::*; -use logging::MetricsLayer; use node_test_rig::{ environment::{EnvironmentBuilder, LoggerConfig}, testing_validator_config, ValidatorFiles, @@ -94,12 +93,11 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { let ( env_builder, - libp2p_discv5_layer, - file_logging_layer, - stdout_logging_layer, - _sse_logging_layer_opt, logger_config, - dependency_log_filter, + stdout_logging_layer, + _file_logging_layer, + _sse_logging_layer_opt, + _libp2p_discv5_layer, ) = tracing_common::construct_logger( LoggerConfig { path: None, @@ -122,11 +120,7 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { ); if let Err(e) = tracing_subscriber::registry() - .with(dependency_log_filter) - 
.with(file_logging_layer.with_filter(logger_config.logfile_debug_level)) .with(stdout_logging_layer.with_filter(logger_config.debug_level)) - .with(libp2p_discv5_layer) - .with(MetricsLayer) .try_init() { eprintln!("Failed to initialize dependency logging: {e}"); From 33e41f7249b7c34d9d3271fe4408eaa2a6dfeedc Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 1 Apr 2025 16:24:07 -0700 Subject: [PATCH 03/35] Consensus spec tests beta4 (#7231) N/A Run latest consensus spec tests on CI https://github.com/ethereum/consensus-specs/releases/tag/v1.5.0-beta.4 --- testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index c32a670e9a..c3a56ec11a 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.5.0-beta.2 +TESTS_TAG := v1.5.0-beta.4 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 4e744b797a..3aeff8ce06 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -50,6 +50,8 @@ excluded_paths = [ # TODO(das): Fulu tests are ignored for now "tests/.*/fulu", "tests/.*/fulu/ssz_static/MatrixEntry", + "tests/.*/eip7441", + "tests/.*/eip7732", ] From 0850bcfb89d1048030c1aced795f3d43d91abeb0 Mon Sep 17 00:00:00 2001 From: Varun Doshi <61531351+varun-doshi@users.noreply.github.com> Date: Wed, 2 Apr 2025 08:02:08 +0530 Subject: [PATCH 04/35] feat: add more bootnodes for Hoodi and Sepolia (#7222) Closes #7218 Add more bootnodes for Sepolia and Hoodi --- .../built_in_network_configs/hoodi/boot_enr.yaml | 3 +++ .../built_in_network_configs/sepolia/boot_enr.yaml | 8 +++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git 
a/common/eth2_network_config/built_in_network_configs/hoodi/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/hoodi/boot_enr.yaml index 33eaa7e8a9..5d8df4006c 100644 --- a/common/eth2_network_config/built_in_network_configs/hoodi/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/hoodi/boot_enr.yaml @@ -11,3 +11,6 @@ - enr:-Ku4QIC89sMC0o-irosD4_23lJJ4qCGOvdUz7SmoShWx0k6AaxCFTKviEHa-sa7-EzsiXpDp0qP0xzX6nKdXJX3X-IQBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpBd9cEGEAAJEP__________gmlkgnY0gmlwhIbRilSJc2VjcDI1NmsxoQK_m0f1DzDc9Cjrspm36zuRa7072HSiMGYWLsKiVSbP34N1ZHCCIyk - enr:-Ku4QNkWjw5tNzo8DtWqKm7CnDdIq_y7xppD6c1EZSwjB8rMOkSFA1wJPLoKrq5UvA7wcxIotH6Usx3PAugEN2JMncIBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpBd9cEGEAAJEP__________gmlkgnY0gmlwhIbHuBeJc2VjcDI1NmsxoQP3FwrhFYB60djwRjAoOjttq6du94DtkQuaN99wvgqaIYN1ZHCCIyk - enr:-OS4QMJGE13xEROqvKN1xnnt7U-noc51VXyM6wFMuL9LMhQDfo1p1dF_zFdS4OsnXz_vIYk-nQWnqJMWRDKvkSK6_CwDh2F0dG5ldHOIAAAAADAAAACGY2xpZW502IpMaWdodGhvdXNljDcuMC4wLWJldGEuM4RldGgykNLxmX9gAAkQAAgAAAAAAACCaWSCdjSCaXCEhse4F4RxdWljgiMqiXNlY3AyNTZrMaECef77P8k5l3PC_raLw42OAzdXfxeQ-58BJriNaqiRGJSIc3luY25ldHMAg3RjcIIjKIN1ZHCCIyg +# Teku +- enr:-LK4QDwhXMitMbC8xRiNL-XGMhRyMSOnxej-zGifjv9Nm5G8EF285phTU-CAsMHRRefZimNI7eNpAluijMQP7NDC8kEMh2F0dG5ldHOIAAAAAAAABgCEZXRoMpDS8Zl_YAAJEAAIAAAAAAAAgmlkgnY0gmlwhAOIT_SJc2VjcDI1NmsxoQMoHWNL4MAvh6YpQeM2SUjhUrLIPsAVPB8nyxbmckC6KIN0Y3CCIyiDdWRwgiMo +- enr:-LK4QPYl2HnMPQ7b1es6Nf_tFYkyya5bj9IqAKOEj2cmoqVkN8ANbJJJK40MX4kciL7pZszPHw6vLNyeC-O3HUrLQv8Mh2F0dG5ldHOIAAAAAAAAAMCEZXRoMpDS8Zl_YAAJEAAIAAAAAAAAgmlkgnY0gmlwhAMYRG-Jc2VjcDI1NmsxoQPQ35tjr6q1qUqwAnegQmYQyfqxC_6437CObkZneI9n34N0Y3CCIyiDdWRwgiMo diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml index 22b711861f..ba9a3e8354 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml +++ 
b/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml @@ -1,6 +1,12 @@ -# EF bootnodes +# EF - enr:-Ku4QDZ_rCowZFsozeWr60WwLgOfHzv1Fz2cuMvJqN5iJzLxKtVjoIURY42X_YTokMi3IGstW5v32uSYZyGUXj9Q_IECh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCo_ujukAAAaf__________gmlkgnY0gmlwhIpEe5iJc2VjcDI1NmsxoQNHTpFdaNSCEWiN_QqT396nb0PzcUpLe3OVtLph-AciBYN1ZHCCIy0 - enr:-Ku4QHRyRwEPT7s0XLYzJ_EeeWvZTXBQb4UCGy1F_3m-YtCNTtDlGsCMr4UTgo4uR89pv11uM-xq4w6GKfKhqU31hTgCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCo_ujukAAAaf__________gmlkgnY0gmlwhIrFM7WJc2VjcDI1NmsxoQI4diTwChN3zAAkarf7smOHCdFb1q3DSwdiQ_Lc_FdzFIN1ZHCCIy0 - enr:-Ku4QOkvvf0u5Hg4-HhY-SJmEyft77G5h3rUM8VF_e-Hag5cAma3jtmFoX4WElLAqdILCA-UWFRN1ZCDJJVuEHrFeLkDh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCo_ujukAAAaf__________gmlkgnY0gmlwhJK-AWeJc2VjcDI1NmsxoQLFcT5VE_NMiIC8Ll7GypWDnQ4UEmuzD7hF_Hf4veDJwIN1ZHCCIy0 - enr:-Ku4QH6tYsHKITYeHUu5kdfXgEZWI18EWk_2RtGOn1jBPlx2UlS_uF3Pm5Dx7tnjOvla_zs-wwlPgjnEOcQDWXey51QCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCo_ujukAAAaf__________gmlkgnY0gmlwhIs7Mc6Jc2VjcDI1NmsxoQIET4Mlv9YzhrYhX_H9D7aWMemUrvki6W4J2Qo0YmFMp4N1ZHCCIy0 - enr:-Ku4QDmz-4c1InchGitsgNk4qzorWMiFUoaPJT4G0IiF8r2UaevrekND1o7fdoftNucirj7sFFTTn2-JdC2Ej0p1Mn8Ch2F0dG5ldHOIAAAAAAAAAACEZXRoMpCo_ujukAAAaf__________gmlkgnY0gmlwhKpA-liJc2VjcDI1NmsxoQMpHP5U1DK8O_JQU6FadmWbE42qEdcGlllR8HcSkkfWq4N1ZHCCIy0 + +# Teku bootnode +- enr:-KO4QP7MmB3juk8rUjJHcUoxZDU9Np4FlW0HyDEGIjSO7GD9PbSsabu7713cWSUWKDkxIypIXg1A-6lG7ySRGOMZHeGCAmuEZXRoMpDTH2GRkAAAc___________gmlkgnY0gmlwhBSoyGOJc2VjcDI1NmsxoQNta5b_bexSSwwrGW2Re24MjfMntzFd0f2SAxQtMj3ueYN0Y3CCIyiDdWRwgiMo + +# Lodestar +- enr:-KG4QJejf8KVtMeAPWFhN_P0c4efuwu1pZHELTveiXUeim6nKYcYcMIQpGxxdgT2Xp9h-M5pr9gn2NbbwEAtxzu50Y8BgmlkgnY0gmlwhEEVkQCDaXA2kCoBBPnAEJg4AAAAAAAAAAGJc2VjcDI1NmsxoQLEh_eVvk07AQABvLkTGBQTrrIOQkzouMgSBtNHIRUxOIN1ZHCCIyiEdWRwNoIjKA From 80626e58d224ddd9ded3cd0ae67f02da59cbcba9 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 3 Apr 2025 15:01:34 +1100 Subject: [PATCH 05/35] Attempt to fix flaky network tests (#7244) --- 
.../network/src/subnet_service/tests/mod.rs | 93 ++++++++++--------- 1 file changed, 49 insertions(+), 44 deletions(-) diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 7e274850b5..6f9e8cd41a 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -116,18 +116,16 @@ fn get_subnet_service() -> SubnetService { ) } -// gets a number of events from the subscription service, or returns none if it times out after a number -// of slots -async fn get_events + Unpin>( +// gets a number of events from the subscription service, or returns none if it times out after a +// specified duration. +async fn get_events_until_timeout + Unpin>( stream: &mut S, num_events: Option, - num_slots_before_timeout: u32, + timeout: Duration, ) -> Vec { let mut events = Vec::new(); - - let timeout = - tokio::time::sleep(Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout); - futures::pin_mut!(timeout); + let sleep = tokio::time::sleep(timeout); + futures::pin_mut!(sleep); loop { tokio::select! 
{ @@ -139,7 +137,7 @@ async fn get_events + Unpin>( } } } - _ = timeout.as_mut() => { + _ = sleep.as_mut() => { break; } @@ -149,6 +147,17 @@ async fn get_events + Unpin>( events } +// gets a number of events from the subscription service, or returns none if it times out after a number +// of slots +async fn get_events_until_num_slots + Unpin>( + stream: &mut S, + num_events: Option, + num_slots_before_timeout: u32, +) -> Vec { + let timeout = Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout; + get_events_until_timeout(stream, num_events, timeout).await +} + mod test { #[cfg(not(windows))] @@ -196,7 +205,7 @@ mod test { // create the attestation service and subscriptions let mut subnet_service = get_subnet_service(); - let _events = get_events(&mut subnet_service, None, 1).await; + let _events = get_events_until_num_slots(&mut subnet_service, None, 1).await; let current_slot = subnet_service .beacon_chain @@ -249,7 +258,7 @@ mod test { ]; // Wait for 1 slot duration to get the unsubscription event - let events = get_events( + let events = get_events_until_num_slots( &mut subnet_service, Some(2), (MainnetEthSpec::slots_per_epoch()) as u32, @@ -281,7 +290,7 @@ mod test { // create the subnet service and subscriptions let mut subnet_service = get_subnet_service(); - let _events = get_events(&mut subnet_service, None, 0).await; + let _events = get_events_until_num_slots(&mut subnet_service, None, 0).await; let current_slot = subnet_service .beacon_chain .slot_clock @@ -330,14 +339,14 @@ mod test { if subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { // If we are permanently subscribed to this subnet, we won't see a subscribe message - let _ = get_events(&mut subnet_service, None, 1).await; + let _ = get_events_until_num_slots(&mut subnet_service, None, 1).await; } else { - let subscription = get_events(&mut subnet_service, None, 1).await; + let subscription = get_events_until_num_slots(&mut subnet_service, None, 1).await; 
assert_eq!(subscription, [expected]); } // Get event for 1 more slot duration, we should get the unsubscribe event now. - let unsubscribe_event = get_events(&mut subnet_service, None, 1).await; + let unsubscribe_event = get_events_until_num_slots(&mut subnet_service, None, 1).await; // If the long lived and short lived subnets are different, we should get an unsubscription // event. @@ -376,7 +385,7 @@ mod test { // submit the subscriptions subnet_service.validator_subscriptions(subscriptions.into_iter()); - let events = get_events(&mut subnet_service, Some(130), 10).await; + let events = get_events_until_num_slots(&mut subnet_service, Some(130), 10).await; let mut discover_peer_count = 0; let mut enr_add_count = 0; let mut unsubscribe_event_count = 0; @@ -445,7 +454,7 @@ mod test { // submit the subscriptions subnet_service.validator_subscriptions(subscriptions.into_iter()); - let events = get_events(&mut subnet_service, None, 3).await; + let events = get_events_until_num_slots(&mut subnet_service, None, 3).await; let mut discover_peer_count = 0; let mut enr_add_count = 0; let mut unexpected_msg_count = 0; @@ -495,7 +504,7 @@ mod test { // create the attestation service and subscriptions let mut subnet_service = get_subnet_service(); // Remove permanent events - let _events = get_events(&mut subnet_service, None, 0).await; + let _events = get_events_until_num_slots(&mut subnet_service, None, 0).await; let current_slot = subnet_service .beacon_chain @@ -560,7 +569,7 @@ mod test { // Unsubscription event should happen at the end of the slot. 
// We wait for 2 slots, to avoid timeout issues - let events = get_events(&mut subnet_service, None, 2).await; + let events = get_events_until_num_slots(&mut subnet_service, None, 2).await; let expected_subscription = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1)); @@ -577,28 +586,26 @@ mod test { println!("{events:?}"); let subscription_slot = current_slot + subscription_slot2 - 1; // one less do to the // advance subscription time - let wait_slots = subnet_service + let wait_duration = subnet_service .beacon_chain .slot_clock .duration_to_slot(subscription_slot) - .unwrap() - .as_millis() as u64 - / SLOT_DURATION_MILLIS; + .unwrap(); - let no_events = dbg!(get_events(&mut subnet_service, None, wait_slots as u32).await); + let no_events = + dbg!(get_events_until_timeout(&mut subnet_service, None, wait_duration).await); assert_eq!(no_events, []); let subscription_end_slot = current_slot + subscription_slot2 + 2; // +1 to get to the end of the duty slot, +1 for the slot to complete - let wait_slots = subnet_service + let wait_duration = subnet_service .beacon_chain .slot_clock .duration_to_slot(subscription_end_slot) - .unwrap() - .as_millis() as u64 - / SLOT_DURATION_MILLIS; + .unwrap(); - let second_subscribe_event = get_events(&mut subnet_service, None, wait_slots as u32).await; + let second_subscribe_event = + get_events_until_timeout(&mut subnet_service, None, wait_duration).await; // If the permanent and short lived subnets are different, we should get an unsubscription event. 
if !subnet_service.is_subscribed_permanent(&Subnet::Attestation(subnet_id1)) { assert_eq!( @@ -612,28 +619,26 @@ mod test { let subscription_slot = current_slot + subscription_slot3 - 1; - let wait_slots = subnet_service + let wait_duration = subnet_service .beacon_chain .slot_clock .duration_to_slot(subscription_slot) - .unwrap() - .as_millis() as u64 - / SLOT_DURATION_MILLIS; + .unwrap(); - let no_events = dbg!(get_events(&mut subnet_service, None, wait_slots as u32).await); + let no_events = + dbg!(get_events_until_timeout(&mut subnet_service, None, wait_duration).await); assert_eq!(no_events, []); let subscription_end_slot = current_slot + subscription_slot3 + 2; // +1 to get to the end of the duty slot, +1 for the slot to complete - let wait_slots = subnet_service + let wait_duration = subnet_service .beacon_chain .slot_clock .duration_to_slot(subscription_end_slot) - .unwrap() - .as_millis() as u64 - / SLOT_DURATION_MILLIS; + .unwrap(); - let third_subscribe_event = get_events(&mut subnet_service, None, wait_slots as u32).await; + let third_subscribe_event = + get_events_until_timeout(&mut subnet_service, None, wait_duration).await; if !subnet_service.is_subscribed_permanent(&Subnet::Attestation(subnet_id1)) { assert_eq!( @@ -652,7 +657,7 @@ mod test { // create the attestation service and subscriptions let mut subnet_service = get_subnet_service(); - let _events = get_events(&mut subnet_service, None, 0).await; + let _events = get_events_until_num_slots(&mut subnet_service, None, 0).await; let subscriptions = std::iter::once(Subscription::SyncCommittee(SyncCommitteeSubscription { @@ -673,7 +678,7 @@ mod test { let subnet_id = subnet_ids.iter().next().unwrap(); // Note: the unsubscription event takes 2 epochs (8 * 2 * 0.4 secs = 3.2 secs) - let events = get_events( + let events = get_events_until_num_slots( &mut subnet_service, Some(5), (MainnetEthSpec::slots_per_epoch() * 3) as u32, // Have some buffer time before getting 5 events @@ -709,7 +714,7 @@ mod 
test { // create the attestation service and subscriptions let mut subnet_service = get_subnet_service(); // Get the initial events from permanent subnet subscriptions - let _events = get_events(&mut subnet_service, None, 1).await; + let _events = get_events_until_num_slots(&mut subnet_service, None, 1).await; let subscriptions = std::iter::once(Subscription::SyncCommittee(SyncCommitteeSubscription { @@ -722,7 +727,7 @@ mod test { subnet_service.validator_subscriptions(subscriptions); // Get all immediate events (won't include unsubscriptions) - let events = get_events(&mut subnet_service, None, 1).await; + let events = get_events_until_num_slots(&mut subnet_service, None, 1).await; matches::assert_matches!( events[..], [ @@ -752,7 +757,7 @@ mod test { subnet_service.validator_subscriptions(subscriptions.into_iter()); // Get all immediate events (won't include unsubscriptions) - let events = get_events(&mut subnet_service, None, 1).await; + let events = get_events_until_num_slots(&mut subnet_service, None, 1).await; matches::assert_matches!(events[..], [SubnetServiceMessage::DiscoverPeers(_),]); // Should be unsubscribed at the end. From d6cd049a453bb5c091931b24408750699f6d4939 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 3 Apr 2025 21:10:15 +1100 Subject: [PATCH 06/35] RPC RequestId Cleanup (#7238) I've been working at updating another library to latest Lighthouse and got very confused with RPC request Ids. There were types that had fields called `request_id` and `id`. And interchangeably could have types `PeerRequestId`, `rpc::RequestId`, `AppRequestId`, `api_types::RequestId` or even `Request.id`. I couldn't keep track of which Id was linked to what and what each type meant. So this PR mainly does a few things: - Changes the field naming to match the actual type. So any field that has an `AppRequestId` will be named `app_request_id` rather than `id` or `request_id` for example. - I simplified the types. 
I removed the two different `RequestId` types (one in Lighthouse_network the other in the rpc) and grouped them into one. It has one downside tho. I had to add a few unreachable lines of code in the beacon processor, which the extra type would prevent, but I feel like it might be worth it. Happy to add an extra type to avoid those few lines. - I also removed the concept of `PeerRequestId` which sometimes went alongside a `request_id`. There were times were had a `PeerRequest` and a `Request` being returned, both of which contain a `RequestId` so we had redundant information. I've simplified the logic by removing `PeerRequestId` and made a `ResponseId`. I think if you look at the code changes, it simplifies things a bit and removes the redundant extra info. I think with this PR things are a little bit easier to reasonable about what is going on with all these RPC Ids. NOTE: I did this with the help of AI, so probably should be checked --- beacon_node/lighthouse_network/src/lib.rs | 2 +- .../lighthouse_network/src/rpc/handler.rs | 74 +++--- beacon_node/lighthouse_network/src/rpc/mod.rs | 98 +++----- .../src/rpc/self_limiter.rs | 16 +- .../src/service/api_types.rs | 15 +- .../lighthouse_network/src/service/mod.rs | 127 +++++----- .../lighthouse_network/tests/rpc_tests.rs | 113 ++++----- .../src/network_beacon_processor/mod.rs | 136 ++-------- .../network_beacon_processor/rpc_methods.rs | 237 ++++-------------- .../src/network_beacon_processor/tests.rs | 10 +- beacon_node/network/src/router.rs | 180 +++++-------- beacon_node/network/src/service.rs | 52 ++-- beacon_node/network/src/sync/manager.rs | 46 ++-- .../network/src/sync/network_context.rs | 16 +- beacon_node/network/src/sync/tests/lookups.rs | 43 ++-- beacon_node/network/src/sync/tests/range.rs | 12 +- 16 files changed, 438 insertions(+), 739 deletions(-) diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 2f8fd82c51..dbeb0c2c2b 100644 --- 
a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -122,6 +122,6 @@ pub use peer_manager::{ ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, }; // pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; -pub use service::api_types::{PeerRequestId, Response}; +pub use service::api_types::Response; pub use service::utils::*; pub use service::{Gossipsub, NetworkEvent}; diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 8353b661c5..b86e2b3a6f 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -4,8 +4,7 @@ use super::methods::{GoodbyeReason, RpcErrorResponse, RpcResponse}; use super::outbound::OutboundRequestContainer; use super::protocol::{InboundOutput, Protocol, RPCError, RPCProtocol, RequestType}; -use super::RequestId; -use super::{RPCReceived, RPCSend, ReqId, Request}; +use super::{RPCReceived, RPCSend, ReqId}; use crate::rpc::outbound::OutboundFramed; use crate::rpc::protocol::InboundFramed; use fnv::FnvHashMap; @@ -91,6 +90,11 @@ pub struct RPCHandler where E: EthSpec, { + /// The PeerId matching this `ConnectionHandler`. + peer_id: PeerId, + + /// The ConnectionId matching this `ConnectionHandler`. + connection_id: ConnectionId, /// The upgrade for inbound substreams. listen_protocol: SubstreamProtocol, ()>, @@ -139,9 +143,6 @@ where /// Timeout that will me used for inbound and outbound responses. resp_timeout: Duration, - - /// Information about this handler for logging purposes. 
- log_info: (PeerId, ConnectionId), } enum HandlerState { @@ -228,6 +229,8 @@ where connection_id: ConnectionId, ) -> Self { RPCHandler { + connection_id, + peer_id, listen_protocol, events_out: SmallVec::new(), dial_queue: SmallVec::new(), @@ -244,7 +247,6 @@ where fork_context, waker: None, resp_timeout, - log_info: (peer_id, connection_id), } } @@ -255,8 +257,8 @@ where if !self.dial_queue.is_empty() { debug!( unsent_queued_requests = self.dial_queue.len(), - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, "Starting handler shutdown" ); } @@ -306,8 +308,8 @@ where if !matches!(response, RpcResponse::StreamTermination(..)) { // the stream is closed after sending the expected number of responses trace!(%response, id = ?inbound_id, - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, "Inbound stream has expired. Response not sent"); } return; @@ -324,8 +326,8 @@ where if matches!(self.state, HandlerState::Deactivated) { // we no longer send responses after the handler is deactivated debug!(%response, id = ?inbound_id, - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, "Response not sent. 
Deactivated handler"); return; } @@ -394,8 +396,8 @@ where Poll::Ready(_) => { self.state = HandlerState::Deactivated; debug!( - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, "Shutdown timeout elapsed, Handler deactivated" ); return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( @@ -445,8 +447,8 @@ where ))); } else { crit!( - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, stream_id = ?outbound_id.get_ref(), "timed out substream not in the books"); } } @@ -577,8 +579,8 @@ where // Its useful to log when the request was completed. if matches!(info.protocol, Protocol::BlocksByRange) { debug!( - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, duration = Instant::now() .duration_since(info.request_start_time) .as_secs(), @@ -587,8 +589,8 @@ where } if matches!(info.protocol, Protocol::BlobsByRange) { debug!( - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, duration = Instant::now() .duration_since(info.request_start_time) .as_secs(), @@ -617,16 +619,16 @@ where if matches!(info.protocol, Protocol::BlocksByRange) { debug!( - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, duration = info.request_start_time.elapsed().as_secs(), "BlocksByRange Response failed" ); } if matches!(info.protocol, Protocol::BlobsByRange) { debug!( - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, duration = info.request_start_time.elapsed().as_secs(), "BlobsByRange Response failed" ); @@ -816,8 +818,8 @@ where } OutboundSubstreamState::Poisoned => { crit!( - peer_id = %self.log_info.0, - connection_id 
= %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, "Poisoned outbound substream" ); unreachable!("Coding Error: Outbound substream is poisoned") @@ -852,8 +854,8 @@ where && self.dial_negotiated == 0 { debug!( - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, "Goodbye sent, Handler deactivated" ); self.state = HandlerState::Deactivated; @@ -986,12 +988,13 @@ where self.shutdown(None); } - self.events_out - .push(HandlerEvent::Ok(RPCReceived::Request(Request { - id: RequestId::next(), + self.events_out.push(HandlerEvent::Ok(RPCReceived::Request( + super::InboundRequestId { + connection_id: self.connection_id, substream_id: self.current_inbound_substream_id, - r#type: req, - }))); + }, + req, + ))); self.current_inbound_substream_id.0 += 1; } @@ -1049,9 +1052,8 @@ where .is_some() { crit!( - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, - + peer_id = %self.peer_id, + connection_id = %self.connection_id, id = ?self.current_outbound_substream_id, "Duplicate outbound substream id"); } self.current_outbound_substream_id.0 += 1; diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index f5085e798c..1156447d56 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -16,7 +16,6 @@ use libp2p::PeerId; use logging::crit; use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}; use std::marker::PhantomData; -use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; @@ -49,8 +48,6 @@ mod protocol; mod rate_limiter; mod self_limiter; -static NEXT_REQUEST_ID: AtomicUsize = AtomicUsize::new(1); - /// Composite trait for a request id. 
pub trait ReqId: Send + 'static + std::fmt::Debug + Copy + Clone {} impl ReqId for T where T: Send + 'static + std::fmt::Debug + Copy + Clone {} @@ -80,7 +77,7 @@ pub enum RPCReceived { /// /// The `SubstreamId` is given by the `RPCHandler` as it identifies this request with the /// *inbound* substream over which it is managed. - Request(Request), + Request(InboundRequestId, RequestType), /// A response received from the outside. /// /// The `Id` corresponds to the application given ID of the original request sent to the @@ -91,35 +88,30 @@ pub enum RPCReceived { EndOfStream(Id, ResponseTermination), } -/// Rpc `Request` identifier. -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub struct RequestId(usize); +// An identifier for the inbound requests received via Rpc. +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct InboundRequestId { + /// The connection ID of the peer that sent the request. + connection_id: ConnectionId, + /// The ID of the substream that sent the request. + substream_id: SubstreamId, +} -impl RequestId { - /// Returns the next available [`RequestId`]. - pub fn next() -> Self { - Self(NEXT_REQUEST_ID.fetch_add(1, Ordering::SeqCst)) - } - - /// Creates an _unchecked_ [`RequestId`]. +impl InboundRequestId { + /// Creates an _unchecked_ [`InboundRequestId`]. /// - /// [`Rpc`] enforces that [`RequestId`]s are unique and not reused. + /// [`Rpc`] enforces that [`InboundRequestId`]s are unique and not reused. /// This constructor does not, hence the _unchecked_. /// /// It is primarily meant for allowing manual tests. - pub fn new_unchecked(id: usize) -> Self { - Self(id) + pub fn new_unchecked(connection_id: usize, substream_id: usize) -> Self { + Self { + connection_id: ConnectionId::new_unchecked(connection_id), + substream_id: SubstreamId::new(substream_id), + } } } -/// An Rpc Request. 
-#[derive(Debug, Clone)] -pub struct Request { - pub id: RequestId, - pub substream_id: SubstreamId, - pub r#type: RequestType, -} - impl std::fmt::Display for RPCSend { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -136,7 +128,7 @@ pub struct RPCMessage { /// The peer that sent the message. pub peer_id: PeerId, /// Handler managing this message. - pub conn_id: ConnectionId, + pub connection_id: ConnectionId, /// The message that was sent. pub message: Result, HandlerErr>, } @@ -215,14 +207,13 @@ impl RPC { pub fn send_response( &mut self, peer_id: PeerId, - id: (ConnectionId, SubstreamId), - _request_id: RequestId, - event: RpcResponse, + request_id: InboundRequestId, + response: RpcResponse, ) { self.events.push(ToSwarm::NotifyHandler { peer_id, - handler: NotifyHandler::One(id.0), - event: RPCSend::Response(id.1, event), + handler: NotifyHandler::One(request_id.connection_id), + event: RPCSend::Response(request_id.substream_id, response), }); } @@ -387,7 +378,7 @@ where for (id, proto) in limiter.peer_disconnected(peer_id) { let error_msg = ToSwarm::GenerateEvent(RPCMessage { peer_id, - conn_id: connection_id, + connection_id, message: Err(HandlerErr::Outbound { id, proto, @@ -408,7 +399,7 @@ where } if *p == peer_id => { *event = ToSwarm::GenerateEvent(RPCMessage { peer_id, - conn_id: connection_id, + connection_id, message: Err(HandlerErr::Outbound { id: *request_id, proto: req.versioned_protocol().protocol(), @@ -424,21 +415,17 @@ where fn on_connection_handler_event( &mut self, peer_id: PeerId, - conn_id: ConnectionId, + connection_id: ConnectionId, event: ::ToBehaviour, ) { match event { - HandlerEvent::Ok(RPCReceived::Request(Request { - id, - substream_id, - r#type, - })) => { + HandlerEvent::Ok(RPCReceived::Request(request_id, request_type)) => { if let Some(limiter) = self.limiter.as_mut() { // check if the request is conformant to the quota - match limiter.allows(&peer_id, &r#type) { + match 
limiter.allows(&peer_id, &request_type) { Err(RateLimitedErr::TooLarge) => { // we set the batch sizes, so this is a coding/config err for most protocols - let protocol = r#type.versioned_protocol().protocol(); + let protocol = request_type.versioned_protocol().protocol(); if matches!( protocol, Protocol::BlocksByRange @@ -448,7 +435,7 @@ where | Protocol::BlobsByRoot | Protocol::DataColumnsByRoot ) { - debug!(request = %r#type, %protocol, "Request too large to process"); + debug!(request = %request_type, %protocol, "Request too large to process"); } else { // Other protocols shouldn't be sending large messages, we should flag the peer kind crit!(%protocol, "Request size too large to ever be processed"); @@ -457,8 +444,7 @@ where // the handler upon receiving the error code will send it back to the behaviour self.send_response( peer_id, - (conn_id, substream_id), - id, + request_id, RpcResponse::Error( RpcErrorResponse::RateLimited, "Rate limited. Request too large".into(), @@ -467,13 +453,12 @@ where return; } Err(RateLimitedErr::TooSoon(wait_time)) => { - debug!(request = %r#type, %peer_id, wait_time_ms = wait_time.as_millis(), "Request exceeds the rate limit"); + debug!(request = %request_type, %peer_id, wait_time_ms = wait_time.as_millis(), "Request exceeds the rate limit"); // send an error code to the peer. // the handler upon receiving the error code will send it back to the behaviour self.send_response( peer_id, - (conn_id, substream_id), - id, + request_id, RpcResponse::Error( RpcErrorResponse::RateLimited, format!("Wait {:?}", wait_time).into(), @@ -487,12 +472,11 @@ where } // If we received a Ping, we queue a Pong response. 
- if let RequestType::Ping(_) = r#type { - trace!(connection_id = %conn_id, %peer_id, "Received Ping, queueing Pong"); + if let RequestType::Ping(_) = request_type { + trace!(connection_id = %connection_id, %peer_id, "Received Ping, queueing Pong"); self.send_response( peer_id, - (conn_id, substream_id), - id, + request_id, RpcResponse::Success(RpcSuccessResponse::Pong(Ping { data: self.seq_number, })), @@ -501,25 +485,21 @@ where self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, - conn_id, - message: Ok(RPCReceived::Request(Request { - id, - substream_id, - r#type, - })), + connection_id, + message: Ok(RPCReceived::Request(request_id, request_type)), })); } HandlerEvent::Ok(rpc) => { self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, - conn_id, + connection_id, message: Ok(rpc), })); } HandlerEvent::Err(err) => { self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, - conn_id, + connection_id, message: Err(err), })); } diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index af6ac37d2c..e4af977a6c 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -207,7 +207,7 @@ mod tests { use crate::rpc::rate_limiter::Quota; use crate::rpc::self_limiter::SelfRateLimiter; use crate::rpc::{Ping, Protocol, RequestType}; - use crate::service::api_types::{AppRequestId, RequestId, SingleLookupReqId, SyncRequestId}; + use crate::service::api_types::{AppRequestId, SingleLookupReqId, SyncRequestId}; use libp2p::PeerId; use logging::create_test_tracing_subscriber; use std::time::Duration; @@ -226,7 +226,7 @@ mod tests { Hash256::ZERO, &MainnetEthSpec::default_spec(), )); - let mut limiter: SelfRateLimiter = + let mut limiter: SelfRateLimiter = SelfRateLimiter::new(config, fork_context).unwrap(); let peer_id = PeerId::random(); let lookup_id = 0; @@ -234,12 +234,12 @@ mod tests { for i in 1..=5u32 { let _ 
= limiter.allows( peer_id, - RequestId::Application(AppRequestId::Sync(SyncRequestId::SingleBlock { + AppRequestId::Sync(SyncRequestId::SingleBlock { id: SingleLookupReqId { lookup_id, req_id: i, }, - })), + }), RequestType::Ping(Ping { data: i as u64 }), ); } @@ -256,9 +256,9 @@ mod tests { for i in 2..=5u32 { assert!(matches!( iter.next().unwrap().request_id, - RequestId::Application(AppRequestId::Sync(SyncRequestId::SingleBlock { + AppRequestId::Sync(SyncRequestId::SingleBlock { id: SingleLookupReqId { req_id, .. }, - })) if req_id == i, + }) if req_id == i, )); } @@ -281,9 +281,9 @@ mod tests { for i in 3..=5 { assert!(matches!( iter.next().unwrap().request_id, - RequestId::Application(AppRequestId::Sync(SyncRequestId::SingleBlock { + AppRequestId::Sync(SyncRequestId::SingleBlock { id: SingleLookupReqId { req_id, .. }, - })) if req_id == i, + }) if req_id == i, )); } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 894fff5074..b36f8cc215 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -1,8 +1,4 @@ -use crate::rpc::{ - methods::{ResponseTermination, RpcResponse, RpcSuccessResponse, StatusMessage}, - SubstreamId, -}; -use libp2p::swarm::ConnectionId; +use crate::rpc::methods::{ResponseTermination, RpcResponse, RpcSuccessResponse, StatusMessage}; use std::fmt::{Display, Formatter}; use std::sync::Arc; use types::{ @@ -10,9 +6,6 @@ use types::{ LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, }; -/// Identifier of requests sent by a peer. -pub type PeerRequestId = (ConnectionId, SubstreamId); - pub type Id = u32; #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] @@ -130,12 +123,6 @@ pub struct CustodyRequester(pub SingleLookupReqId); pub enum AppRequestId { Sync(SyncRequestId), Router, -} - -/// Global identifier of a request. 
-#[derive(Debug, Clone, Copy)] -pub enum RequestId { - Application(AppRequestId), Internal, } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 9650976c63..bc9f2011f8 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -10,8 +10,9 @@ use crate::peer_manager::{ use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; use crate::rpc::methods::MetadataRequest; use crate::rpc::{ - self, GoodbyeReason, HandlerErr, NetworkParams, Protocol, RPCError, RPCMessage, RPCReceived, - RequestType, ResponseTermination, RpcErrorResponse, RpcResponse, RpcSuccessResponse, RPC, + GoodbyeReason, HandlerErr, InboundRequestId, NetworkParams, Protocol, RPCError, RPCMessage, + RPCReceived, RequestType, ResponseTermination, RpcErrorResponse, RpcResponse, + RpcSuccessResponse, RPC, }; use crate::types::{ all_topics_at_fork, core_topics_to_subscribe, is_fork_non_core_topic, subnet_from_topic_hash, @@ -20,7 +21,7 @@ use crate::types::{ use crate::EnrExt; use crate::Eth2Enr; use crate::{metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; -use api_types::{AppRequestId, PeerRequestId, RequestId, Response}; +use api_types::{AppRequestId, Response}; use futures::stream::StreamExt; use gossipsub::{ IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, @@ -66,7 +67,7 @@ pub enum NetworkEvent { /// An RPC Request that was sent failed. RPCFailed { /// The id of the failed request. - id: AppRequestId, + app_request_id: AppRequestId, /// The peer to which this request was sent. peer_id: PeerId, /// The error of the failed request. @@ -76,15 +77,15 @@ pub enum NetworkEvent { /// The peer that sent the request. peer_id: PeerId, /// Identifier of the request. All responses to this request must use this id. - id: PeerRequestId, + inbound_request_id: InboundRequestId, /// Request the peer sent. 
- request: rpc::Request, + request_type: RequestType, }, ResponseReceived { /// Peer that sent the response. peer_id: PeerId, /// Id of the request to which the peer is responding. - id: AppRequestId, + app_request_id: AppRequestId, /// Response the peer sent. response: Response, }, @@ -126,7 +127,7 @@ where /// The peer manager that keeps track of peer's reputation and status. pub peer_manager: PeerManager, /// The Eth2 RPC specified in the wire-0 protocol. - pub eth2_rpc: RPC, + pub eth2_rpc: RPC, /// Discv5 Discovery protocol. pub discovery: Discovery, /// Keep regular connection to peers and disconnect if absent. @@ -669,7 +670,7 @@ impl Network { name = "libp2p", skip_all )] - pub fn eth2_rpc_mut(&mut self) -> &mut RPC { + pub fn eth2_rpc_mut(&mut self) -> &mut RPC { &mut self.swarm.behaviour_mut().eth2_rpc } /// Discv5 Discovery protocol. @@ -720,7 +721,7 @@ impl Network { name = "libp2p", skip_all )] - pub fn eth2_rpc(&self) -> &RPC { + pub fn eth2_rpc(&self) -> &RPC { &self.swarm.behaviour().eth2_rpc } /// Discv5 Discovery protocol. 
@@ -1104,16 +1105,16 @@ impl Network { pub fn send_request( &mut self, peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, request: RequestType, ) -> Result<(), (AppRequestId, RPCError)> { // Check if the peer is connected before sending an RPC request if !self.swarm.is_connected(&peer_id) { - return Err((request_id, RPCError::Disconnected)); + return Err((app_request_id, RPCError::Disconnected)); } self.eth2_rpc_mut() - .send_request(peer_id, RequestId::Application(request_id), request); + .send_request(peer_id, app_request_id, request); Ok(()) } @@ -1127,12 +1128,11 @@ impl Network { pub fn send_response( &mut self, peer_id: PeerId, - id: PeerRequestId, - request_id: rpc::RequestId, + inbound_request_id: InboundRequestId, response: Response, ) { self.eth2_rpc_mut() - .send_response(peer_id, id, request_id, response.into()) + .send_response(peer_id, inbound_request_id, response.into()) } /// Inform the peer that their request produced an error. @@ -1145,15 +1145,13 @@ impl Network { pub fn send_error_response( &mut self, peer_id: PeerId, - id: PeerRequestId, - request_id: rpc::RequestId, + inbound_request_id: InboundRequestId, error: RpcErrorResponse, reason: String, ) { self.eth2_rpc_mut().send_response( peer_id, - id, - request_id, + inbound_request_id, RpcResponse::Error(error, reason.into()), ) } @@ -1374,7 +1372,7 @@ impl Network { skip_all )] fn ping(&mut self, peer_id: PeerId) { - self.eth2_rpc_mut().ping(peer_id, RequestId::Internal); + self.eth2_rpc_mut().ping(peer_id, AppRequestId::Internal); } /// Sends a METADATA request to a peer. @@ -1394,7 +1392,7 @@ impl Network { RequestType::MetaData(MetadataRequest::new_v2()) }; self.eth2_rpc_mut() - .send_request(peer_id, RequestId::Internal, event); + .send_request(peer_id, AppRequestId::Internal, event); } /// Sends a METADATA response to a peer. 
@@ -1407,15 +1405,14 @@ impl Network { fn send_meta_data_response( &mut self, _req: MetadataRequest, - id: PeerRequestId, - request_id: rpc::RequestId, + inbound_request_id: InboundRequestId, peer_id: PeerId, ) { let metadata = self.network_globals.local_metadata.read().clone(); // The encoder is responsible for sending the negotiated version of the metadata let event = RpcResponse::Success(RpcSuccessResponse::MetaData(Arc::new(metadata))); self.eth2_rpc_mut() - .send_response(peer_id, id, request_id, event); + .send_response(peer_id, inbound_request_id, event); } // RPC Propagation methods @@ -1429,17 +1426,17 @@ impl Network { )] fn build_response( &mut self, - id: RequestId, + app_request_id: AppRequestId, peer_id: PeerId, response: Response, ) -> Option> { - match id { - RequestId::Application(id) => Some(NetworkEvent::ResponseReceived { + match app_request_id { + AppRequestId::Internal => None, + _ => Some(NetworkEvent::ResponseReceived { peer_id, - id, + app_request_id, response, }), - RequestId::Internal => None, } } @@ -1643,7 +1640,7 @@ impl Network { name = "libp2p", skip_all )] - fn inject_rpc_event(&mut self, event: RPCMessage) -> Option> { + fn inject_rpc_event(&mut self, event: RPCMessage) -> Option> { let peer_id = event.peer_id; // Do not permit Inbound events from peers that are being disconnected or RPC requests, @@ -1656,7 +1653,6 @@ impl Network { return None; } - let connection_id = event.conn_id; // The METADATA and PING RPC responses are handled within the behaviour and not propagated match event.message { Err(handler_err) => { @@ -1686,16 +1682,20 @@ impl Network { ConnectionDirection::Outgoing, ); // inform failures of requests coming outside the behaviour - if let RequestId::Application(id) = id { - Some(NetworkEvent::RPCFailed { peer_id, id, error }) - } else { + if let AppRequestId::Internal = id { None + } else { + Some(NetworkEvent::RPCFailed { + peer_id, + app_request_id: id, + error, + }) } } } } - Ok(RPCReceived::Request(request)) 
=> { - match request.r#type { + Ok(RPCReceived::Request(inbound_request_id, request_type)) => { + match request_type { /* Behaviour managed protocols: Ping and Metadata */ RequestType::Ping(ping) => { // inform the peer manager and send the response @@ -1704,12 +1704,7 @@ impl Network { } RequestType::MetaData(req) => { // send the requested meta-data - self.send_meta_data_response( - req, - (connection_id, request.substream_id), - request.id, - peer_id, - ); + self.send_meta_data_response(req, inbound_request_id, peer_id); None } RequestType::Goodbye(reason) => { @@ -1734,8 +1729,8 @@ impl Network { // propagate the STATUS message upwards Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::BlocksByRange(ref req) => { @@ -1757,32 +1752,32 @@ impl Network { ); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::BlocksByRoot(_) => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::BlobsByRange(_) => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"]); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::BlobsByRoot(_) => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_root"]); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::DataColumnsByRoot(_) => { @@ -1792,8 +1787,8 @@ impl Network { ); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + 
request_type, }) } RequestType::DataColumnsByRange(_) => { @@ -1803,8 +1798,8 @@ impl Network { ); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::LightClientBootstrap(_) => { @@ -1814,8 +1809,8 @@ impl Network { ); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::LightClientOptimisticUpdate => { @@ -1825,8 +1820,8 @@ impl Network { ); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::LightClientFinalityUpdate => { @@ -1836,8 +1831,8 @@ impl Network { ); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::LightClientUpdatesByRange(_) => { @@ -1847,8 +1842,8 @@ impl Network { ); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } } @@ -2010,7 +2005,7 @@ impl Network { debug!(%peer_id, %reason, "Peer Manager disconnecting peer"); // send one goodbye self.eth2_rpc_mut() - .shutdown(peer_id, RequestId::Internal, reason); + .shutdown(peer_id, AppRequestId::Internal, reason); None } } diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index d736fefa5f..aedd507751 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -98,7 +98,7 @@ fn test_tcp_status_rpc() { } NetworkEvent::ResponseReceived { peer_id: _, - id: AppRequestId::Router, + app_request_id: AppRequestId::Router, response, } => { // Should receive the RPC response @@ -118,13 +118,17 @@ fn test_tcp_status_rpc() { match receiver.next_event().await { 
NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, } => { - if request.r#type == rpc_request { + if request_type == rpc_request { // send the response debug!("Receiver Received"); - receiver.send_response(peer_id, id, request.id, rpc_response.clone()); + receiver.send_response( + peer_id, + inbound_request_id, + rpc_response.clone(), + ); } } _ => {} // Ignore other events @@ -204,7 +208,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { } NetworkEvent::ResponseReceived { peer_id: _, - id: _, + app_request_id: _, response, } => { warn!("Sender received a response"); @@ -240,10 +244,10 @@ fn test_tcp_blocks_by_range_chunked_rpc() { match receiver.next_event().await { NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, } => { - if request.r#type == rpc_request { + if request_type == rpc_request { // send the response warn!("Receiver got request"); for i in 0..messages_to_send { @@ -258,16 +262,14 @@ fn test_tcp_blocks_by_range_chunked_rpc() { }; receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, rpc_response.clone(), ); } // send the stream termination receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, Response::BlocksByRange(None), ); } @@ -338,7 +340,7 @@ fn test_blobs_by_range_chunked_rpc() { } NetworkEvent::ResponseReceived { peer_id: _, - id: _, + app_request_id: _, response, } => { warn!("Sender received a response"); @@ -368,10 +370,10 @@ fn test_blobs_by_range_chunked_rpc() { match receiver.next_event().await { NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, } => { - if request.r#type == rpc_request { + if request_type == rpc_request { // send the response warn!("Receiver got request"); for _ in 0..messages_to_send { @@ -379,16 +381,14 @@ fn test_blobs_by_range_chunked_rpc() { // second as altair and third as bellatrix. 
receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, rpc_response.clone(), ); } // send the stream termination receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, Response::BlobsByRange(None), ); } @@ -459,8 +459,8 @@ fn test_tcp_blocks_by_range_over_limit() { .unwrap(); } // The request will fail because the sender will refuse to send anything > MAX_RPC_SIZE - NetworkEvent::RPCFailed { id, .. } => { - assert!(matches!(id, AppRequestId::Router)); + NetworkEvent::RPCFailed { app_request_id, .. } => { + assert!(matches!(app_request_id, AppRequestId::Router)); return; } _ => {} // Ignore other behaviour events @@ -474,26 +474,24 @@ fn test_tcp_blocks_by_range_over_limit() { match receiver.next_event().await { NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, } => { - if request.r#type == rpc_request { + if request_type == rpc_request { // send the response warn!("Receiver got request"); for _ in 0..messages_to_send { let rpc_response = rpc_response_bellatrix_large.clone(); receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, rpc_response.clone(), ); } // send the stream termination receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, Response::BlocksByRange(None), ); } @@ -566,7 +564,7 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { } NetworkEvent::ResponseReceived { peer_id: _, - id: _, + app_request_id: _, response, } => // Should receive the RPC response @@ -608,15 +606,15 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { futures::future::Either::Left(( NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, }, _, )) => { - if request.r#type == rpc_request { + if request_type == rpc_request { // send the response warn!("Receiver got request"); - message_info = Some((peer_id, id, request.id)); + message_info = Some((peer_id, inbound_request_id)); } } 
futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required @@ -626,8 +624,8 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { // if we need to send messages send them here. This will happen after a delay if message_info.is_some() { messages_sent += 1; - let (peer_id, stream_id, request_id) = message_info.as_ref().unwrap(); - receiver.send_response(*peer_id, *stream_id, *request_id, rpc_response.clone()); + let (peer_id, inbound_request_id) = message_info.as_ref().unwrap(); + receiver.send_response(*peer_id, *inbound_request_id, rpc_response.clone()); debug!("Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages @@ -700,7 +698,7 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { } NetworkEvent::ResponseReceived { peer_id: _, - id: AppRequestId::Router, + app_request_id: AppRequestId::Router, response, } => match response { Response::BlocksByRange(Some(_)) => { @@ -727,26 +725,24 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { match receiver.next_event().await { NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, } => { - if request.r#type == rpc_request { + if request_type == rpc_request { // send the response warn!("Receiver got request"); for _ in 1..=messages_to_send { receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, rpc_response.clone(), ); } // send the stream termination receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, Response::BlocksByRange(None), ); } @@ -837,7 +833,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { } NetworkEvent::ResponseReceived { peer_id: _, - id: AppRequestId::Router, + app_request_id: AppRequestId::Router, response, } => match response { Response::BlocksByRoot(Some(_)) => { @@ -870,10 +866,10 @@ fn test_tcp_blocks_by_root_chunked_rpc() { match receiver.next_event().await { NetworkEvent::RequestReceived { peer_id, - id, 
- request, + inbound_request_id, + request_type, } => { - if request.r#type == rpc_request { + if request_type == rpc_request { // send the response debug!("Receiver got request"); @@ -886,14 +882,13 @@ fn test_tcp_blocks_by_root_chunked_rpc() { } else { rpc_response_bellatrix_small.clone() }; - receiver.send_response(peer_id, id, request.id, rpc_response); + receiver.send_response(peer_id, inbound_request_id, rpc_response); debug!("Sending message"); } // send the stream termination receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, Response::BlocksByRange(None), ); debug!("Send stream term"); @@ -977,7 +972,7 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { } NetworkEvent::ResponseReceived { peer_id: _, - id: AppRequestId::Router, + app_request_id: AppRequestId::Router, response, } => { debug!("Sender received a response"); @@ -1019,15 +1014,15 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { futures::future::Either::Left(( NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, }, _, )) => { - if request.r#type == rpc_request { + if request_type == rpc_request { // send the response warn!("Receiver got request"); - message_info = Some((peer_id, id, request.id)); + message_info = Some((peer_id, inbound_request_id)); } } futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required @@ -1037,8 +1032,8 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { // if we need to send messages send them here. 
This will happen after a delay if message_info.is_some() { messages_sent += 1; - let (peer_id, stream_id, request_id) = message_info.as_ref().unwrap(); - receiver.send_response(*peer_id, *stream_id, *request_id, rpc_response.clone()); + let (peer_id, inbound_request_id) = message_info.as_ref().unwrap(); + receiver.send_response(*peer_id, *inbound_request_id, rpc_response.clone()); debug!("Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 1329936932..3431c1abb9 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -15,12 +15,11 @@ use beacon_processor::{ work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, Work, WorkEvent as BeaconWorkEvent, }; -use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, LightClientUpdatesByRangeRequest, }; -use lighthouse_network::rpc::{RequestId, SubstreamId}; +use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PubsubMessage, @@ -647,21 +646,13 @@ impl NetworkBeaconProcessor { pub fn send_blocks_by_range_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, // Use ResponseId here request: BlocksByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = async move { processor - .handle_blocks_by_range_request( - peer_id, - connection_id, - substream_id, - 
request_id, - request, - ) + .handle_blocks_by_range_request(peer_id, inbound_request_id, request) .await; }; @@ -675,21 +666,13 @@ impl NetworkBeaconProcessor { pub fn send_blocks_by_roots_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, // Use ResponseId here request: BlocksByRootRequest, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = async move { processor - .handle_blocks_by_root_request( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) + .handle_blocks_by_root_request(peer_id, inbound_request_id, request) .await; }; @@ -703,21 +686,12 @@ impl NetworkBeaconProcessor { pub fn send_blobs_by_range_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: BlobsByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = move || { - processor.handle_blobs_by_range_request( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) - }; + let process_fn = + move || processor.handle_blobs_by_range_request(peer_id, inbound_request_id, request); self.try_send(BeaconWorkEvent { drop_during_sync: false, @@ -729,21 +703,12 @@ impl NetworkBeaconProcessor { pub fn send_blobs_by_roots_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: BlobsByRootRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = move || { - processor.handle_blobs_by_root_request( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) - }; + let process_fn = + move || processor.handle_blobs_by_root_request(peer_id, inbound_request_id, request); self.try_send(BeaconWorkEvent { drop_during_sync: false, @@ -755,20 +720,12 @@ impl 
NetworkBeaconProcessor { pub fn send_data_columns_by_roots_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: DataColumnsByRootRequest, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = move || { - processor.handle_data_columns_by_root_request( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) + processor.handle_data_columns_by_root_request(peer_id, inbound_request_id, request) }; self.try_send(BeaconWorkEvent { @@ -781,20 +738,12 @@ impl NetworkBeaconProcessor { pub fn send_data_columns_by_range_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: DataColumnsByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = move || { - processor.handle_data_columns_by_range_request( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) + processor.handle_data_columns_by_range_request(peer_id, inbound_request_id, request) }; self.try_send(BeaconWorkEvent { @@ -807,21 +756,12 @@ impl NetworkBeaconProcessor { pub fn send_light_client_bootstrap_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: LightClientBootstrapRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = move || { - processor.handle_light_client_bootstrap( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) - }; + let process_fn = + move || processor.handle_light_client_bootstrap(peer_id, inbound_request_id, request); self.try_send(BeaconWorkEvent { drop_during_sync: true, @@ -833,19 +773,11 @@ impl NetworkBeaconProcessor { pub fn send_light_client_optimistic_update_request( self: &Arc, peer_id: PeerId, - 
connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = move || { - processor.handle_light_client_optimistic_update( - peer_id, - connection_id, - substream_id, - request_id, - ) - }; + let process_fn = + move || processor.handle_light_client_optimistic_update(peer_id, inbound_request_id); self.try_send(BeaconWorkEvent { drop_during_sync: true, @@ -857,19 +789,11 @@ impl NetworkBeaconProcessor { pub fn send_light_client_finality_update_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = move || { - processor.handle_light_client_finality_update( - peer_id, - connection_id, - substream_id, - request_id, - ) - }; + let process_fn = + move || processor.handle_light_client_finality_update(peer_id, inbound_request_id); self.try_send(BeaconWorkEvent { drop_during_sync: true, @@ -881,20 +805,12 @@ impl NetworkBeaconProcessor { pub fn send_light_client_updates_by_range_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: LightClientUpdatesByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = move || { - processor.handle_light_client_updates_by_range( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) + processor.handle_light_client_updates_by_range(peer_id, inbound_request_id, request) }; self.try_send(BeaconWorkEvent { diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 7beadffc06..4694c926c9 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ 
b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -4,12 +4,11 @@ use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use itertools::process_results; -use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, }; use lighthouse_network::rpc::*; -use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; +use lighthouse_network::{PeerId, ReportSource, Response, SyncInfo}; use methods::LightClientUpdatesByRangeRequest; use slot_clock::SlotClock; use std::collections::{hash_map::Entry, HashMap}; @@ -34,15 +33,12 @@ impl NetworkBeaconProcessor { pub fn send_response( &self, peer_id: PeerId, + inbound_request_id: InboundRequestId, response: Response, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, ) { self.send_network_message(NetworkMessage::SendResponse { peer_id, - request_id, - id: (connection_id, substream_id), + inbound_request_id, response, }) } @@ -52,15 +48,13 @@ impl NetworkBeaconProcessor { peer_id: PeerId, error: RpcErrorResponse, reason: String, - id: PeerRequestId, - request_id: RequestId, + inbound_request_id: InboundRequestId, ) { self.send_network_message(NetworkMessage::SendErrorResponse { peer_id, error, reason, - id, - request_id, + inbound_request_id, }) } @@ -161,24 +155,14 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_root_request( self: Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: BlocksByRootRequest, ) { self.terminate_response_stream( peer_id, - connection_id, - substream_id, - request_id, + inbound_request_id, self.clone() - .handle_blocks_by_root_request_inner( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) 
+ .handle_blocks_by_root_request_inner(peer_id, inbound_request_id, request) .await, Response::BlocksByRoot, ); @@ -188,9 +172,7 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_root_request_inner( self: Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: BlocksByRootRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { let log_results = |peer_id, requested_blocks, send_block_count| { @@ -220,10 +202,8 @@ impl NetworkBeaconProcessor { Ok(Some(block)) => { self.send_response( peer_id, + inbound_request_id, Response::BlocksByRoot(Some(block.clone())), - connection_id, - substream_id, - request_id, ); send_block_count += 1; } @@ -265,23 +245,13 @@ impl NetworkBeaconProcessor { pub fn handle_blobs_by_root_request( self: Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: BlobsByRootRequest, ) { self.terminate_response_stream( peer_id, - connection_id, - substream_id, - request_id, - self.handle_blobs_by_root_request_inner( - peer_id, - connection_id, - substream_id, - request_id, - request, - ), + inbound_request_id, + self.handle_blobs_by_root_request_inner(peer_id, inbound_request_id, request), Response::BlobsByRoot, ); } @@ -290,9 +260,7 @@ impl NetworkBeaconProcessor { pub fn handle_blobs_by_root_request_inner( &self, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: BlobsByRootRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { let Some(requested_root) = request.blob_ids.as_slice().first().map(|id| id.block_root) @@ -314,10 +282,8 @@ impl NetworkBeaconProcessor { if let Ok(Some(blob)) = self.chain.data_availability_checker.get_blob(id) { self.send_response( peer_id, + inbound_request_id, Response::BlobsByRoot(Some(blob)), - 
connection_id, - substream_id, - request_id, ); send_blob_count += 1; } else { @@ -339,10 +305,8 @@ impl NetworkBeaconProcessor { if blob_sidecar.index == *index { self.send_response( peer_id, + inbound_request_id, Response::BlobsByRoot(Some(blob_sidecar.clone())), - connection_id, - substream_id, - request_id, ); send_blob_count += 1; break 'inner; @@ -375,23 +339,13 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_root_request( self: Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: DataColumnsByRootRequest, ) { self.terminate_response_stream( peer_id, - connection_id, - substream_id, - request_id, - self.handle_data_columns_by_root_request_inner( - peer_id, - connection_id, - substream_id, - request_id, - request, - ), + inbound_request_id, + self.handle_data_columns_by_root_request_inner(peer_id, inbound_request_id, request), Response::DataColumnsByRoot, ); } @@ -400,9 +354,7 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_root_request_inner( &self, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: DataColumnsByRootRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { let mut send_data_column_count = 0; @@ -416,10 +368,8 @@ impl NetworkBeaconProcessor { send_data_column_count += 1; self.send_response( peer_id, + inbound_request_id, Response::DataColumnsByRoot(Some(data_column)), - connection_id, - substream_id, - request_id, ); } Ok(None) => {} // no-op @@ -449,22 +399,16 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_updates_by_range( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: LightClientUpdatesByRangeRequest, ) { self.terminate_response_stream( peer_id, - connection_id, - substream_id, - 
request_id, + inbound_request_id, self.clone() .handle_light_client_updates_by_range_request_inner( peer_id, - connection_id, - substream_id, - request_id, + inbound_request_id, request, ), Response::LightClientUpdatesByRange, @@ -475,9 +419,7 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_updates_by_range_request_inner( self: Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, req: LightClientUpdatesByRangeRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { debug!( @@ -516,8 +458,7 @@ impl NetworkBeaconProcessor { self.send_network_message(NetworkMessage::SendResponse { peer_id, response: Response::LightClientUpdatesByRange(Some(Arc::new(lc_update.clone()))), - request_id, - id: (connection_id, substream_id), + inbound_request_id, }); } @@ -549,16 +490,12 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_bootstrap( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: LightClientBootstrapRequest, ) { self.terminate_response_single_item( peer_id, - connection_id, - substream_id, - request_id, + inbound_request_id, match self.chain.get_light_client_bootstrap(&request.root) { Ok(Some((bootstrap, _))) => Ok(Arc::new(bootstrap)), Ok(None) => Err(( @@ -583,15 +520,11 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_optimistic_update( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, ) { self.terminate_response_single_item( peer_id, - connection_id, - substream_id, - request_id, + inbound_request_id, match self .chain .light_client_server_cache @@ -611,15 +544,11 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_finality_update( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, 
- request_id: RequestId, + inbound_request_id: InboundRequestId, ) { self.terminate_response_single_item( peer_id, - connection_id, - substream_id, - request_id, + inbound_request_id, match self .chain .light_client_server_cache @@ -639,24 +568,14 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_range_request( self: Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, req: BlocksByRangeRequest, ) { self.terminate_response_stream( peer_id, - connection_id, - substream_id, - request_id, + inbound_request_id, self.clone() - .handle_blocks_by_range_request_inner( - peer_id, - connection_id, - substream_id, - request_id, - req, - ) + .handle_blocks_by_range_request_inner(peer_id, inbound_request_id, req) .await, Response::BlocksByRange, ); @@ -666,9 +585,7 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_range_request_inner( self: Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, req: BlocksByRangeRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { debug!( @@ -789,9 +706,8 @@ impl NetworkBeaconProcessor { blocks_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, - request_id, + inbound_request_id, response: Response::BlocksByRange(Some(block.clone())), - id: (connection_id, substream_id), }); } } @@ -852,23 +768,13 @@ impl NetworkBeaconProcessor { pub fn handle_blobs_by_range_request( self: Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, req: BlobsByRangeRequest, ) { self.terminate_response_stream( peer_id, - connection_id, - substream_id, - request_id, - self.handle_blobs_by_range_request_inner( - peer_id, - connection_id, - substream_id, - request_id, - req, - ), + inbound_request_id, + 
self.handle_blobs_by_range_request_inner(peer_id, inbound_request_id, req), Response::BlobsByRange, ); } @@ -877,9 +783,7 @@ impl NetworkBeaconProcessor { fn handle_blobs_by_range_request_inner( &self, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, req: BlobsByRangeRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { debug!( @@ -1016,9 +920,8 @@ impl NetworkBeaconProcessor { blobs_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, + inbound_request_id, response: Response::BlobsByRange(Some(blob_sidecar.clone())), - request_id, - id: (connection_id, substream_id), }); } } @@ -1048,23 +951,13 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_range_request( &self, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, req: DataColumnsByRangeRequest, ) { self.terminate_response_stream( peer_id, - connection_id, - substream_id, - request_id, - self.handle_data_columns_by_range_request_inner( - peer_id, - connection_id, - substream_id, - request_id, - req, - ), + inbound_request_id, + self.handle_data_columns_by_range_request_inner(peer_id, inbound_request_id, req), Response::DataColumnsByRange, ); } @@ -1073,9 +966,7 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_range_request_inner( &self, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, req: DataColumnsByRangeRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { debug!( @@ -1205,11 +1096,10 @@ impl NetworkBeaconProcessor { data_columns_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, - request_id, + inbound_request_id, response: Response::DataColumnsByRange(Some( data_column_sidecar.clone(), )), - id: (connection_id, substream_id), }); } Ok(None) => {} 
// no-op @@ -1252,32 +1142,20 @@ impl NetworkBeaconProcessor { fn terminate_response_single_item Response>( &self, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, result: Result, into_response: F, ) { match result { Ok(resp) => { - // Not necessary to explicitly send a termination message if this InboundRequest - // returns <= 1 for InboundRequest::expected_responses - // https://github.com/sigp/lighthouse/blob/3058b96f2560f1da04ada4f9d8ba8e5651794ff6/beacon_node/lighthouse_network/src/rpc/handler.rs#L555-L558 self.send_network_message(NetworkMessage::SendResponse { peer_id, - request_id, + inbound_request_id, response: into_response(resp), - id: (connection_id, substream_id), }); } Err((error_code, reason)) => { - self.send_error_response( - peer_id, - error_code, - reason, - (connection_id, substream_id), - request_id, - ); + self.send_error_response(peer_id, error_code, reason, inbound_request_id); } } } @@ -1287,27 +1165,18 @@ impl NetworkBeaconProcessor { fn terminate_response_stream) -> Response>( &self, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, result: Result<(), (RpcErrorResponse, &'static str)>, into_response: F, ) { match result { Ok(_) => self.send_network_message(NetworkMessage::SendResponse { peer_id, - request_id, + inbound_request_id, response: into_response(None), - id: (connection_id, substream_id), }), Err((error_code, reason)) => { - self.send_error_response( - peer_id, - error_code, - reason.into(), - (connection_id, substream_id), - request_id, - ); + self.send_error_response(peer_id, error_code, reason.into(), inbound_request_id); } } } diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 69ba5c1dbd..aa5f54ac1f 100644 --- 
a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -14,9 +14,8 @@ use beacon_chain::test_utils::{ }; use beacon_chain::{BeaconChain, WhenSlotSkipped}; use beacon_processor::{work_reprocessing_queue::*, *}; -use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::{BlobsByRangeRequest, MetaDataV3}; -use lighthouse_network::rpc::{RequestId, SubstreamId}; +use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::{ discv5::enr::{self, CombinedKey}, rpc::methods::{MetaData, MetaDataV2}, @@ -366,9 +365,7 @@ impl TestRig { self.network_beacon_processor .send_blobs_by_range_request( PeerId::random(), - ConnectionId::new_unchecked(42), - SubstreamId::new(24), - RequestId::new_unchecked(0), + InboundRequestId::new_unchecked(42, 24), BlobsByRangeRequest { start_slot: 0, count, @@ -1149,8 +1146,7 @@ async fn test_blobs_by_range() { if let NetworkMessage::SendResponse { peer_id: _, response: Response::BlobsByRange(blob), - id: _, - request_id: _, + inbound_request_id: _, } = next { if blob.is_some() { diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 7376244501..05c00b76af 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -14,12 +14,10 @@ use beacon_processor::{ work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache, }; use futures::prelude::*; -use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::*; use lighthouse_network::{ - rpc, service::api_types::{AppRequestId, SyncRequestId}, - MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Response, + MessageId, NetworkGlobals, PeerId, PubsubMessage, Response, }; use logging::crit; use logging::TimeLatch; @@ -54,19 +52,19 @@ pub enum RouterMessage { /// An RPC request has been received. 
RPCRequestReceived { peer_id: PeerId, - id: PeerRequestId, - request: rpc::Request, + inbound_request_id: InboundRequestId, + request_type: RequestType, }, /// An RPC response has been received. RPCResponseReceived { peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, response: Response, }, /// An RPC request failed RPCFailed { peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, error: RPCError, }, /// A gossip message has been received. The fields are: message id, the peer that sent us this @@ -159,24 +157,24 @@ impl Router { } RouterMessage::RPCRequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, } => { - self.handle_rpc_request(peer_id, id, request); + self.handle_rpc_request(peer_id, inbound_request_id, request_type); } RouterMessage::RPCResponseReceived { peer_id, - request_id, + app_request_id, response, } => { - self.handle_rpc_response(peer_id, request_id, response); + self.handle_rpc_response(peer_id, app_request_id, response); } RouterMessage::RPCFailed { peer_id, - request_id, + app_request_id, error, } => { - self.on_rpc_error(peer_id, request_id, error); + self.on_rpc_error(peer_id, app_request_id, error); } RouterMessage::PubsubMessage(id, peer_id, gossip, should_process) => { self.handle_gossip(id, peer_id, gossip, should_process); @@ -190,23 +188,18 @@ impl Router { fn handle_rpc_request( &mut self, peer_id: PeerId, - request_id: PeerRequestId, - rpc_request: rpc::Request, + inbound_request_id: InboundRequestId, // Use ResponseId here + request_type: RequestType, ) { if !self.network_globals.peers.read().is_connected(&peer_id) { - debug!( %peer_id, request = ?rpc_request, "Dropping request of disconnected peer"); + debug!(%peer_id, request = ?request_type, "Dropping request of disconnected peer"); return; } - match rpc_request.r#type { - RequestType::Status(status_message) => self.on_status_request( - peer_id, - request_id.0, - request_id.1, - rpc_request.id, - 
status_message, - ), + match request_type { + RequestType::Status(status_message) => { + self.on_status_request(peer_id, inbound_request_id, status_message) + } RequestType::BlocksByRange(request) => { - // return just one block in case the step parameter is used. https://github.com/ethereum/consensus-specs/pull/2856 let mut count = *request.count(); if *request.step() > 1 { count = 1; @@ -223,9 +216,7 @@ impl Router { self.handle_beacon_processor_send_result( self.network_beacon_processor.send_blocks_by_range_request( peer_id, - request_id.0, - request_id.1, - rpc_request.id, + inbound_request_id, blocks_request, ), ) @@ -233,86 +224,50 @@ impl Router { RequestType::BlocksByRoot(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor.send_blocks_by_roots_request( peer_id, - request_id.0, - request_id.1, - rpc_request.id, + inbound_request_id, request, ), ), RequestType::BlobsByRange(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor.send_blobs_by_range_request( peer_id, - request_id.0, - request_id.1, - rpc_request.id, + inbound_request_id, request, ), ), RequestType::BlobsByRoot(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor.send_blobs_by_roots_request( peer_id, - request_id.0, - request_id.1, - rpc_request.id, + inbound_request_id, request, ), ), RequestType::DataColumnsByRoot(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_data_columns_by_roots_request( - peer_id, - request_id.0, - request_id.1, - rpc_request.id, - request, - ), + .send_data_columns_by_roots_request(peer_id, inbound_request_id, request), ), RequestType::DataColumnsByRange(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_data_columns_by_range_request( - peer_id, - request_id.0, - request_id.1, - rpc_request.id, - request, - ), + .send_data_columns_by_range_request(peer_id, inbound_request_id, request), ), 
RequestType::LightClientBootstrap(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_light_client_bootstrap_request( - peer_id, - request_id.0, - request_id.1, - rpc_request.id, - request, - ), + .send_light_client_bootstrap_request(peer_id, inbound_request_id, request), ), RequestType::LightClientOptimisticUpdate => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_light_client_optimistic_update_request( - peer_id, - request_id.0, - request_id.1, - rpc_request.id, - ), + .send_light_client_optimistic_update_request(peer_id, inbound_request_id), ), RequestType::LightClientFinalityUpdate => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_light_client_finality_update_request( - peer_id, - request_id.0, - request_id.1, - rpc_request.id, - ), + .send_light_client_finality_update_request(peer_id, inbound_request_id), ), RequestType::LightClientUpdatesByRange(request) => self .handle_beacon_processor_send_result( self.network_beacon_processor .send_light_client_updates_by_range_request( peer_id, - request_id.0, - request_id.1, - rpc_request.id, + inbound_request_id, request, ), ), @@ -324,7 +279,7 @@ impl Router { fn handle_rpc_response( &mut self, peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, response: Response, ) { match response { @@ -336,22 +291,22 @@ impl Router { ) } Response::BlocksByRange(beacon_block) => { - self.on_blocks_by_range_response(peer_id, request_id, beacon_block); + self.on_blocks_by_range_response(peer_id, app_request_id, beacon_block); } Response::BlocksByRoot(beacon_block) => { - self.on_blocks_by_root_response(peer_id, request_id, beacon_block); + self.on_blocks_by_root_response(peer_id, app_request_id, beacon_block); } Response::BlobsByRange(blob) => { - self.on_blobs_by_range_response(peer_id, request_id, blob); + self.on_blobs_by_range_response(peer_id, app_request_id, blob); } Response::BlobsByRoot(blob) => { - 
self.on_blobs_by_root_response(peer_id, request_id, blob); + self.on_blobs_by_root_response(peer_id, app_request_id, blob); } Response::DataColumnsByRoot(data_column) => { - self.on_data_columns_by_root_response(peer_id, request_id, data_column); + self.on_data_columns_by_root_response(peer_id, app_request_id, data_column); } Response::DataColumnsByRange(data_column) => { - self.on_data_columns_by_range_response(peer_id, request_id, data_column); + self.on_data_columns_by_range_response(peer_id, app_request_id, data_column); } // Light client responses should not be received Response::LightClientBootstrap(_) @@ -563,12 +518,12 @@ impl Router { /// An error occurred during an RPC request. The state is maintained by the sync manager, so /// this function notifies the sync manager of the error. - pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: AppRequestId, error: RPCError) { + pub fn on_rpc_error(&mut self, peer_id: PeerId, app_request_id: AppRequestId, error: RPCError) { // Check if the failed RPC belongs to sync - if let AppRequestId::Sync(request_id) = request_id { + if let AppRequestId::Sync(sync_request_id) = app_request_id { self.send_to_sync(SyncMessage::RpcError { peer_id, - request_id, + sync_request_id, error, }); } @@ -580,9 +535,7 @@ impl Router { pub fn on_status_request( &mut self, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, // Use ResponseId here status: StatusMessage, ) { debug!(%peer_id, ?status, "Received Status Request"); @@ -590,9 +543,8 @@ impl Router { // Say status back. 
self.network.send_response( peer_id, + inbound_request_id, Response::Status(status_message(&self.chain)), - (connection_id, substream_id), - request_id, ); self.handle_beacon_processor_send_result( @@ -606,11 +558,11 @@ impl Router { pub fn on_blocks_by_range_response( &mut self, peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, beacon_block: Option>>, ) { - let request_id = match request_id { - AppRequestId::Sync(sync_id) => match sync_id { + let sync_request_id = match app_request_id { + AppRequestId::Sync(sync_request_id) => match sync_request_id { id @ SyncRequestId::BlocksByRange { .. } => id, other => { crit!(request = ?other, "BlocksByRange response on incorrect request"); @@ -621,6 +573,7 @@ impl Router { crit!(%peer_id, "All BBRange requests belong to sync"); return; } + AppRequestId::Internal => unreachable!("Handled internally"), }; trace!( @@ -631,7 +584,7 @@ impl Router { self.send_to_sync(SyncMessage::RpcBlock { peer_id, - request_id, + sync_request_id, beacon_block, seen_timestamp: timestamp_now(), }); @@ -640,7 +593,7 @@ impl Router { pub fn on_blobs_by_range_response( &mut self, peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, blob_sidecar: Option>>, ) { trace!( @@ -648,10 +601,10 @@ impl Router { "Received BlobsByRange Response" ); - if let AppRequestId::Sync(id) = request_id { + if let AppRequestId::Sync(sync_request_id) = app_request_id { self.send_to_sync(SyncMessage::RpcBlob { peer_id, - request_id: id, + sync_request_id, blob_sidecar, seen_timestamp: timestamp_now(), }); @@ -664,10 +617,10 @@ impl Router { pub fn on_blocks_by_root_response( &mut self, peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, beacon_block: Option>>, ) { - let request_id = match request_id { + let sync_request_id = match app_request_id { AppRequestId::Sync(sync_id) => match sync_id { id @ SyncRequestId::SingleBlock { .. 
} => id, other => { @@ -679,6 +632,7 @@ impl Router { crit!(%peer_id, "All BBRoot requests belong to sync"); return; } + AppRequestId::Internal => unreachable!("Handled internally"), }; trace!( @@ -687,7 +641,7 @@ impl Router { ); self.send_to_sync(SyncMessage::RpcBlock { peer_id, - request_id, + sync_request_id, beacon_block, seen_timestamp: timestamp_now(), }); @@ -697,10 +651,10 @@ impl Router { pub fn on_blobs_by_root_response( &mut self, peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, blob_sidecar: Option>>, ) { - let request_id = match request_id { + let sync_request_id = match app_request_id { AppRequestId::Sync(sync_id) => match sync_id { id @ SyncRequestId::SingleBlob { .. } => id, other => { @@ -712,6 +666,7 @@ impl Router { crit!(%peer_id, "All BlobsByRoot requests belong to sync"); return; } + AppRequestId::Internal => unreachable!("Handled internally"), }; trace!( @@ -719,7 +674,7 @@ impl Router { "Received BlobsByRoot Response" ); self.send_to_sync(SyncMessage::RpcBlob { - request_id, + sync_request_id, peer_id, blob_sidecar, seen_timestamp: timestamp_now(), @@ -730,10 +685,10 @@ impl Router { pub fn on_data_columns_by_root_response( &mut self, peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, data_column: Option>>, ) { - let request_id = match request_id { + let sync_request_id = match app_request_id { AppRequestId::Sync(sync_id) => match sync_id { id @ SyncRequestId::DataColumnsByRoot { .. 
} => id, other => { @@ -745,6 +700,7 @@ impl Router { crit!(%peer_id, "All DataColumnsByRoot requests belong to sync"); return; } + AppRequestId::Internal => unreachable!("Handled internally"), }; trace!( @@ -752,7 +708,7 @@ impl Router { "Received DataColumnsByRoot Response" ); self.send_to_sync(SyncMessage::RpcDataColumn { - request_id, + sync_request_id, peer_id, data_column, seen_timestamp: timestamp_now(), @@ -762,7 +718,7 @@ impl Router { pub fn on_data_columns_by_range_response( &mut self, peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, data_column: Option>>, ) { trace!( @@ -770,10 +726,10 @@ impl Router { "Received DataColumnsByRange Response" ); - if let AppRequestId::Sync(id) = request_id { + if let AppRequestId::Sync(sync_request_id) = app_request_id { self.send_to_sync(SyncMessage::RpcDataColumn { peer_id, - request_id: id, + sync_request_id, data_column, seen_timestamp: timestamp_now(), }); @@ -824,7 +780,7 @@ impl HandlerNetworkContext { pub fn send_processor_request(&mut self, peer_id: PeerId, request: RequestType) { self.inform_network(NetworkMessage::SendRequest { peer_id, - request_id: AppRequestId::Router, + app_request_id: AppRequestId::Router, request, }) } @@ -833,14 +789,12 @@ impl HandlerNetworkContext { pub fn send_response( &mut self, peer_id: PeerId, + inbound_request_id: InboundRequestId, response: Response, - id: PeerRequestId, - request_id: RequestId, ) { self.inform_network(NetworkMessage::SendResponse { - request_id, peer_id, - id, + inbound_request_id, response, }) } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 778ac63290..7afd62ab2e 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -10,14 +10,15 @@ use beacon_processor::{work_reprocessing_queue::ReprocessQueueMessage, BeaconPro use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; -use lighthouse_network::rpc::{RequestId, 
RequestType}; +use lighthouse_network::rpc::InboundRequestId; +use lighthouse_network::rpc::RequestType; use lighthouse_network::service::Network; use lighthouse_network::types::GossipKind; use lighthouse_network::Enr; use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance}; use lighthouse_network::{ rpc::{GoodbyeReason, RpcErrorResponse}, - Context, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Response, Subnet, + Context, PeerAction, PubsubMessage, ReportSource, Response, Subnet, }; use lighthouse_network::{ service::api_types::AppRequestId, @@ -61,22 +62,20 @@ pub enum NetworkMessage { SendRequest { peer_id: PeerId, request: RequestType, - request_id: AppRequestId, + app_request_id: AppRequestId, }, /// Send a successful Response to the libp2p service. SendResponse { peer_id: PeerId, - request_id: RequestId, + inbound_request_id: InboundRequestId, response: Response, - id: PeerRequestId, }, /// Sends an error response to an RPC request. SendErrorResponse { peer_id: PeerId, - request_id: RequestId, + inbound_request_id: InboundRequestId, error: RpcErrorResponse, reason: String, - id: PeerRequestId, }, /// Publish a list of messages to the gossipsub protocol. 
Publish { messages: Vec> }, @@ -488,30 +487,34 @@ impl NetworkService { } NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, } => { self.send_to_router(RouterMessage::RPCRequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, }); } NetworkEvent::ResponseReceived { peer_id, - id, + app_request_id, response, } => { self.send_to_router(RouterMessage::RPCResponseReceived { peer_id, - request_id: id, + app_request_id, response, }); } - NetworkEvent::RPCFailed { id, peer_id, error } => { + NetworkEvent::RPCFailed { + app_request_id, + peer_id, + error, + } => { self.send_to_router(RouterMessage::RPCFailed { peer_id, - request_id: id, + app_request_id, error, }); } @@ -601,35 +604,34 @@ impl NetworkService { NetworkMessage::SendRequest { peer_id, request, - request_id, + app_request_id, } => { - if let Err((request_id, error)) = - self.libp2p.send_request(peer_id, request_id, request) + if let Err((app_request_id, error)) = + self.libp2p.send_request(peer_id, app_request_id, request) { self.send_to_router(RouterMessage::RPCFailed { peer_id, - request_id, + app_request_id, error, }); } } NetworkMessage::SendResponse { peer_id, + inbound_request_id, response, - id, - request_id, } => { - self.libp2p.send_response(peer_id, id, request_id, response); + self.libp2p + .send_response(peer_id, inbound_request_id, response); } NetworkMessage::SendErrorResponse { peer_id, error, - id, - request_id, + inbound_request_id, reason, } => { self.libp2p - .send_error_response(peer_id, id, request_id, error, reason); + .send_error_response(peer_id, inbound_request_id, error, reason); } NetworkMessage::ValidationResult { propagation_source, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 9a48e9aa5d..a02302a525 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -108,7 +108,7 @@ pub enum SyncMessage { /// A block has 
been received from the RPC. RpcBlock { - request_id: SyncRequestId, + sync_request_id: SyncRequestId, peer_id: PeerId, beacon_block: Option>>, seen_timestamp: Duration, @@ -116,7 +116,7 @@ pub enum SyncMessage { /// A blob has been received from the RPC. RpcBlob { - request_id: SyncRequestId, + sync_request_id: SyncRequestId, peer_id: PeerId, blob_sidecar: Option>>, seen_timestamp: Duration, @@ -124,7 +124,7 @@ pub enum SyncMessage { /// A data columns has been received from the RPC RpcDataColumn { - request_id: SyncRequestId, + sync_request_id: SyncRequestId, peer_id: PeerId, data_column: Option>>, seen_timestamp: Duration, @@ -153,7 +153,7 @@ pub enum SyncMessage { /// An RPC Error has occurred on a request. RpcError { peer_id: PeerId, - request_id: SyncRequestId, + sync_request_id: SyncRequestId, error: RPCError, }, @@ -477,9 +477,9 @@ impl SyncManager { } /// Handles RPC errors related to requests that were emitted from the sync manager. - fn inject_error(&mut self, peer_id: PeerId, request_id: SyncRequestId, error: RPCError) { + fn inject_error(&mut self, peer_id: PeerId, sync_request_id: SyncRequestId, error: RPCError) { trace!("Sync manager received a failed RPC"); - match request_id { + match sync_request_id { SyncRequestId::SingleBlock { id } => { self.on_single_block_response(id, peer_id, RpcEvent::RPCError(error)) } @@ -509,8 +509,8 @@ impl SyncManager { fn peer_disconnect(&mut self, peer_id: &PeerId) { // Inject a Disconnected error on all requests associated with the disconnected peer // to retry all batches/lookups - for request_id in self.network.peer_disconnected(peer_id) { - self.inject_error(*peer_id, request_id, RPCError::Disconnected); + for sync_request_id in self.network.peer_disconnected(peer_id) { + self.inject_error(*peer_id, sync_request_id, RPCError::Disconnected); } // Remove peer from all data structures @@ -751,25 +751,27 @@ impl SyncManager { self.add_peers_force_range_sync(&peers, head_root, head_slot); } SyncMessage::RpcBlock { - 
request_id, + sync_request_id, peer_id, beacon_block, seen_timestamp, } => { - self.rpc_block_received(request_id, peer_id, beacon_block, seen_timestamp); + self.rpc_block_received(sync_request_id, peer_id, beacon_block, seen_timestamp); } SyncMessage::RpcBlob { - request_id, + sync_request_id, peer_id, blob_sidecar, seen_timestamp, - } => self.rpc_blob_received(request_id, peer_id, blob_sidecar, seen_timestamp), + } => self.rpc_blob_received(sync_request_id, peer_id, blob_sidecar, seen_timestamp), SyncMessage::RpcDataColumn { - request_id, + sync_request_id, peer_id, data_column, seen_timestamp, - } => self.rpc_data_column_received(request_id, peer_id, data_column, seen_timestamp), + } => { + self.rpc_data_column_received(sync_request_id, peer_id, data_column, seen_timestamp) + } SyncMessage::UnknownParentBlock(peer_id, block, block_root) => { let block_slot = block.slot(); let parent_root = block.parent_root(); @@ -845,9 +847,9 @@ impl SyncManager { } SyncMessage::RpcError { peer_id, - request_id, + sync_request_id, error, - } => self.inject_error(peer_id, request_id, error), + } => self.inject_error(peer_id, sync_request_id, error), SyncMessage::BlockComponentProcessed { process_type, result, @@ -1018,12 +1020,12 @@ impl SyncManager { fn rpc_block_received( &mut self, - request_id: SyncRequestId, + sync_request_id: SyncRequestId, peer_id: PeerId, block: Option>>, seen_timestamp: Duration, ) { - match request_id { + match sync_request_id { SyncRequestId::SingleBlock { id } => self.on_single_block_response( id, peer_id, @@ -1060,12 +1062,12 @@ impl SyncManager { fn rpc_blob_received( &mut self, - request_id: SyncRequestId, + sync_request_id: SyncRequestId, peer_id: PeerId, blob: Option>>, seen_timestamp: Duration, ) { - match request_id { + match sync_request_id { SyncRequestId::SingleBlob { id } => self.on_single_blob_response( id, peer_id, @@ -1084,12 +1086,12 @@ impl SyncManager { fn rpc_data_column_received( &mut self, - request_id: SyncRequestId, + 
sync_request_id: SyncRequestId, peer_id: PeerId, data_column: Option>>, seen_timestamp: Duration, ) { - match request_id { + match sync_request_id { SyncRequestId::DataColumnsByRoot(req_id) => { self.on_data_columns_by_root_response( req_id, diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 16fcf93bcf..69b350f8cb 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -372,11 +372,11 @@ impl SyncNetworkContext { ); let request = RequestType::Status(status_message.clone()); - let request_id = AppRequestId::Router; + let app_request_id = AppRequestId::Router; let _ = self.send_network_msg(NetworkMessage::SendRequest { peer_id, request, - request_id, + app_request_id, }); } } @@ -595,7 +595,7 @@ impl SyncNetworkContext { .send(NetworkMessage::SendRequest { peer_id, request: RequestType::BlocksByRoot(request.into_request(&self.fork_context)), - request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), + app_request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -684,7 +684,7 @@ impl SyncNetworkContext { .send(NetworkMessage::SendRequest { peer_id, request: RequestType::BlobsByRoot(request.clone().into_request(&self.fork_context)), - request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), + app_request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -733,7 +733,7 @@ impl SyncNetworkContext { self.send_network_msg(NetworkMessage::SendRequest { peer_id, request: RequestType::DataColumnsByRoot(request.clone().into_request(&self.chain.spec)), - request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(id)), + app_request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(id)), })?; debug!( @@ -839,7 +839,7 @@ impl SyncNetworkContext { .send(NetworkMessage::SendRequest { peer_id, 
request: RequestType::BlocksByRange(request.clone().into()), - request_id: AppRequestId::Sync(SyncRequestId::BlocksByRange(id)), + app_request_id: AppRequestId::Sync(SyncRequestId::BlocksByRange(id)), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -880,7 +880,7 @@ impl SyncNetworkContext { .send(NetworkMessage::SendRequest { peer_id, request: RequestType::BlobsByRange(request.clone()), - request_id: AppRequestId::Sync(SyncRequestId::BlobsByRange(id)), + app_request_id: AppRequestId::Sync(SyncRequestId::BlobsByRange(id)), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -919,7 +919,7 @@ impl SyncNetworkContext { self.send_network_msg(NetworkMessage::SendRequest { peer_id, request: RequestType::DataColumnsByRange(request.clone()), - request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRange(id)), + app_request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRange(id)), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index fe72979930..3864e66e1b 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -460,7 +460,7 @@ impl TestRig { ) { self.log("parent_lookup_block_response"); self.send_sync_message(SyncMessage::RpcBlock { - request_id: SyncRequestId::SingleBlock { id }, + sync_request_id: SyncRequestId::SingleBlock { id }, peer_id, beacon_block, seen_timestamp: D, @@ -475,7 +475,7 @@ impl TestRig { ) { self.log("single_lookup_block_response"); self.send_sync_message(SyncMessage::RpcBlock { - request_id: SyncRequestId::SingleBlock { id }, + sync_request_id: SyncRequestId::SingleBlock { id }, peer_id, beacon_block, seen_timestamp: D, @@ -493,7 +493,7 @@ impl TestRig { blob_sidecar.as_ref().map(|b| b.index) )); self.send_sync_message(SyncMessage::RpcBlob { - request_id: SyncRequestId::SingleBlob { id }, + sync_request_id: SyncRequestId::SingleBlob { id }, peer_id, 
blob_sidecar, seen_timestamp: D, @@ -507,7 +507,7 @@ impl TestRig { blob_sidecar: Option>>, ) { self.send_sync_message(SyncMessage::RpcBlob { - request_id: SyncRequestId::SingleBlob { id }, + sync_request_id: SyncRequestId::SingleBlob { id }, peer_id, blob_sidecar, seen_timestamp: D, @@ -583,7 +583,7 @@ impl TestRig { fn parent_lookup_failed(&mut self, id: SingleLookupReqId, peer_id: PeerId, error: RPCError) { self.send_sync_message(SyncMessage::RpcError { peer_id, - request_id: SyncRequestId::SingleBlock { id }, + sync_request_id: SyncRequestId::SingleBlock { id }, error, }) } @@ -602,7 +602,7 @@ impl TestRig { fn single_lookup_failed(&mut self, id: SingleLookupReqId, peer_id: PeerId, error: RPCError) { self.send_sync_message(SyncMessage::RpcError { peer_id, - request_id: SyncRequestId::SingleBlock { id }, + sync_request_id: SyncRequestId::SingleBlock { id }, error, }) } @@ -614,11 +614,11 @@ impl TestRig { } } - fn return_empty_sampling_request(&mut self, (request_id, _): DCByRootId) { + fn return_empty_sampling_request(&mut self, (sync_request_id, _): DCByRootId) { let peer_id = PeerId::random(); // Send stream termination self.send_sync_message(SyncMessage::RpcDataColumn { - request_id, + sync_request_id, peer_id, data_column: None, seen_timestamp: timestamp_now(), @@ -631,10 +631,10 @@ impl TestRig { peer_id: PeerId, error: RPCError, ) { - for (request_id, _) in sampling_ids { + for (sync_request_id, _) in sampling_ids { self.send_sync_message(SyncMessage::RpcError { peer_id, - request_id, + sync_request_id, error: error.clone(), }) } @@ -760,14 +760,14 @@ impl TestRig { fn complete_data_columns_by_root_request( &mut self, - (request_id, _): DCByRootId, + (sync_request_id, _): DCByRootId, data_columns: &[Arc>], ) { let peer_id = PeerId::random(); for data_column in data_columns { // Send chunks self.send_sync_message(SyncMessage::RpcDataColumn { - request_id, + sync_request_id, peer_id, data_column: Some(data_column.clone()), seen_timestamp: timestamp_now(), 
@@ -775,7 +775,7 @@ impl TestRig { } // Send stream termination self.send_sync_message(SyncMessage::RpcDataColumn { - request_id, + sync_request_id, peer_id, data_column: None, seen_timestamp: timestamp_now(), @@ -785,17 +785,17 @@ impl TestRig { /// Return RPCErrors for all active requests of peer fn rpc_error_all_active_requests(&mut self, disconnected_peer_id: PeerId) { self.drain_network_rx(); - while let Ok(request_id) = self.pop_received_network_event(|ev| match ev { + while let Ok(sync_request_id) = self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id, - request_id: AppRequestId::Sync(id), + app_request_id: AppRequestId::Sync(id), .. } if *peer_id == disconnected_peer_id => Some(*id), _ => None, }) { self.send_sync_message(SyncMessage::RpcError { peer_id: disconnected_peer_id, - request_id, + sync_request_id, error: RPCError::Disconnected, }); } @@ -879,7 +879,7 @@ impl TestRig { NetworkMessage::SendRequest { peer_id: _, request: RequestType::BlocksByRoot(request), - request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), + app_request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), } if request.block_roots().to_vec().contains(&for_block) => Some(*id), _ => None, }) @@ -899,7 +899,7 @@ impl TestRig { NetworkMessage::SendRequest { peer_id: _, request: RequestType::BlobsByRoot(request), - request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), + app_request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), } if request .blob_ids .to_vec() @@ -924,7 +924,7 @@ impl TestRig { NetworkMessage::SendRequest { peer_id: _, request: RequestType::BlocksByRoot(request), - request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), + app_request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), } if request.block_roots().to_vec().contains(&for_block) => Some(*id), _ => None, }) @@ -946,7 +946,7 @@ impl TestRig { NetworkMessage::SendRequest { peer_id: _, request: 
RequestType::BlobsByRoot(request), - request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), + app_request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), } if request .blob_ids .to_vec() @@ -974,7 +974,8 @@ impl TestRig { NetworkMessage::SendRequest { peer_id: _, request: RequestType::DataColumnsByRoot(request), - request_id: AppRequestId::Sync(id @ SyncRequestId::DataColumnsByRoot { .. }), + app_request_id: + AppRequestId::Sync(id @ SyncRequestId::DataColumnsByRoot { .. }), } if request .data_column_ids .to_vec() diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index ca4344c0b2..2871ea2a4d 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -223,7 +223,7 @@ impl TestRig { RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( OldBlocksByRangeRequestV2 { start_slot, .. }, )), - request_id: AppRequestId::Sync(SyncRequestId::BlocksByRange(id)), + app_request_id: AppRequestId::Sync(SyncRequestId::BlocksByRange(id)), } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), _ => None, }) @@ -240,7 +240,7 @@ impl TestRig { RequestType::DataColumnsByRange(DataColumnsByRangeRequest { start_slot, .. }), - request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRange(id)), + app_request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRange(id)), } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), _ => None, }) { @@ -256,7 +256,7 @@ impl TestRig { NetworkMessage::SendRequest { peer_id, request: RequestType::BlobsByRange(BlobsByRangeRequest { start_slot, .. 
}), - request_id: AppRequestId::Sync(SyncRequestId::BlobsByRange(id)), + app_request_id: AppRequestId::Sync(SyncRequestId::BlobsByRange(id)), } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), _ => None, }) @@ -283,7 +283,7 @@ impl TestRig { "Completing BlocksByRange request {blocks_req_id:?} with empty stream" )); self.send_sync_message(SyncMessage::RpcBlock { - request_id: SyncRequestId::BlocksByRange(blocks_req_id), + sync_request_id: SyncRequestId::BlocksByRange(blocks_req_id), peer_id: block_peer, beacon_block: None, seen_timestamp: D, @@ -297,7 +297,7 @@ impl TestRig { "Completing BlobsByRange request {id:?} with empty stream" )); self.send_sync_message(SyncMessage::RpcBlob { - request_id: SyncRequestId::BlobsByRange(id), + sync_request_id: SyncRequestId::BlobsByRange(id), peer_id, blob_sidecar: None, seen_timestamp: D, @@ -310,7 +310,7 @@ impl TestRig { "Completing DataColumnsByRange request {id:?} with empty stream" )); self.send_sync_message(SyncMessage::RpcDataColumn { - request_id: SyncRequestId::DataColumnsByRange(id), + sync_request_id: SyncRequestId::DataColumnsByRange(id), peer_id, data_column: None, seen_timestamp: D, From 82d16744557522eb62cf56727767985fcc3d953e Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 4 Apr 2025 13:30:22 +1100 Subject: [PATCH 07/35] Rust 1.86.0 lints (#7254) Implement lints for the new Rust compiler version 1.86.0. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 6 ++-- .../beacon_chain/src/beacon_proposer_cache.rs | 2 +- .../beacon_chain/src/block_verification.rs | 2 +- .../beacon_chain/src/early_attester_cache.rs | 2 +- .../src/eth1_finalization_cache.rs | 2 +- .../tests/payload_invalidation.rs | 2 +- beacon_node/execution_layer/src/lib.rs | 2 +- .../src/peer_manager/mod.rs | 28 +++++++++---------- .../src/peer_manager/peerdb.rs | 6 ++-- .../lighthouse_network/src/rpc/codec.rs | 4 +-- .../src/types/sync_state.rs | 4 +-- .../lighthouse_network/tests/rpc_tests.rs | 4 +-- .../gossip_methods.rs | 2 +- beacon_node/network/src/sync/manager.rs | 2 +- beacon_node/network/src/sync/tests/lookups.rs | 2 +- .../eth2_wallet_manager/src/locked_wallet.rs | 2 +- consensus/proto_array/src/proto_array.rs | 2 +- .../src/proto_array_fork_choice.rs | 2 +- .../types/src/sync_committee_contribution.rs | 4 +-- .../src/test_utils/test_random/bitfield.rs | 4 +-- crypto/bls/src/lib.rs | 2 +- testing/ef_tests/src/cases.rs | 8 +++--- testing/ef_tests/src/cases/fork_choice.rs | 2 +- .../http_api/src/tests/keystores.rs | 4 +-- validator_client/validator_store/src/lib.rs | 4 +-- 25 files changed, 52 insertions(+), 52 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 24f83179f6..624dc968ad 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -741,7 +741,7 @@ impl BeaconChain { /// /// - `slot` always increases by `1`. /// - Skipped slots contain the root of the closest prior - /// non-skipped slot (identical to the way they are stored in `state.block_roots`). + /// non-skipped slot (identical to the way they are stored in `state.block_roots`). /// - Iterator returns `(Hash256, Slot)`. /// /// Will return a `BlockOutOfRange` error if the requested start slot is before the period of @@ -805,7 +805,7 @@ impl BeaconChain { /// /// - `slot` always decreases by `1`. 
/// - Skipped slots contain the root of the closest prior - /// non-skipped slot (identical to the way they are stored in `state.block_roots`) . + /// non-skipped slot (identical to the way they are stored in `state.block_roots`) . /// - Iterator returns `(Hash256, Slot)`. /// - The provided `block_root` is included as the first item in the iterator. pub fn rev_iter_block_roots_from( @@ -834,7 +834,7 @@ impl BeaconChain { /// - `slot` always decreases by `1`. /// - Iterator returns `(Hash256, Slot)`. /// - As this iterator starts at the `head` of the chain (viz., the best block), the first slot - /// returned may be earlier than the wall-clock slot. + /// returned may be earlier than the wall-clock slot. pub fn rev_iter_state_roots_from<'a>( &'a self, state_root: Hash256, diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index d10bbfbbc5..567433caee 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -178,7 +178,7 @@ pub fn compute_proposer_duties_from_head( /// - Returns an error if `state.current_epoch() > target_epoch`. /// - No-op if `state.current_epoch() == target_epoch`. /// - It must be the case that `state.canonical_root() == state_root`, but this function will not -/// check that. +/// check that. pub fn ensure_state_is_in_epoch( state: &mut BeaconState, state_root: Hash256, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 4a5282a1d7..48caea9c7f 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -5,7 +5,7 @@ //! - Verification for gossip blocks (i.e., should we gossip some block from the network). //! - Verification for normal blocks (e.g., some block received on the RPC during a parent lookup). //! 
- Verification for chain segments (e.g., some chain of blocks received on the RPC during a -//! sync). +//! sync). //! //! The primary source of complexity here is that we wish to avoid doing duplicate work as a block //! moves through the verification process. For example, if some block is verified for gossip, we diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index c94ea0e941..b62554f1b4 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -33,7 +33,7 @@ pub struct CacheItem { /// /// - Produce an attestation without using `chain.canonical_head`. /// - Verify that a block root exists (i.e., will be imported in the future) during attestation -/// verification. +/// verification. /// - Provide a block which can be sent to peers via RPC. #[derive(Default)] pub struct EarlyAttesterCache { diff --git a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs index 24b6542eab..8280d15675 100644 --- a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs +++ b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs @@ -469,7 +469,7 @@ pub mod tests { let last_finalized_eth1 = eth1s_by_count .range(0..(finalized_deposits + 1)) .map(|(_, eth1)| eth1) - .last() + .next_back() .cloned(); assert_eq!( eth1cache.finalize(finalized_checkpoint), diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 01b790bb25..d41c33176a 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1283,7 +1283,7 @@ impl InvalidHeadSetup { /// /// 1. A chain where the only viable head block has an invalid execution payload. /// 2. A block (`fork_block`) which will become the head of the chain when - /// it is imported. + /// it is imported. 
async fn new() -> InvalidHeadSetup { let slots_per_epoch = E::slots_per_epoch(); let mut rig = InvalidPayloadRig::new().enable_attestations(); diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index cde6cc6f48..820ec8d6b6 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1710,7 +1710,7 @@ impl ExecutionLayer { /// /// - `Some(true)` if the given `block_hash` is the terminal proof-of-work block. /// - `Some(false)` if the given `block_hash` is certainly *not* the terminal proof-of-work - /// block. + /// block. /// - `None` if the `block_hash` or its parent were not present on the execution engine. /// - `Err(_)` if there was an error connecting to the execution engine. /// diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 6067d52042..baeb597676 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -992,23 +992,23 @@ impl PeerManager { /// - Do not prune outbound peers to exceed our outbound target. /// - Do not prune more peers than our target peer count. /// - If we have an option to remove a number of peers, remove ones that have the least - /// long-lived subnets. + /// long-lived subnets. /// - When pruning peers based on subnet count. If multiple peers can be chosen, choose a peer - /// that is not subscribed to a long-lived sync committee subnet. + /// that is not subscribed to a long-lived sync committee subnet. /// - When pruning peers based on subnet count, do not prune a peer that would lower us below the - /// MIN_SYNC_COMMITTEE_PEERS peer count. To keep it simple, we favour a minimum number of sync-committee-peers over - /// uniformity subnet peers. NOTE: We could apply more sophisticated logic, but the code is - /// simpler and easier to maintain if we take this approach. 
If we are pruning subnet peers - /// below the MIN_SYNC_COMMITTEE_PEERS and maintaining the sync committee peers, this should be - /// fine as subnet peers are more likely to be found than sync-committee-peers. Also, we're - /// in a bit of trouble anyway if we have so few peers on subnets. The - /// MIN_SYNC_COMMITTEE_PEERS - /// number should be set low as an absolute lower bound to maintain peers on the sync - /// committees. + /// MIN_SYNC_COMMITTEE_PEERS peer count. To keep it simple, we favour a minimum number of sync-committee-peers over + /// uniformity subnet peers. NOTE: We could apply more sophisticated logic, but the code is + /// simpler and easier to maintain if we take this approach. If we are pruning subnet peers + /// below the MIN_SYNC_COMMITTEE_PEERS and maintaining the sync committee peers, this should be + /// fine as subnet peers are more likely to be found than sync-committee-peers. Also, we're + /// in a bit of trouble anyway if we have so few peers on subnets. The + /// MIN_SYNC_COMMITTEE_PEERS + /// number should be set low as an absolute lower bound to maintain peers on the sync + /// committees. /// - Do not prune trusted peers. NOTE: This means if a user has more trusted peers than the - /// excess peer limit, all of the following logic is subverted as we will not prune any peers. - /// Also, the more trusted peers a user has, the less room Lighthouse has to efficiently manage - /// its peers across the subnets. + /// excess peer limit, all of the following logic is subverted as we will not prune any peers. + /// Also, the more trusted peers a user has, the less room Lighthouse has to efficiently manage + /// its peers across the subnets. /// /// Prune peers in the following order: /// 1. 
Remove worst scoring peers diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index b692639911..0912bd1cd2 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -155,7 +155,7 @@ impl PeerDB { matches!( self.connection_status(peer_id), Some(PeerConnectionStatus::Disconnected { .. }) - | Some(PeerConnectionStatus::Unknown { .. }) + | Some(PeerConnectionStatus::Unknown) | None ) && !self.score_state_banned_or_disconnected(peer_id) } @@ -776,8 +776,8 @@ impl PeerDB { NewConnectionState::Connected { .. } // We have established a new connection (peer may not have been seen before) | NewConnectionState::Disconnecting { .. }// We are disconnecting from a peer that may not have been registered before | NewConnectionState::Dialing { .. } // We are dialing a potentially new peer - | NewConnectionState::Disconnected { .. } // Dialing a peer that responds by a different ID can be immediately - // disconnected without having being stored in the db before + | NewConnectionState::Disconnected // Dialing a peer that responds by a different ID can be immediately + // disconnected without having being stored in the db before ) { warn!(log_ref, "Updating state of unknown peer"; "peer_id" => %peer_id, "new_state" => ?new_state); diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 2bf35b0e35..838f1b8a16 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -1009,7 +1009,7 @@ mod tests { let mut block: BeaconBlockBellatrix<_, FullPayload> = BeaconBlockBellatrix::empty(&Spec::default_spec()); let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(5000).collect::>()); + let txs = VariableList::from(std::iter::repeat_n(tx, 5000).collect::>()); 
block.body.execution_payload.execution_payload.transactions = txs; @@ -1028,7 +1028,7 @@ mod tests { let mut block: BeaconBlockBellatrix<_, FullPayload> = BeaconBlockBellatrix::empty(&Spec::default_spec()); let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(100000).collect::>()); + let txs = VariableList::from(std::iter::repeat_n(tx, 100000).collect::>()); block.body.execution_payload.execution_payload.transactions = txs; diff --git a/beacon_node/lighthouse_network/src/types/sync_state.rs b/beacon_node/lighthouse_network/src/types/sync_state.rs index 0519d6f4b0..0327f7073f 100644 --- a/beacon_node/lighthouse_network/src/types/sync_state.rs +++ b/beacon_node/lighthouse_network/src/types/sync_state.rs @@ -104,8 +104,8 @@ impl std::fmt::Display for SyncState { match self { SyncState::SyncingFinalized { .. } => write!(f, "Syncing Finalized Chain"), SyncState::SyncingHead { .. } => write!(f, "Syncing Head Chain"), - SyncState::Synced { .. } => write!(f, "Synced"), - SyncState::Stalled { .. } => write!(f, "Stalled"), + SyncState::Synced => write!(f, "Synced"), + SyncState::Stalled => write!(f, "Stalled"), SyncState::SyncTransition => write!(f, "Evaluating known peers"), SyncState::BackFillSyncing { .. 
} => write!(f, "Syncing Historical Blocks"), } diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 4b54a24ddc..80364753d7 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -25,7 +25,7 @@ type E = MinimalEthSpec; fn bellatrix_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock { let mut block = BeaconBlockBellatrix::::empty(spec); let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(5000).collect::>()); + let txs = VariableList::from(std::iter::repeat_n(tx, 5000).collect::>()); block.body.execution_payload.execution_payload.transactions = txs; @@ -40,7 +40,7 @@ fn bellatrix_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> Beacon fn bellatrix_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock { let mut block = BeaconBlockBellatrix::::empty(spec); let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(100000).collect::>()); + let txs = VariableList::from(std::iter::repeat_n(tx, 100000).collect::>()); block.body.execution_payload.execution_payload.transactions = txs; diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 0956c153a6..af75791e4d 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -841,7 +841,7 @@ impl NetworkBeaconProcessor { | GossipDataColumnError::ProposerIndexMismatch { .. } | GossipDataColumnError::IsNotLaterThanParent { .. } | GossipDataColumnError::InvalidSubnetId { .. } - | GossipDataColumnError::InvalidInclusionProof { .. } + | GossipDataColumnError::InvalidInclusionProof | GossipDataColumnError::InvalidKzgProof { .. 
} | GossipDataColumnError::UnexpectedDataColumn | GossipDataColumnError::InvalidColumnIndex(_) diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index fc31e83727..041b1dba9f 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -688,7 +688,7 @@ impl SyncManager { if new_state.is_synced() && !matches!( old_state, - SyncState::Synced { .. } | SyncState::BackFillSyncing { .. } + SyncState::Synced | SyncState::BackFillSyncing { .. } ) { self.network.subscribe_core_topics(); diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 9ab581950c..271b2322fa 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -1301,7 +1301,7 @@ impl TestRig { .sync_manager .get_sampling_request_status(block_root, index) .unwrap_or_else(|| panic!("No request state for {index}")); - if !matches!(status, crate::sync::peer_sampling::Status::NoPeers { .. }) { + if !matches!(status, crate::sync::peer_sampling::Status::NoPeers) { panic!("expected {block_root} {index} request to be no peers: {status:?}"); } } diff --git a/common/eth2_wallet_manager/src/locked_wallet.rs b/common/eth2_wallet_manager/src/locked_wallet.rs index a77f9bd780..2af863a4bf 100644 --- a/common/eth2_wallet_manager/src/locked_wallet.rs +++ b/common/eth2_wallet_manager/src/locked_wallet.rs @@ -22,7 +22,7 @@ pub const LOCK_FILE: &str = ".lock"; /// /// - Control over the `.lock` file to prevent concurrent access. /// - A `next_validator` function which wraps `Wallet::next_validator`, ensuring that the wallet is -/// persisted to disk (as JSON) between each consecutive call. +/// persisted to disk (as JSON) between each consecutive call. 
pub struct LockedWallet { wallet_dir: PathBuf, wallet: Wallet, diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 5d0bee4c85..cf6ebb3b00 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -760,7 +760,7 @@ impl ProtoArray { /// /// - The child is already the best child but it's now invalid due to a FFG change and should be removed. /// - The child is already the best child and the parent is updated with the new - /// best-descendant. + /// best-descendant. /// - The child is not the best child but becomes the best child. /// - The child is not the best child and does not become the best child. fn maybe_update_best_child_and_descendant( diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 88d4660311..4da632bf58 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1121,7 +1121,7 @@ mod test_compute_deltas { /// /// - `A` (slot 31) is the common descendant. /// - `B` (slot 33) descends from `A`, but there is a single skip slot - /// between it and `A`. + /// between it and `A`. /// - `C` (slot 32) descends from `A` and conflicts with `B`. /// /// Imagine that the `B` chain is finalized at epoch 1. This means that the diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index 9bae770fe5..090e16fc6d 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -41,8 +41,8 @@ impl SyncCommitteeContribution { /// /// - `message`: A single `SyncCommitteeMessage`. /// - `subcommittee_index`: The subcommittee this contribution pertains to out of the broader - /// sync committee. This can be determined from the `SyncSubnetId` of the gossip subnet - /// this message was seen on. + /// sync committee. 
This can be determined from the `SyncSubnetId` of the gossip subnet + /// this message was seen on. /// - `validator_sync_committee_index`: The index of the validator **within** the subcommittee. pub fn from_message( message: &SyncCommitteeMessage, diff --git a/consensus/types/src/test_utils/test_random/bitfield.rs b/consensus/types/src/test_utils/test_random/bitfield.rs index 35176d389d..e335ac7fe8 100644 --- a/consensus/types/src/test_utils/test_random/bitfield.rs +++ b/consensus/types/src/test_utils/test_random/bitfield.rs @@ -3,7 +3,7 @@ use smallvec::smallvec; impl TestRandom for BitList { fn random_for_test(rng: &mut impl RngCore) -> Self { - let initial_len = std::cmp::max(1, (N::to_usize() + 7) / 8); + let initial_len = std::cmp::max(1, N::to_usize().div_ceil(8)); let mut raw_bytes = smallvec![0; initial_len]; rng.fill_bytes(&mut raw_bytes); @@ -24,7 +24,7 @@ impl TestRandom for BitList { impl TestRandom for BitVector { fn random_for_test(rng: &mut impl RngCore) -> Self { - let mut raw_bytes = smallvec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)]; + let mut raw_bytes = smallvec![0; std::cmp::max(1, N::to_usize().div_ceil(8))]; rng.fill_bytes(&mut raw_bytes); // If N isn't divisible by 8 // zero out bits greater than N diff --git a/crypto/bls/src/lib.rs b/crypto/bls/src/lib.rs index 6ea85548c0..d05b34f989 100644 --- a/crypto/bls/src/lib.rs +++ b/crypto/bls/src/lib.rs @@ -10,7 +10,7 @@ //! //! - `supranational`: the pure-assembly, highly optimized version from the `blst` crate. //! - `fake_crypto`: an always-returns-valid implementation that is only useful for testing -//! scenarios which intend to *ignore* real cryptography. +//! scenarios which intend to *ignore* real cryptography. //! //! This crate uses traits to reduce code-duplication between the two implementations. For example, //! 
the `GenericPublicKey` struct exported from this crate is generic across the `TPublicKey` trait diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index 4a202ee3d2..31662e831a 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -84,11 +84,11 @@ pub use transition::TransitionTest; /// /// The feature tests can be run with one of the following methods: /// 1. `handler.run_for_feature(feature_name)` for new tests that are not on existing fork, i.e. a -/// new handler. This will be temporary and the test will need to be updated to use -/// `handle.run()` once the feature is incorporated into a fork. +/// new handler. This will be temporary and the test will need to be updated to use +/// `handle.run()` once the feature is incorporated into a fork. /// 2. `handler.run()` for tests that are already on existing forks, but with new test vectors for -/// the feature. In this case the `handler.is_enabled_for_feature` will need to be implemented -/// to return `true` for the feature in order for the feature test vector to be tested. +/// the feature. In this case the `handler.is_enabled_for_feature` will need to be implemented +/// to return `true` for the feature in order for the feature test vector to be tested. #[derive(Debug, PartialEq, Clone, Copy)] pub enum FeatureName { // TODO(fulu): to be removed once we start using Fulu types for test vectors. 
diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 05804d7e36..c3835f425e 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -143,7 +143,7 @@ impl LoadCase for ForkChoiceTest { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let description = path .iter() - .last() + .next_back() .expect("path must be non-empty") .to_str() .expect("path must be valid OsStr") diff --git a/validator_client/http_api/src/tests/keystores.rs b/validator_client/http_api/src/tests/keystores.rs index 6559a2bb9e..13494e5fa6 100644 --- a/validator_client/http_api/src/tests/keystores.rs +++ b/validator_client/http_api/src/tests/keystores.rs @@ -92,7 +92,7 @@ fn keystore_pubkey(keystore: &Keystore) -> PublicKeyBytes { } fn all_with_status(count: usize, status: T) -> impl Iterator { - std::iter::repeat(status).take(count) + std::iter::repeat_n(status, count) } fn all_imported(count: usize) -> impl Iterator { @@ -1059,7 +1059,7 @@ async fn migrate_some_extra_slashing_protection() { /// - `first_vc_attestations`: attestations to sign on the first VC as `(validator_idx, att)` /// - `delete_indices`: validators to delete from the first VC /// - `slashing_protection_indices`: validators to transfer slashing protection data for. It should -/// be a subset of `delete_indices` or the test will panic. +/// be a subset of `delete_indices` or the test will panic. /// - `import_indices`: validators to transfer. It needn't be a subset of `delete_indices`. /// - `second_vc_attestations`: attestations to sign on the second VC after the transfer. The bool /// indicates whether the signing should be successful. 
diff --git a/validator_client/validator_store/src/lib.rs b/validator_client/validator_store/src/lib.rs index 5bd9ffd8b2..5114000325 100644 --- a/validator_client/validator_store/src/lib.rs +++ b/validator_client/validator_store/src/lib.rs @@ -265,9 +265,9 @@ impl ValidatorStore { /// are two primary functions used here: /// /// - `DoppelgangerStatus::only_safe`: only returns pubkeys which have passed doppelganger - /// protection and are safe-enough to sign messages. + /// protection and are safe-enough to sign messages. /// - `DoppelgangerStatus::ignored`: returns all the pubkeys from `only_safe` *plus* those still - /// undergoing protection. This is useful for collecting duties or other non-signing tasks. + /// undergoing protection. This is useful for collecting duties or other non-signing tasks. #[allow(clippy::needless_collect)] // Collect is required to avoid holding a lock. pub fn voting_pubkeys(&self, filter_func: F) -> I where From 57abffcd997fc8842f6d357878c1ec23f89a2d3d Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 4 Apr 2025 17:14:04 +1100 Subject: [PATCH 08/35] Disable log color when running in non-interactive mode (#7240) #7226 Checks whether the application is running in a terminal, or in non-interactive mode (e.g. using systemd). It will then set the value of `--log-color` to `false` when running non-interactively. 
--- lighthouse/src/main.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 60e65e6470..66dae05326 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -20,6 +20,7 @@ use lighthouse_version::VERSION; use logging::{build_workspace_filter, crit, MetricsLayer}; use malloc_utils::configure_memory_allocator; use std::backtrace::Backtrace; +use std::io::IsTerminal; use std::path::PathBuf; use std::process::exit; use std::sync::LazyLock; @@ -521,10 +522,15 @@ fn run( let log_format = matches.get_one::("log-format"); - let log_color = matches - .get_one::("log-color") - .copied() - .unwrap_or(true); + let log_color = if std::io::stdin().is_terminal() { + matches + .get_one::("log-color") + .copied() + .unwrap_or(true) + } else { + // Disable color when in non-interactive mode. + false + }; let logfile_color = matches.get_flag("logfile-color"); From 6a75f24ab13e5659a7380bc93c1a3c7fc2c7012e Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 4 Apr 2025 20:01:39 +1100 Subject: [PATCH 09/35] Fix the `getBlobs` metric and ensure it is recorded promptly to prevent miscounts (#7188) From testing conducted by Sunnyside Labs, they noticed that the "expected blobs" are quite low on bandwidth constrained nodes. This observation revealed that we don't record the `beacon_blobs_from_el_expected_total` metric at all if the EL doesn't return any response. The fetch blobs function returns without recording the metric. To fix this, I've moved `BLOBS_FROM_EL_EXPECTED_TOTAL` and `BLOBS_FROM_EL_RECEIVED_TOTAL` to as early as possible, to make the metric more accurate. 
--- beacon_node/beacon_chain/src/fetch_blobs.rs | 25 ++++++++------------- beacon_node/beacon_chain/src/metrics.rs | 21 ++++++++++++----- 2 files changed, 24 insertions(+), 22 deletions(-) diff --git a/beacon_node/beacon_chain/src/fetch_blobs.rs b/beacon_node/beacon_chain/src/fetch_blobs.rs index ceb563ffc2..3c28ac9a44 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs.rs @@ -13,7 +13,7 @@ use crate::observed_data_sidecars::DoNotObserve; use crate::{metrics, AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError}; use execution_layer::json_structures::BlobAndProofV1; use execution_layer::Error as ExecutionLayerError; -use metrics::{inc_counter, inc_counter_by, TryExt}; +use metrics::{inc_counter, TryExt}; use ssz_types::FixedVector; use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; use std::sync::Arc; @@ -73,13 +73,20 @@ pub async fn fetch_and_process_engine_blobs( .as_ref() .ok_or(FetchEngineBlobError::ExecutionLayerMissing)?; + metrics::observe(&metrics::BLOBS_FROM_EL_EXPECTED, num_expected_blobs as f64); debug!(num_expected_blobs, "Fetching blobs from the EL"); let response = execution_layer .get_blobs(versioned_hashes) .await + .inspect_err(|_| { + inc_counter(&metrics::BLOBS_FROM_EL_ERROR_TOTAL); + }) .map_err(FetchEngineBlobError::RequestFailed)?; - if response.is_empty() || response.iter().all(|opt| opt.is_none()) { + let num_fetched_blobs = response.iter().filter(|opt| opt.is_some()).count(); + metrics::observe(&metrics::BLOBS_FROM_EL_RECEIVED, num_fetched_blobs as f64); + + if num_fetched_blobs == 0 { debug!(num_expected_blobs, "No blobs fetched from the EL"); inc_counter(&metrics::BLOBS_FROM_EL_MISS_TOTAL); return Ok(None); @@ -99,20 +106,6 @@ pub async fn fetch_and_process_engine_blobs( &chain.spec, )?; - let num_fetched_blobs = fixed_blob_sidecar_list - .iter() - .filter(|b| b.is_some()) - .count(); - - inc_counter_by( - 
&metrics::BLOBS_FROM_EL_EXPECTED_TOTAL, - num_expected_blobs as u64, - ); - inc_counter_by( - &metrics::BLOBS_FROM_EL_RECEIVED_TOTAL, - num_fetched_blobs as u64, - ); - // Gossip verify blobs before publishing. This prevents blobs with invalid KZG proofs from // the EL making it into the data availability checker. We do not immediately add these // blobs to the observed blobs/columns cache because we want to allow blobs/columns to arrive on gossip diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index d1c7a2a5df..463319a1f5 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1662,28 +1662,37 @@ pub static DATA_COLUMN_SIDECAR_GOSSIP_VERIFICATION_TIMES: LazyLock> = LazyLock::new(|| { try_create_int_counter( "beacon_blobs_from_el_hit_total", - "Number of blob batches fetched from the execution layer", + "Number of non-empty blob batches fetched from the execution layer", ) }); pub static BLOBS_FROM_EL_MISS_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter( "beacon_blobs_from_el_miss_total", - "Number of blob batches failed to fetch from the execution layer", + "Number of empty blob responses from the execution layer", ) }); -pub static BLOBS_FROM_EL_EXPECTED_TOTAL: LazyLock> = LazyLock::new(|| { +pub static BLOBS_FROM_EL_ERROR_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "beacon_blobs_from_el_expected_total", + "beacon_blobs_from_el_error_total", + "Number of failed blob fetches from the execution layer", + ) +}); + +pub static BLOBS_FROM_EL_EXPECTED: LazyLock> = LazyLock::new(|| { + try_create_histogram_with_buckets( + "beacon_blobs_from_el_expected", "Number of blobs expected from the execution layer", + Ok(vec![0.0, 3.0, 6.0, 9.0, 12.0, 18.0, 24.0, 30.0]), ) }); -pub static BLOBS_FROM_EL_RECEIVED_TOTAL: LazyLock> = LazyLock::new(|| { - try_create_int_counter( +pub static BLOBS_FROM_EL_RECEIVED: LazyLock> = LazyLock::new(|| { + 
try_create_histogram_with_buckets( "beacon_blobs_from_el_received_total", "Number of blobs fetched from the execution layer", + linear_buckets(0.0, 4.0, 20), ) }); From 7cc64cab8352b9f8f82076f5f71fc7a7a08e3376 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 4 Apr 2025 20:01:42 +1100 Subject: [PATCH 10/35] Add missing error log and remove redundant id field from lookup logs (#6990) Partially #6989. This PR adds the missing error log when a batch fails due to issues with converting the response into `RpcBlock`. See the above linked issue for more details. Adding this log reveals that we're completing range requests with missing columns, hence causing the batch to fail. It looks like we've hit the case where we've received enough stream terminations, but not all columns are returned. ``` Feb 12 06:12:16.558 DEBG Failed to convert range block components into RpcBlock, error: No column for block 0xc5b6c7fa02f5ef603d45819c08c6519f1dba661fd5d44a2fc849d3e7028b6007 index 18, id: 3456/RangeSync/116/3432, service: sync, module: network::sync::network_context:488 ``` I've also removed some redundant `id` logging, as the `id` debug representation is difficult to read, and is now being logged as part of `req_id` in a more succinct format (relevant PR: #6914) --- beacon_node/network/src/sync/network_context/custody.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/beacon_node/network/src/sync/network_context/custody.rs b/beacon_node/network/src/sync/network_context/custody.rs index 018381a850..e7e6e62349 100644 --- a/beacon_node/network/src/sync/network_context/custody.rs +++ b/beacon_node/network/src/sync/network_context/custody.rs @@ -102,7 +102,6 @@ impl ActiveCustodyRequest { ) -> CustodyRequestResult { let Some(batch_request) = self.active_batch_columns_requests.get_mut(&req_id) else { warn!( - id = ?self.custody_id, block_root = ?self.block_root, %req_id, "Received custody column response for unrequested index" @@ -113,7 +112,6 @@ impl ActiveCustodyRequest { 
match resp { Ok((data_columns, seen_timestamp)) => { debug!( - id = ?self.custody_id, block_root = ?self.block_root, %req_id, %peer_id, @@ -161,7 +159,6 @@ impl ActiveCustodyRequest { if !missing_column_indexes.is_empty() { // Note: Batch logging that columns are missing to not spam logger debug!( - id = ?self.custody_id, block_root = ?self.block_root, %req_id, %peer_id, @@ -175,7 +172,6 @@ impl ActiveCustodyRequest { } Err(err) => { debug!( - id = ?self.custody_id, block_root = ?self.block_root, %req_id, %peer_id, From 5dd998a6163c46b24709b18e4f06c2a14c13c1d0 Mon Sep 17 00:00:00 2001 From: Mac L Date: Sat, 5 Apr 2025 21:37:42 +1100 Subject: [PATCH 11/35] Bump openssl to fix cargo audit failure (#7263) `cargo audit` failure -> https://rustsec.org/advisories/RUSTSEC-2025-0022 Bump `openssl` version --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f1a284ab65..746cac9c14 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6336,9 +6336,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.70" +version = "0.10.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61cfb4e166a8bb8c9b55c500bc2308550148ece889be90f609377e58140f42c6" +checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" dependencies = [ "bitflags 2.8.0", "cfg-if", @@ -6377,9 +6377,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.105" +version = "0.9.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b22d5b84be05a8d6947c7cb71f7c849aa0f112acd4bf51c2a7c1c988ac0a9dc" +checksum = "8288979acd84749c744a9014b4382d42b8f7b2592847b5afb2ed29e5d16ede07" dependencies = [ "cc", "libc", From e77fb01a063cc4de1dd33a881de2174487a680e6 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sun, 6 Apr 2025 19:08:59 +1000 Subject: [PATCH 12/35] Remove CLI conflict for secrets-dir and datadir (#7265) Redo this PR: - 
https://github.com/sigp/lighthouse/pull/5480 After a regression during the switch to `clap_derive`. - https://github.com/sigp/lighthouse/pull/6300 - Remove `conflicts_with` - Add test to prevent future regression --- lighthouse/tests/validator_client.rs | 16 ++++++++++++++++ validator_client/src/cli.rs | 1 - 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index eccd97d486..b9edeceaee 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -70,6 +70,22 @@ fn validators_and_secrets_dir_flags() { }); } +#[test] +fn datadir_and_secrets_dir_flags() { + let dir = TempDir::new().expect("Unable to create temporary directory"); + CommandLineTest::new() + .flag("datadir", dir.path().join("data").to_str()) + .flag("secrets-dir", dir.path().join("secrets").to_str()) + .run_with_no_datadir() + .with_config(|config| { + assert_eq!( + config.validator_dir, + dir.path().join("data").join("validators") + ); + assert_eq!(config.secrets_dir, dir.path().join("secrets")); + }); +} + #[test] fn validators_dir_alias_flags() { let dir = TempDir::new().expect("Unable to create temporary directory"); diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 18bd736957..3dd138619b 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -67,7 +67,6 @@ pub struct ValidatorClient { #[clap( long, value_name = "SECRETS_DIRECTORY", - conflicts_with = "datadir", help = "The directory which contains the password to unlock the validator \ voting keypairs. 
Each password should be contained in a file where the \ name is the 0x-prefixed hex representation of the validators voting public \ From b5d40e3db06d2dd5fb604cb725544711d8c2536e Mon Sep 17 00:00:00 2001 From: ThreeHrSleep <151536303+ThreeHrSleep@users.noreply.github.com> Date: Mon, 7 Apr 2025 07:08:15 +0530 Subject: [PATCH 13/35] Align logs (#7256) https://github.com/sigp/lighthouse/issues/7249 --- common/logging/src/tracing_logging_layer.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/common/logging/src/tracing_logging_layer.rs b/common/logging/src/tracing_logging_layer.rs index 810f7e960e..c3784a8f62 100644 --- a/common/logging/src/tracing_logging_layer.rs +++ b/common/logging/src/tracing_logging_layer.rs @@ -13,6 +13,9 @@ use tracing_subscriber::layer::Context; use tracing_subscriber::registry::LookupSpan; use tracing_subscriber::Layer; +const FIXED_MESSAGE_WIDTH: usize = 44; +const ALIGNED_LEVEL_WIDTH: usize = 5; + pub struct LoggingLayer { pub non_blocking_writer: NonBlocking, _guard: WorkerGuard, @@ -368,13 +371,18 @@ fn build_log_text<'a, S>( } } - let level_str = if use_color { - color_level_str + let pad = if plain_level_str.len() < ALIGNED_LEVEL_WIDTH { + " " } else { - plain_level_str + "" + }; + + let level_str = if use_color { + format!("{}{}", color_level_str, pad) + } else { + format!("{}{}", plain_level_str, pad) }; - let fixed_message_width = 44; let message_len = visitor.message.len(); let message_content = if use_color { @@ -383,7 +391,7 @@ fn build_log_text<'a, S>( visitor.message.clone() }; - let padded_message = if message_len < fixed_message_width { + let padded_message = if message_len < FIXED_MESSAGE_WIDTH { let extra_color_len = if use_color { bold_start.len() + bold_end.len() } else { @@ -392,7 +400,7 @@ fn build_log_text<'a, S>( format!( "{: Date: Mon, 7 Apr 2025 00:16:41 -0300 Subject: [PATCH 14/35] Compute roots for unfinalized by_range requests with fork-choice (#7098) Includes PRs - 
https://github.com/sigp/lighthouse/pull/7058 - https://github.com/sigp/lighthouse/pull/7066 Cleaner for the `release-v7.0.0` branch --- beacon_node/beacon_chain/src/beacon_chain.rs | 25 ++ beacon_node/http_api/Cargo.toml | 1 + beacon_node/network/src/metrics.rs | 9 + .../network_beacon_processor/rpc_methods.rs | 361 ++++++++---------- .../src/proto_array_fork_choice.rs | 10 +- 5 files changed, 209 insertions(+), 197 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 624dc968ad..42e6deaf16 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -7344,6 +7344,31 @@ impl BeaconChain { Ok(None) } + + /// Retrieves block roots (in ascending slot order) within some slot range from fork choice. + pub fn block_roots_from_fork_choice(&self, start_slot: u64, count: u64) -> Vec { + let head_block_root = self.canonical_head.cached_head().head_block_root(); + let fork_choice_read_lock = self.canonical_head.fork_choice_read_lock(); + let block_roots_iter = fork_choice_read_lock + .proto_array() + .iter_block_roots(&head_block_root); + let end_slot = start_slot.saturating_add(count); + let mut roots = vec![]; + + for (root, slot) in block_roots_iter { + if slot < end_slot && slot >= start_slot { + roots.push(root); + } + if slot < start_slot { + break; + } + } + + drop(fork_choice_read_lock); + // return in ascending slot order + roots.reverse(); + roots + } } impl Drop for BeaconChain { diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 2fb3ec06bf..7861f03000 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -28,6 +28,7 @@ metrics = { workspace = true } network = { workspace = true } operation_pool = { workspace = true } parking_lot = { workspace = true } +proto_array = { workspace = true } rand = { workspace = true } safe_arith = { workspace = true } sensitive_url = { workspace = 
true } diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 154a59eade..92b3349577 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -87,6 +87,15 @@ pub static BEACON_PROCESSOR_IMPORT_ERRORS_PER_TYPE: LazyLock> = + LazyLock::new(|| { + try_create_histogram_vec_with_buckets( + "beacon_processor_get_block_roots_time_seconds", + "Time to complete get_block_roots when serving by_range requests", + decimal_buckets(-3, -1), + &["source"], + ) + }); /* * Gossip processor diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 857fc266da..dec28eeb72 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -1,9 +1,10 @@ +use crate::metrics; use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERANCE}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; -use itertools::process_results; +use itertools::{process_results, Itertools}; use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, @@ -675,86 +676,49 @@ impl NetworkBeaconProcessor { request_id: RequestId, req: BlocksByRangeRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { + let req_start_slot = *req.start_slot(); + let req_count = *req.count(); + debug!(self.log, "Received BlocksByRange Request"; "peer_id" => %peer_id, - "count" => req.count(), - "start_slot" => req.start_slot(), + "start_slot" => req_start_slot, + "count" => req_count, ); - let forwards_block_root_iter = match self - .chain - .forwards_iter_block_roots(Slot::from(*req.start_slot())) - { - Ok(iter) => iter, - 
Err(BeaconChainError::HistoricalBlockOutOfRange { - slot, - oldest_block_slot, - }) => { - debug!(self.log, "Range request failed during backfill"; - "requested_slot" => slot, - "oldest_known_slot" => oldest_block_slot - ); - return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); - } - Err(e) => { - error!(self.log, "Unable to obtain root iter"; - "request" => ?req, - "peer" => %peer_id, - "error" => ?e - ); - return Err((RpcErrorResponse::ServerError, "Database error")); - } - }; - - // Pick out the required blocks, ignoring skip-slots. - let mut last_block_root = None; - let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { - iter.take_while(|(_, slot)| { - slot.as_u64() < req.start_slot().saturating_add(*req.count()) - }) - // map skip slots to None - .map(|(root, _)| { - let result = if Some(root) == last_block_root { - None - } else { - Some(root) - }; - last_block_root = Some(root); - result - }) - .collect::>>() - }); - - let block_roots = match maybe_block_roots { - Ok(block_roots) => block_roots, - Err(e) => { - error!(self.log, "Error during iteration over blocks"; - "request" => ?req, - "peer" => %peer_id, - "error" => ?e - ); - return Err((RpcErrorResponse::ServerError, "Iteration error")); - } - }; - - // remove all skip slots - let block_roots = block_roots.into_iter().flatten().collect::>(); + // Spawn a blocking handle since get_block_roots_for_slot_range takes a sync lock on the + // fork-choice. + let network_beacon_processor = self.clone(); + let block_roots = self + .executor + .spawn_blocking_handle( + move || { + network_beacon_processor.get_block_roots_for_slot_range( + req_start_slot, + req_count, + "BlocksByRange", + ) + }, + "get_block_roots_for_slot_range", + ) + .ok_or((RpcErrorResponse::ServerError, "shutting down"))? 
+ .await + .map_err(|_| (RpcErrorResponse::ServerError, "tokio join"))??; let current_slot = self .chain .slot() .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); - let log_results = |req: BlocksByRangeRequest, peer_id, blocks_sent| { - if blocks_sent < (*req.count() as usize) { + let log_results = |peer_id, blocks_sent| { + if blocks_sent < (req_count as usize) { debug!( self.log, "BlocksByRange outgoing response processed"; "peer" => %peer_id, "msg" => "Failed to return all requested blocks", - "start_slot" => req.start_slot(), + "start_slot" => req_start_slot, "current_slot" => current_slot, - "requested" => req.count(), + "requested" => req_count, "returned" => blocks_sent ); } else { @@ -762,9 +726,9 @@ impl NetworkBeaconProcessor { self.log, "BlocksByRange outgoing response processed"; "peer" => %peer_id, - "start_slot" => req.start_slot(), + "start_slot" => req_start_slot, "current_slot" => current_slot, - "requested" => req.count(), + "requested" => req_count, "returned" => blocks_sent ); } @@ -785,8 +749,7 @@ impl NetworkBeaconProcessor { Ok(Some(block)) => { // Due to skip slots, blocks could be out of the range, we ensure they // are in the range before sending - if block.slot() >= *req.start_slot() - && block.slot() < req.start_slot() + req.count() + if block.slot() >= req_start_slot && block.slot() < req_start_slot + req.count() { blocks_sent += 1; self.send_network_message(NetworkMessage::SendResponse { @@ -805,7 +768,7 @@ impl NetworkBeaconProcessor { "peer" => %peer_id, "request_root" => ?root ); - log_results(req, peer_id, blocks_sent); + log_results(peer_id, blocks_sent); return Err((RpcErrorResponse::ServerError, "Database inconsistency")); } Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { @@ -815,7 +778,7 @@ impl NetworkBeaconProcessor { "block_root" => ?root, "reason" => "execution layer not synced", ); - log_results(req, peer_id, blocks_sent); + log_results(peer_id, blocks_sent); // send the stream terminator return 
Err(( RpcErrorResponse::ResourceUnavailable, @@ -843,17 +806,142 @@ impl NetworkBeaconProcessor { "error" => ?e ); } - log_results(req, peer_id, blocks_sent); + log_results(peer_id, blocks_sent); // send the stream terminator return Err((RpcErrorResponse::ServerError, "Failed fetching blocks")); } } } - log_results(req, peer_id, blocks_sent); + log_results(peer_id, blocks_sent); Ok(()) } + fn get_block_roots_for_slot_range( + &self, + req_start_slot: u64, + req_count: u64, + req_type: &str, + ) -> Result, (RpcErrorResponse, &'static str)> { + let start_time = std::time::Instant::now(); + let finalized_slot = self + .chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + + let (block_roots, source) = if req_start_slot >= finalized_slot.as_u64() { + // If the entire requested range is after finalization, use fork_choice + ( + self.chain + .block_roots_from_fork_choice(req_start_slot, req_count), + "fork_choice", + ) + } else if req_start_slot + req_count <= finalized_slot.as_u64() { + // If the entire requested range is before finalization, use store + ( + self.get_block_roots_from_store(req_start_slot, req_count)?, + "store", + ) + } else { + // Split the request at the finalization boundary + let count_from_store = finalized_slot.as_u64() - req_start_slot; + let count_from_fork_choice = req_count - count_from_store; + let start_slot_fork_choice = finalized_slot.as_u64(); + + // Get roots from store (up to and including finalized slot) + let mut roots_from_store = + self.get_block_roots_from_store(req_start_slot, count_from_store)?; + + // Get roots from fork choice (after finalized slot) + let roots_from_fork_choice = self + .chain + .block_roots_from_fork_choice(start_slot_fork_choice, count_from_fork_choice); + + roots_from_store.extend(roots_from_fork_choice); + + (roots_from_store, "mixed") + }; + + let elapsed = start_time.elapsed(); + metrics::observe_timer_vec( + 
&metrics::BEACON_PROCESSOR_GET_BLOCK_ROOTS_TIME, + &[source], + elapsed, + ); + + debug!( + self.log, + "Range request block roots retrieved"; + "req_type" => req_type, + "start_slot" => req_start_slot, + "req_count" => req_count, + "roots_count" => block_roots.len(), + "source" => source, + "elapsed" => ?elapsed, + "finalized_slot" => finalized_slot + ); + + Ok(block_roots) + } + + /// Get block roots for a `BlocksByRangeRequest` from the store using roots iterator. + fn get_block_roots_from_store( + &self, + start_slot: u64, + count: u64, + ) -> Result, (RpcErrorResponse, &'static str)> { + let forwards_block_root_iter = + match self.chain.forwards_iter_block_roots(Slot::from(start_slot)) { + Ok(iter) => iter, + Err(BeaconChainError::HistoricalBlockOutOfRange { + slot, + oldest_block_slot, + }) => { + debug!(self.log, "Range request failed during backfill"; + "requested_slot" => slot, + "oldest_known_slot" => oldest_block_slot + ); + return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); + } + Err(e) => { + error!(self.log, "Unable to obtain root iter for range request"; + "start_slot" => start_slot, + "count" => count, + "error" => ?e + ); + return Err((RpcErrorResponse::ServerError, "Database error")); + } + }; + + // Pick out the required blocks, ignoring skip-slots. + let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { + iter.take_while(|(_, slot)| slot.as_u64() < start_slot.saturating_add(count)) + .collect::>() + }); + + let block_roots = match maybe_block_roots { + Ok(block_roots) => block_roots, + Err(e) => { + error!(self.log, "Error during iteration over blocks for range request"; + "start_slot" => start_slot, + "count" => count, + "error" => ?e + ); + return Err((RpcErrorResponse::ServerError, "Iteration error")); + } + }; + + // remove all skip slots i.e. duplicated roots + Ok(block_roots + .into_iter() + .map(|(root, _)| root) + .unique() + .collect::>()) + } + /// Handle a `BlobsByRange` request from the peer. 
pub fn handle_blobs_by_range_request( self: Arc, @@ -932,65 +1020,8 @@ impl NetworkBeaconProcessor { }; } - let forwards_block_root_iter = - match self.chain.forwards_iter_block_roots(request_start_slot) { - Ok(iter) => iter, - Err(BeaconChainError::HistoricalBlockOutOfRange { - slot, - oldest_block_slot, - }) => { - debug!(self.log, "Range request failed during backfill"; - "requested_slot" => slot, - "oldest_known_slot" => oldest_block_slot - ); - return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); - } - Err(e) => { - error!(self.log, "Unable to obtain root iter"; - "request" => ?req, - "peer" => %peer_id, - "error" => ?e - ); - return Err((RpcErrorResponse::ServerError, "Database error")); - } - }; - - // Use `WhenSlotSkipped::Prev` to get the most recent block root prior to - // `request_start_slot` in order to check whether the `request_start_slot` is a skip. - let mut last_block_root = req.start_slot.checked_sub(1).and_then(|prev_slot| { - self.chain - .block_root_at_slot(Slot::new(prev_slot), WhenSlotSkipped::Prev) - .ok() - .flatten() - }); - - // Pick out the required blocks, ignoring skip-slots. 
- let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { - iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) - // map skip slots to None - .map(|(root, _)| { - let result = if Some(root) == last_block_root { - None - } else { - Some(root) - }; - last_block_root = Some(root); - result - }) - .collect::>>() - }); - - let block_roots = match maybe_block_roots { - Ok(block_roots) => block_roots, - Err(e) => { - error!(self.log, "Error during iteration over blocks"; - "request" => ?req, - "peer" => %peer_id, - "error" => ?e - ); - return Err((RpcErrorResponse::ServerError, "Database error")); - } - }; + let block_roots = + self.get_block_roots_for_slot_range(req.start_slot, req.count, "BlobsByRange")?; let current_slot = self .chain @@ -1009,8 +1040,6 @@ impl NetworkBeaconProcessor { ); }; - // remove all skip slots - let block_roots = block_roots.into_iter().flatten(); let mut blobs_sent = 0; for root in block_roots { @@ -1136,68 +1165,8 @@ impl NetworkBeaconProcessor { }; } - let forwards_block_root_iter = - match self.chain.forwards_iter_block_roots(request_start_slot) { - Ok(iter) => iter, - Err(BeaconChainError::HistoricalBlockOutOfRange { - slot, - oldest_block_slot, - }) => { - debug!(self.log, "Range request failed during backfill"; - "requested_slot" => slot, - "oldest_known_slot" => oldest_block_slot - ); - return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); - } - Err(e) => { - error!(self.log, "Unable to obtain root iter"; - "request" => ?req, - "peer" => %peer_id, - "error" => ?e - ); - return Err((RpcErrorResponse::ServerError, "Database error")); - } - }; - - // Use `WhenSlotSkipped::Prev` to get the most recent block root prior to - // `request_start_slot` in order to check whether the `request_start_slot` is a skip. 
- let mut last_block_root = req.start_slot.checked_sub(1).and_then(|prev_slot| { - self.chain - .block_root_at_slot(Slot::new(prev_slot), WhenSlotSkipped::Prev) - .ok() - .flatten() - }); - - // Pick out the required blocks, ignoring skip-slots. - let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { - iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) - // map skip slots to None - .map(|(root, _)| { - let result = if Some(root) == last_block_root { - None - } else { - Some(root) - }; - last_block_root = Some(root); - result - }) - .collect::>>() - }); - - let block_roots = match maybe_block_roots { - Ok(block_roots) => block_roots, - Err(e) => { - error!(self.log, "Error during iteration over blocks"; - "request" => ?req, - "peer" => %peer_id, - "error" => ?e - ); - return Err((RpcErrorResponse::ServerError, "Database error")); - } - }; - - // remove all skip slots - let block_roots = block_roots.into_iter().flatten(); + let block_roots = + self.get_block_roots_for_slot_range(req.start_slot, req.count, "DataColumnsByRange")?; let mut data_columns_sent = 0; for root in block_roots { diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 4da632bf58..e7e6b54f1d 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -856,10 +856,18 @@ impl ProtoArrayForkChoice { } /// See `ProtoArray::iter_nodes` - pub fn iter_nodes<'a>(&'a self, block_root: &Hash256) -> Iter<'a> { + pub fn iter_nodes(&self, block_root: &Hash256) -> Iter { self.proto_array.iter_nodes(block_root) } + /// See `ProtoArray::iter_block_roots` + pub fn iter_block_roots( + &self, + block_root: &Hash256, + ) -> impl Iterator + use<'_> { + self.proto_array.iter_block_roots(block_root) + } + pub fn as_bytes(&self) -> Vec { SszContainer::from(self).as_ssz_bytes() } From 091e292c9928b341bda53b8331f560bbc62a5060 Mon 
Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Sun, 6 Apr 2025 20:16:48 -0700 Subject: [PATCH 15/35] Return eth1_data early post transition (#7248) N/A Return state.eth1_data() early if we have passed the transition period post electra. Even if we don't return early, the function would still return state.eth1_data() based on the current conditions. However, doing this explicitly here to match the spec. This covers setting the right eth1_data in our block. The other thing we need to ensure is that the deposits returned by the eth1_chain is empty post transition. The only way we get non-empty deposits post the transition is if `state.eth1_deposit_index` in the below code is less than `min(deposit_requests_start_index, state.eth1_data().deposit_count)`. https://github.com/sigp/lighthouse/blob/0850bcfb89d1048030c1aced795f3d43d91abeb0/beacon_node/beacon_chain/src/eth1_chain.rs#L543-L579 This can never happen because state.eth1_deposit_index will be equal to state.eth1_data.deposit count and cannot exceed the value. @michaelsproul @ethDreamer please double check the logic for deposits being empty post transition. Following the logic in the spec makes my head hurt. --- beacon_node/beacon_chain/src/eth1_chain.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index ad4f106517..7ff2de9548 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -369,6 +369,12 @@ pub struct DummyEth1ChainBackend(PhantomData); impl Eth1ChainBackend for DummyEth1ChainBackend { /// Produce some deterministic junk based upon the current epoch. 
fn eth1_data(&self, state: &BeaconState, _spec: &ChainSpec) -> Result { + // [New in Electra:EIP6110] + if let Ok(deposit_requests_start_index) = state.deposit_requests_start_index() { + if state.eth1_deposit_index() == deposit_requests_start_index { + return Ok(state.eth1_data().clone()); + } + } let current_epoch = state.current_epoch(); let slots_per_voting_period = E::slots_per_eth1_voting_period() as u64; let current_voting_period: u64 = current_epoch.as_u64() / slots_per_voting_period; @@ -467,6 +473,12 @@ impl CachingEth1Backend { impl Eth1ChainBackend for CachingEth1Backend { fn eth1_data(&self, state: &BeaconState, spec: &ChainSpec) -> Result { + // [New in Electra:EIP6110] + if let Ok(deposit_requests_start_index) = state.deposit_requests_start_index() { + if state.eth1_deposit_index() == deposit_requests_start_index { + return Ok(state.eth1_data().clone()); + } + } let period = E::SlotsPerEth1VotingPeriod::to_u64(); let voting_period_start_slot = (state.slot() / period) * period; let voting_period_start_seconds = slot_start_seconds( From 70850fe58d5676d9072d418f04f4db7257c1513b Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 7 Apr 2025 01:23:52 -0300 Subject: [PATCH 16/35] Drop head tracker for summaries DAG (#6744) The head tracker is a persisted piece of state that must be kept in sync with the fork-choice. It has been a source of pruning issues in the past, so we want to remove it - see https://github.com/sigp/lighthouse/issues/1785 When implementing tree-states in the hot DB we have to change the pruning routine (more details below) so we want to do those changes first in isolation. 
- see https://github.com/sigp/lighthouse/issues/6580 - If you want to see the full tree-states-hot feature, see https://github.com/dapplion/lighthouse/pull/39 Closes https://github.com/sigp/lighthouse/issues/1785 **Current DB migration routine** - Locate abandoned heads with head tracker - Use a roots iterator to collect the ancestors of those heads that can be pruned - Delete those abandoned blocks / states - Migrate the newly finalized chain to the freezer In summary, it computes what it has to delete and keeps the rest. Then it migrates data to the freezer. If the abandoned forks routine has a bug it can break the freezer migration. **Proposed migration routine (this PR)** - Migrate the newly finalized chain to the freezer - Load all state summaries from disk - From those, just knowing the head and finalized block compute two sets: (1) descendants of finalized (2) newly finalized chain - Iterate all summaries; if a summary does not belong to set (1) or (2), delete it This strategy is more sound as it just checks what's there in the hot DB, computes what it has to keep and deletes the rest. Because it does not rely on third pieces of data we can drop the head tracker and pruning checkpoint. Since the DB migration happens **first** now, as long as the computation of the sets to keep is correct we won't have pruning issues.
--- beacon_node/beacon_chain/src/beacon_chain.rs | 92 +-- .../beacon_chain/src/block_verification.rs | 44 +- .../src/block_verification_types.rs | 2 - beacon_node/beacon_chain/src/builder.rs | 18 +- .../beacon_chain/src/canonical_head.rs | 13 +- .../overflow_lru_cache.rs | 2 - .../state_lru_cache.rs | 10 +- beacon_node/beacon_chain/src/head_tracker.rs | 214 ------- beacon_node/beacon_chain/src/lib.rs | 2 +- beacon_node/beacon_chain/src/metrics.rs | 6 - beacon_node/beacon_chain/src/migrate.rs | 575 +++++++++--------- .../src/persisted_beacon_chain.rs | 13 - beacon_node/beacon_chain/src/schema_change.rs | 9 + .../src/schema_change/migration_schema_v23.rs | 147 +++++ .../beacon_chain/src/state_advance_timer.rs | 22 +- beacon_node/beacon_chain/src/summaries_dag.rs | 464 ++++++++++++++ beacon_node/beacon_chain/src/test_utils.rs | 22 + .../tests/payload_invalidation.rs | 4 +- beacon_node/beacon_chain/tests/store_tests.rs | 113 ++-- .../store/src/database/leveldb_impl.rs | 1 - beacon_node/store/src/errors.rs | 2 +- beacon_node/store/src/garbage_collection.rs | 36 -- beacon_node/store/src/hot_cold_store.rs | 251 ++------ beacon_node/store/src/lib.rs | 9 +- beacon_node/store/src/metadata.rs | 2 +- consensus/proto_array/src/proto_array.rs | 15 + .../src/proto_array_fork_choice.rs | 5 + 27 files changed, 1110 insertions(+), 983 deletions(-) delete mode 100644 beacon_node/beacon_chain/src/head_tracker.rs create mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs create mode 100644 beacon_node/beacon_chain/src/summaries_dag.rs delete mode 100644 beacon_node/store/src/garbage_collection.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 668da4f0fa..d9ac2fa6ea 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -33,7 +33,6 @@ use crate::events::ServerSentEventHandler; use crate::execution_payload::{get_execution_payload, 
NotifyExecutionLayer, PreparePayloadHandle}; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::graffiti_calculator::GraffitiCalculator; -use crate::head_tracker::{HeadTracker, HeadTrackerReader, SszHeadTracker}; use crate::kzg_utils::reconstruct_blobs; use crate::light_client_finality_update_verification::{ Error as LightClientFinalityUpdateError, VerifiedLightClientFinalityUpdate, @@ -57,7 +56,7 @@ use crate::observed_block_producers::ObservedBlockProducers; use crate::observed_data_sidecars::ObservedDataSidecars; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::observed_slashable::ObservedSlashable; -use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; +use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::persisted_fork_choice::PersistedForkChoice; use crate::pre_finalization_cache::PreFinalizationBlockCache; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; @@ -454,8 +453,6 @@ pub struct BeaconChain { /// A handler for events generated by the beacon chain. This is only initialized when the /// HTTP server is enabled. pub event_handler: Option>, - /// Used to track the heads of the beacon chain. - pub(crate) head_tracker: Arc, /// Caches the attester shuffling for a given epoch and shuffling key root. pub shuffling_cache: RwLock, /// A cache of eth1 deposit data at epoch boundaries for deposit finalization @@ -607,57 +604,13 @@ impl BeaconChain { }) } - /// Persists the head tracker and fork choice. + /// Return a database operation for writing the `PersistedBeaconChain` to disk. /// - /// We do it atomically even though no guarantees need to be made about blocks from - /// the head tracker also being present in fork choice. 
- pub fn persist_head_and_fork_choice(&self) -> Result<(), Error> { - let mut batch = vec![]; - - let _head_timer = metrics::start_timer(&metrics::PERSIST_HEAD); - - // Hold a lock to head_tracker until it has been persisted to disk. Otherwise there's a race - // condition with the pruning thread which can result in a block present in the head tracker - // but absent in the DB. This inconsistency halts pruning and dramastically increases disk - // size. Ref: https://github.com/sigp/lighthouse/issues/4773 - let head_tracker = self.head_tracker.0.read(); - batch.push(self.persist_head_in_batch(&head_tracker)); - - let _fork_choice_timer = metrics::start_timer(&metrics::PERSIST_FORK_CHOICE); - batch.push(self.persist_fork_choice_in_batch()); - - self.store.hot_db.do_atomically(batch)?; - drop(head_tracker); - - Ok(()) - } - - /// Return a `PersistedBeaconChain` without reference to a `BeaconChain`. - pub fn make_persisted_head( - genesis_block_root: Hash256, - head_tracker_reader: &HeadTrackerReader, - ) -> PersistedBeaconChain { - PersistedBeaconChain { - _canonical_head_block_root: DUMMY_CANONICAL_HEAD_BLOCK_ROOT, - genesis_block_root, - ssz_head_tracker: SszHeadTracker::from_map(head_tracker_reader), - } - } - - /// Return a database operation for writing the beacon chain head to disk. - pub fn persist_head_in_batch( - &self, - head_tracker_reader: &HeadTrackerReader, - ) -> KeyValueStoreOp { - Self::persist_head_in_batch_standalone(self.genesis_block_root, head_tracker_reader) - } - - pub fn persist_head_in_batch_standalone( - genesis_block_root: Hash256, - head_tracker_reader: &HeadTrackerReader, - ) -> KeyValueStoreOp { - Self::make_persisted_head(genesis_block_root, head_tracker_reader) - .as_kv_store_op(BEACON_CHAIN_DB_KEY) + /// These days the `PersistedBeaconChain` is only used to store the genesis block root, so it + /// should only ever be written once at startup. It used to be written more frequently, but + /// this is no longer necessary. 
+ pub fn persist_head_in_batch_standalone(genesis_block_root: Hash256) -> KeyValueStoreOp { + PersistedBeaconChain { genesis_block_root }.as_kv_store_op(BEACON_CHAIN_DB_KEY) } /// Load fork choice from disk, returning `None` if it isn't found. @@ -1450,12 +1403,13 @@ impl BeaconChain { /// /// Returns `(block_root, block_slot)`. pub fn heads(&self) -> Vec<(Hash256, Slot)> { - self.head_tracker.heads() - } - - /// Only used in tests. - pub fn knows_head(&self, block_hash: &SignedBeaconBlockHash) -> bool { - self.head_tracker.contains_head((*block_hash).into()) + self.canonical_head + .fork_choice_read_lock() + .proto_array() + .heads_descended_from_finalization::() + .iter() + .map(|node| (node.root, node.slot)) + .collect() } /// Returns the `BeaconState` at the given slot. @@ -1735,8 +1689,6 @@ impl BeaconChain { let notif = ManualFinalizationNotification { state_root: state_root.into(), checkpoint, - head_tracker: self.head_tracker.clone(), - genesis_block_root: self.genesis_block_root, }; self.store_migrator.process_manual_finalization(notif); @@ -3762,7 +3714,6 @@ impl BeaconChain { state, parent_block, parent_eth1_finalization_data, - confirmed_state_roots, consensus_context, } = import_data; @@ -3786,7 +3737,6 @@ impl BeaconChain { block, block_root, state, - confirmed_state_roots, payload_verification_outcome.payload_verification_status, parent_block, parent_eth1_finalization_data, @@ -3824,7 +3774,6 @@ impl BeaconChain { signed_block: AvailableBlock, block_root: Hash256, mut state: BeaconState, - confirmed_state_roots: Vec, payload_verification_status: PayloadVerificationStatus, parent_block: SignedBlindedBeaconBlock, parent_eth1_finalization_data: Eth1FinalizationData, @@ -4012,11 +3961,6 @@ impl BeaconChain { let block = signed_block.message(); let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE); - ops.extend( - confirmed_state_roots - .into_iter() - .map(StoreOp::DeleteStateTemporaryFlag), - ); 
ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); @@ -4043,9 +3987,6 @@ impl BeaconChain { // about it. let block_time_imported = timestamp_now(); - let parent_root = block.parent_root(); - let slot = block.slot(); - let current_eth1_finalization_data = Eth1FinalizationData { eth1_data: state.eth1_data().clone(), eth1_deposit_index: state.eth1_deposit_index(), @@ -4066,9 +4007,6 @@ impl BeaconChain { }); } - self.head_tracker - .register_block(block_root, parent_root, slot); - metrics::stop_timer(db_write_timer); metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); @@ -7208,7 +7146,7 @@ impl BeaconChain { impl Drop for BeaconChain { fn drop(&mut self) { let drop = || -> Result<(), Error> { - self.persist_head_and_fork_choice()?; + self.persist_fork_choice()?; self.persist_op_pool()?; self.persist_eth1_cache() }; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 0a0ffab7fa..39bad34cd6 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1453,22 +1453,8 @@ impl ExecutionPendingBlock { let catchup_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE); - // Stage a batch of operations to be completed atomically if this block is imported - // successfully. If there is a skipped slot, we include the state root of the pre-state, - // which may be an advanced state that was stored in the DB with a `temporary` flag. let mut state = parent.pre_state; - let mut confirmed_state_roots = - if block.slot() > state.slot() && state.slot() > parent.beacon_block.slot() { - // Advanced pre-state. Delete its temporary flag. - let pre_state_root = state.update_tree_hash_cache()?; - vec![pre_state_root] - } else { - // Pre state is either unadvanced, or should not be stored long-term because there - // is no skipped slot between `parent` and `block`. 
- vec![] - }; - // The block must have a higher slot than its parent. if block.slot() <= parent.beacon_block.slot() { return Err(BlockError::BlockIsNotLaterThanParent { @@ -1515,38 +1501,29 @@ impl ExecutionPendingBlock { // processing, but we get early access to it. let state_root = state.update_tree_hash_cache()?; - // Store the state immediately, marking it as temporary, and staging the deletion - // of its temporary status as part of the larger atomic operation. + // Store the state immediately. let txn_lock = chain.store.hot_db.begin_rw_transaction(); let state_already_exists = chain.store.load_hot_state_summary(&state_root)?.is_some(); let state_batch = if state_already_exists { - // If the state exists, it could be temporary or permanent, but in neither case - // should we rewrite it or store a new temporary flag for it. We *will* stage - // the temporary flag for deletion because it's OK to double-delete the flag, - // and we don't mind if another thread gets there first. + // If the state exists, we do not need to re-write it. 
vec![] } else { - vec![ - if state.slot() % T::EthSpec::slots_per_epoch() == 0 { - StoreOp::PutState(state_root, &state) - } else { - StoreOp::PutStateSummary( - state_root, - HotStateSummary::new(&state_root, &state)?, - ) - }, - StoreOp::PutStateTemporaryFlag(state_root), - ] + vec![if state.slot() % T::EthSpec::slots_per_epoch() == 0 { + StoreOp::PutState(state_root, &state) + } else { + StoreOp::PutStateSummary( + state_root, + HotStateSummary::new(&state_root, &state)?, + ) + }] }; chain .store .do_atomically_with_block_and_blobs_cache(state_batch)?; drop(txn_lock); - confirmed_state_roots.push(state_root); - state_root }; @@ -1713,7 +1690,6 @@ impl ExecutionPendingBlock { state, parent_block: parent.beacon_block, parent_eth1_finalization_data, - confirmed_state_roots, consensus_context, }, payload_verification_handle, diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index d3a6e93862..aa7418646f 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -358,7 +358,6 @@ pub struct BlockImportData { pub state: BeaconState, pub parent_block: SignedBeaconBlock>, pub parent_eth1_finalization_data: Eth1FinalizationData, - pub confirmed_state_roots: Vec, pub consensus_context: ConsensusContext, } @@ -376,7 +375,6 @@ impl BlockImportData { eth1_data: <_>::default(), eth1_deposit_index: 0, }, - confirmed_state_roots: vec![], consensus_context: ConsensusContext::new(Slot::new(0)), } } diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index f6d18c3705..6f8a0dcb7c 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -8,7 +8,6 @@ use crate::eth1_finalization_cache::Eth1FinalizationCache; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use 
crate::graffiti_calculator::{GraffitiCalculator, GraffitiOrigin}; -use crate::head_tracker::HeadTracker; use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; @@ -93,7 +92,6 @@ pub struct BeaconChainBuilder { slot_clock: Option, shutdown_sender: Option>, light_client_server_tx: Option>>, - head_tracker: Option, validator_pubkey_cache: Option>, spec: Arc, chain_config: ChainConfig, @@ -136,7 +134,6 @@ where slot_clock: None, shutdown_sender: None, light_client_server_tx: None, - head_tracker: None, validator_pubkey_cache: None, spec: Arc::new(E::default_spec()), chain_config: ChainConfig::default(), @@ -314,10 +311,6 @@ where self.genesis_block_root = Some(chain.genesis_block_root); self.genesis_state_root = Some(genesis_block.state_root()); - self.head_tracker = Some( - HeadTracker::from_ssz_container(&chain.ssz_head_tracker) - .map_err(|e| format!("Failed to decode head tracker for database: {:?}", e))?, - ); self.validator_pubkey_cache = Some(pubkey_cache); self.fork_choice = Some(fork_choice); @@ -729,7 +722,6 @@ where .genesis_state_root .ok_or("Cannot build without a genesis state root")?; let validator_monitor_config = self.validator_monitor_config.unwrap_or_default(); - let head_tracker = Arc::new(self.head_tracker.unwrap_or_default()); let beacon_proposer_cache: Arc> = <_>::default(); let mut validator_monitor = @@ -769,8 +761,6 @@ where &self.spec, )?; - // Update head tracker. 
- head_tracker.register_block(block_root, block.parent_root(), block.slot()); (block_root, block, true) } Err(e) => return Err(descriptive_db_error("head block", &e)), @@ -846,8 +836,7 @@ where })?; let migrator_config = self.store_migrator_config.unwrap_or_default(); - let store_migrator = - BackgroundMigrator::new(store.clone(), migrator_config, genesis_block_root); + let store_migrator = BackgroundMigrator::new(store.clone(), migrator_config); if let Some(slot) = slot_clock.now() { validator_monitor.process_valid_state( @@ -872,11 +861,10 @@ where // // This *must* be stored before constructing the `BeaconChain`, so that its `Drop` instance // doesn't write a `PersistedBeaconChain` without the rest of the batch. - let head_tracker_reader = head_tracker.0.read(); self.pending_io_batch.push(BeaconChain::< Witness, >::persist_head_in_batch_standalone( - genesis_block_root, &head_tracker_reader + genesis_block_root )); self.pending_io_batch.push(BeaconChain::< Witness, @@ -887,7 +875,6 @@ where .hot_db .do_atomically(self.pending_io_batch) .map_err(|e| format!("Error writing chain & metadata to disk: {:?}", e))?; - drop(head_tracker_reader); let genesis_validators_root = head_snapshot.beacon_state.genesis_validators_root(); let genesis_time = head_snapshot.beacon_state.genesis_time(); @@ -968,7 +955,6 @@ where fork_choice_signal_tx, fork_choice_signal_rx, event_handler: self.event_handler, - head_tracker, shuffling_cache: RwLock::new(ShufflingCache::new( shuffling_cache_size, head_shuffling_ids, diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index d99c6038d3..a6f5179fdc 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -53,7 +53,7 @@ use slot_clock::SlotClock; use state_processing::AllCaches; use std::sync::Arc; use std::time::Duration; -use store::{iter::StateRootsIterator, KeyValueStoreOp, StoreItem}; +use 
store::{iter::StateRootsIterator, KeyValueStore, KeyValueStoreOp, StoreItem}; use task_executor::{JoinHandle, ShutdownReason}; use tracing::{debug, error, info, warn}; use types::*; @@ -840,7 +840,7 @@ impl BeaconChain { ); if is_epoch_transition || reorg_distance.is_some() { - self.persist_head_and_fork_choice()?; + self.persist_fork_choice()?; self.op_pool.prune_attestations(self.epoch()?); } @@ -983,7 +983,6 @@ impl BeaconChain { self.store_migrator.process_finalization( new_finalized_state_root.into(), new_view.finalized_checkpoint, - self.head_tracker.clone(), )?; // Prune blobs in the background. @@ -998,6 +997,14 @@ impl BeaconChain { Ok(()) } + /// Persist fork choice to disk, writing immediately. + pub fn persist_fork_choice(&self) -> Result<(), Error> { + let _fork_choice_timer = metrics::start_timer(&metrics::PERSIST_FORK_CHOICE); + let batch = vec![self.persist_fork_choice_in_batch()]; + self.store.hot_db.do_atomically(batch)?; + Ok(()) + } + /// Return a database operation for writing fork choice to disk. 
pub fn persist_fork_choice_in_batch(&self) -> KeyValueStoreOp { Self::persist_fork_choice_in_batch_standalone(&self.canonical_head.fork_choice_read_lock()) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index f38a3b8b9c..4359d7fbdb 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -920,7 +920,6 @@ mod test { state, parent_block, parent_eth1_finalization_data, - confirmed_state_roots: vec![], consensus_context, }; @@ -1305,7 +1304,6 @@ mod pending_components_tests { eth1_data: Default::default(), eth1_deposit_index: 0, }, - confirmed_state_roots: vec![], consensus_context: ConsensusContext::new(Slot::new(0)), }, payload_verification_outcome: PayloadVerificationOutcome { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index 09d0563a4a..5fe674f30c 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -7,26 +7,21 @@ use crate::{ }; use lru::LruCache; use parking_lot::RwLock; -use ssz_derive::{Decode, Encode}; use state_processing::BlockReplayer; use std::sync::Arc; use store::OnDiskConsensusContext; use types::beacon_block_body::KzgCommitments; -use types::{ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc}; use types::{BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; /// This mirrors everything in the `AvailabilityPendingExecutedBlock`, except /// that it is much smaller because it contains only a state root instead of /// a full `BeaconState`. 
-#[derive(Encode, Decode, Clone)] +#[derive(Clone)] pub struct DietAvailabilityPendingExecutedBlock { - #[ssz(with = "ssz_tagged_signed_beacon_block_arc")] block: Arc>, state_root: Hash256, - #[ssz(with = "ssz_tagged_signed_beacon_block")] parent_block: SignedBeaconBlock>, parent_eth1_finalization_data: Eth1FinalizationData, - confirmed_state_roots: Vec, consensus_context: OnDiskConsensusContext, payload_verification_outcome: PayloadVerificationOutcome, custody_columns_count: usize, @@ -108,7 +103,6 @@ impl StateLRUCache { state_root, parent_block: executed_block.import_data.parent_block, parent_eth1_finalization_data: executed_block.import_data.parent_eth1_finalization_data, - confirmed_state_roots: executed_block.import_data.confirmed_state_roots, consensus_context: OnDiskConsensusContext::from_consensus_context( executed_block.import_data.consensus_context, ), @@ -138,7 +132,6 @@ impl StateLRUCache { state, parent_block: diet_executed_block.parent_block, parent_eth1_finalization_data: diet_executed_block.parent_eth1_finalization_data, - confirmed_state_roots: diet_executed_block.confirmed_state_roots, consensus_context: diet_executed_block .consensus_context .into_consensus_context(), @@ -227,7 +220,6 @@ impl From> state_root: value.import_data.state.canonical_root().unwrap(), parent_block: value.import_data.parent_block, parent_eth1_finalization_data: value.import_data.parent_eth1_finalization_data, - confirmed_state_roots: value.import_data.confirmed_state_roots, consensus_context: OnDiskConsensusContext::from_consensus_context( value.import_data.consensus_context, ), diff --git a/beacon_node/beacon_chain/src/head_tracker.rs b/beacon_node/beacon_chain/src/head_tracker.rs deleted file mode 100644 index 9c06ef33a1..0000000000 --- a/beacon_node/beacon_chain/src/head_tracker.rs +++ /dev/null @@ -1,214 +0,0 @@ -use parking_lot::{RwLock, RwLockReadGuard}; -use ssz_derive::{Decode, Encode}; -use std::collections::HashMap; -use types::{Hash256, Slot}; - 
-#[derive(Debug, PartialEq)] -pub enum Error { - MismatchingLengths { roots_len: usize, slots_len: usize }, -} - -/// Maintains a list of `BeaconChain` head block roots and slots. -/// -/// Each time a new block is imported, it should be applied to the `Self::register_block` function. -/// In order for this struct to be effective, every single block that is imported must be -/// registered here. -#[derive(Default, Debug)] -pub struct HeadTracker(pub RwLock>); - -pub type HeadTrackerReader<'a> = RwLockReadGuard<'a, HashMap>; - -impl HeadTracker { - /// Register a block with `Self`, so it may or may not be included in a `Self::heads` call. - /// - /// This function assumes that no block is imported without its parent having already been - /// imported. It cannot detect an error if this is not the case, it is the responsibility of - /// the upstream user. - pub fn register_block(&self, block_root: Hash256, parent_root: Hash256, slot: Slot) { - let mut map = self.0.write(); - map.remove(&parent_root); - map.insert(block_root, slot); - } - - /// Returns true iff `block_root` is a recognized head. - pub fn contains_head(&self, block_root: Hash256) -> bool { - self.0.read().contains_key(&block_root) - } - - /// Returns the list of heads in the chain. - pub fn heads(&self) -> Vec<(Hash256, Slot)> { - self.0 - .read() - .iter() - .map(|(root, slot)| (*root, *slot)) - .collect() - } - - /// Returns a `SszHeadTracker`, which contains all necessary information to restore the state - /// of `Self` at some later point. - /// - /// Should ONLY be used for tests, due to the potential for database races. - /// - /// See - #[cfg(test)] - pub fn to_ssz_container(&self) -> SszHeadTracker { - SszHeadTracker::from_map(&self.0.read()) - } - - /// Creates a new `Self` from the given `SszHeadTracker`, restoring `Self` to the same state of - /// the `Self` that created the `SszHeadTracker`. 
- pub fn from_ssz_container(ssz_container: &SszHeadTracker) -> Result { - let roots_len = ssz_container.roots.len(); - let slots_len = ssz_container.slots.len(); - - if roots_len != slots_len { - Err(Error::MismatchingLengths { - roots_len, - slots_len, - }) - } else { - let map = ssz_container - .roots - .iter() - .zip(ssz_container.slots.iter()) - .map(|(root, slot)| (*root, *slot)) - .collect::>(); - - Ok(Self(RwLock::new(map))) - } - } -} - -impl PartialEq for HeadTracker { - fn eq(&self, other: &HeadTracker) -> bool { - *self.0.read() == *other.0.read() - } -} - -/// Helper struct that is used to encode/decode the state of the `HeadTracker` as SSZ bytes. -/// -/// This is used when persisting the state of the `BeaconChain` to disk. -#[derive(Encode, Decode, Clone)] -pub struct SszHeadTracker { - roots: Vec, - slots: Vec, -} - -impl SszHeadTracker { - pub fn from_map(map: &HashMap) -> Self { - let (roots, slots) = map.iter().map(|(hash, slot)| (*hash, *slot)).unzip(); - SszHeadTracker { roots, slots } - } -} - -#[cfg(test)] -mod test { - use super::*; - use ssz::{Decode, Encode}; - use types::{BeaconBlock, EthSpec, FixedBytesExtended, MainnetEthSpec}; - - type E = MainnetEthSpec; - - #[test] - fn block_add() { - let spec = &E::default_spec(); - - let head_tracker = HeadTracker::default(); - - for i in 0..16 { - let mut block: BeaconBlock = BeaconBlock::empty(spec); - let block_root = Hash256::from_low_u64_be(i); - - *block.slot_mut() = Slot::new(i); - *block.parent_root_mut() = if i == 0 { - Hash256::random() - } else { - Hash256::from_low_u64_be(i - 1) - }; - - head_tracker.register_block(block_root, block.parent_root(), block.slot()); - } - - assert_eq!( - head_tracker.heads(), - vec![(Hash256::from_low_u64_be(15), Slot::new(15))], - "should only have one head" - ); - - let mut block: BeaconBlock = BeaconBlock::empty(spec); - let block_root = Hash256::from_low_u64_be(42); - *block.slot_mut() = Slot::new(15); - *block.parent_root_mut() = 
Hash256::from_low_u64_be(14); - head_tracker.register_block(block_root, block.parent_root(), block.slot()); - - let heads = head_tracker.heads(); - - assert_eq!(heads.len(), 2, "should only have two heads"); - assert!( - heads - .iter() - .any(|(root, slot)| *root == Hash256::from_low_u64_be(15) && *slot == Slot::new(15)), - "should contain first head" - ); - assert!( - heads - .iter() - .any(|(root, slot)| *root == Hash256::from_low_u64_be(42) && *slot == Slot::new(15)), - "should contain second head" - ); - } - - #[test] - fn empty_round_trip() { - let non_empty = HeadTracker::default(); - for i in 0..16 { - non_empty.0.write().insert(Hash256::random(), Slot::new(i)); - } - let bytes = non_empty.to_ssz_container().as_ssz_bytes(); - - assert_eq!( - HeadTracker::from_ssz_container( - &SszHeadTracker::from_ssz_bytes(&bytes).expect("should decode") - ), - Ok(non_empty), - "non_empty should pass round trip" - ); - } - - #[test] - fn non_empty_round_trip() { - let non_empty = HeadTracker::default(); - for i in 0..16 { - non_empty.0.write().insert(Hash256::random(), Slot::new(i)); - } - let bytes = non_empty.to_ssz_container().as_ssz_bytes(); - - assert_eq!( - HeadTracker::from_ssz_container( - &SszHeadTracker::from_ssz_bytes(&bytes).expect("should decode") - ), - Ok(non_empty), - "non_empty should pass round trip" - ); - } - - #[test] - fn bad_length() { - let container = SszHeadTracker { - roots: vec![Hash256::random()], - slots: vec![], - }; - let bytes = container.as_ssz_bytes(); - - assert_eq!( - HeadTracker::from_ssz_container( - &SszHeadTracker::from_ssz_bytes(&bytes).expect("should decode") - ), - Err(Error::MismatchingLengths { - roots_len: 1, - slots_len: 0 - }), - "should fail decoding with bad lengths" - ); - } -} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 48168aeb02..5b79312d37 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -33,7 +33,6 @@ pub mod 
fork_choice_signal; pub mod fork_revert; pub mod fulu_readiness; pub mod graffiti_calculator; -mod head_tracker; pub mod historical_blocks; pub mod kzg_utils; pub mod light_client_finality_update_verification; @@ -56,6 +55,7 @@ pub mod schema_change; pub mod shuffling_cache; pub mod single_attestation; pub mod state_advance_timer; +pub mod summaries_dag; pub mod sync_committee_rewards; pub mod sync_committee_verification; pub mod test_utils; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 463319a1f5..871721b4d8 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -601,12 +601,6 @@ pub static BALANCES_CACHE_MISSES: LazyLock> = LazyLock::new(| /* * Persisting BeaconChain components to disk */ -pub static PERSIST_HEAD: LazyLock> = LazyLock::new(|| { - try_create_histogram( - "beacon_persist_head", - "Time taken to persist the canonical head", - ) -}); pub static PERSIST_OP_POOL: LazyLock> = LazyLock::new(|| { try_create_histogram( "beacon_persist_op_pool", diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index cda5b34103..94fa0a1890 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -1,22 +1,16 @@ -use crate::beacon_chain::BEACON_CHAIN_DB_KEY; use crate::errors::BeaconChainError; -use crate::head_tracker::{HeadTracker, SszHeadTracker}; -use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; +use crate::summaries_dag::{DAGStateSummaryV22, Error as SummariesDagError, StateSummariesDAG}; use parking_lot::Mutex; -use std::collections::{HashMap, HashSet}; +use std::collections::HashSet; use std::mem; use std::sync::{mpsc, Arc}; use std::thread; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::{migrate_database, HotColdDBError}; -use store::iter::RootsIterator; -use store::{Error, ItemStore, StoreItem, StoreOp}; 
+use store::{Error, ItemStore, StoreOp}; pub use store::{HotColdDB, MemoryStore}; use tracing::{debug, error, info, warn}; -use types::{ - BeaconState, BeaconStateError, BeaconStateHash, Checkpoint, Epoch, EthSpec, FixedBytesExtended, - Hash256, SignedBeaconBlockHash, Slot, -}; +use types::{BeaconState, BeaconStateHash, Checkpoint, Epoch, EthSpec, Hash256, Slot}; /// Compact at least this frequently, finalization permitting (7 days). const MAX_COMPACTION_PERIOD_SECONDS: u64 = 604800; @@ -42,8 +36,6 @@ pub struct BackgroundMigrator, Cold: ItemStore> prev_migration: Arc>, #[allow(clippy::type_complexity)] tx_thread: Option, thread::JoinHandle<()>)>>, - /// Genesis block root, for persisting the `PersistedBeaconChain`. - genesis_block_root: Hash256, } #[derive(Debug, Clone, PartialEq, Eq)] @@ -89,7 +81,7 @@ pub struct PrevMigration { pub enum PruningOutcome { /// The pruning succeeded and updated the pruning checkpoint from `old_finalized_checkpoint`. Successful { - old_finalized_checkpoint: Checkpoint, + old_finalized_checkpoint_epoch: Epoch, }, /// The run was aborted because the new finalized checkpoint is older than the previous one. OutOfOrderFinalization { @@ -116,6 +108,11 @@ pub enum PruningError { }, UnexpectedEqualStateRoots, UnexpectedUnequalStateRoots, + MissingSummaryForFinalizedCheckpoint(Hash256), + MissingBlindedBlock(Hash256), + SummariesDagError(&'static str, SummariesDagError), + EmptyFinalizedStates, + EmptyFinalizedBlocks, } /// Message sent to the migration thread containing the information it needs to run. 
@@ -130,25 +127,17 @@ pub enum Notification { pub struct ManualFinalizationNotification { pub state_root: BeaconStateHash, pub checkpoint: Checkpoint, - pub head_tracker: Arc, - pub genesis_block_root: Hash256, } pub struct FinalizationNotification { pub finalized_state_root: BeaconStateHash, pub finalized_checkpoint: Checkpoint, - pub head_tracker: Arc, pub prev_migration: Arc>, - pub genesis_block_root: Hash256, } impl, Cold: ItemStore> BackgroundMigrator { /// Create a new `BackgroundMigrator` and spawn its thread if necessary. - pub fn new( - db: Arc>, - config: MigratorConfig, - genesis_block_root: Hash256, - ) -> Self { + pub fn new(db: Arc>, config: MigratorConfig) -> Self { // Estimate last migration run from DB split slot. let prev_migration = Arc::new(Mutex::new(PrevMigration { epoch: db.get_split_slot().epoch(E::slots_per_epoch()), @@ -163,7 +152,6 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator, ) -> Result<(), BeaconChainError> { let notif = FinalizationNotification { finalized_state_root, finalized_checkpoint, - head_tracker, prev_migration: self.prev_migration.clone(), - genesis_block_root: self.genesis_block_root, }; // Send to background thread if configured, otherwise run in foreground. 
@@ -314,9 +299,7 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator {} + Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { + debug!( + slot = slot.as_u64(), + "Database migration postponed, unaligned finalized block" + ); + } + Err(e) => { + warn!(error = ?e, "Database migration failed"); + return; + } + }; + + let old_finalized_checkpoint_epoch = match Self::prune_hot_db( + db.clone(), + finalized_state_root.into(), &finalized_state, notif.finalized_checkpoint, - notif.genesis_block_root, ) { Ok(PruningOutcome::Successful { - old_finalized_checkpoint, - }) => old_finalized_checkpoint, + old_finalized_checkpoint_epoch, + }) => old_finalized_checkpoint_epoch, Ok(PruningOutcome::DeferredConcurrentHeadTrackerMutation) => { warn!( message = "this is expected only very rarely!", @@ -391,26 +391,10 @@ impl, Cold: ItemStore> BackgroundMigrator { - warn!(error = ?e,"Block pruning failed"); - return; - } - }; - - match migrate_database( - db.clone(), - finalized_state_root.into(), - finalized_block_root, - &finalized_state, - ) { - Ok(()) => {} - Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { - debug!( - slot = slot.as_u64(), - "Database migration postponed, unaligned finalized block" + warn!( + error = ?e, + "Hot DB pruning failed" ); - } - Err(e) => { - warn!(error = ?e, "Database migration failed"); return; } }; @@ -418,7 +402,7 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator>, - head_tracker: Arc, - new_finalized_state_hash: BeaconStateHash, + new_finalized_state_root: Hash256, new_finalized_state: &BeaconState, new_finalized_checkpoint: Checkpoint, - genesis_block_root: Hash256, ) -> Result { - let old_finalized_checkpoint = - store - .load_pruning_checkpoint()? 
- .unwrap_or_else(|| Checkpoint { - epoch: Epoch::new(0), - root: Hash256::zero(), - }); - - let old_finalized_slot = old_finalized_checkpoint - .epoch - .start_slot(E::slots_per_epoch()); let new_finalized_slot = new_finalized_checkpoint .epoch .start_slot(E::slots_per_epoch()); - let new_finalized_block_hash = new_finalized_checkpoint.root.into(); // The finalized state must be for the epoch boundary slot, not the slot of the finalized // block. @@ -549,200 +518,220 @@ impl, Cold: ItemStore> BackgroundMigrator new_finalized_slot { - return Ok(PruningOutcome::OutOfOrderFinalization { - old_finalized_checkpoint, - new_finalized_checkpoint, - }); - } - debug!( - old_finalized_epoch = %old_finalized_checkpoint.epoch, - new_finalized_epoch = %new_finalized_checkpoint.epoch, + new_finalized_checkpoint = ?new_finalized_checkpoint, + new_finalized_state_root = %new_finalized_state_root, "Starting database pruning" ); - // For each slot between the new finalized checkpoint and the old finalized checkpoint, - // collect the beacon block root and state root of the canonical chain. - let newly_finalized_chain: HashMap = - std::iter::once(Ok(( - new_finalized_slot, - (new_finalized_block_hash, new_finalized_state_hash), - ))) - .chain(RootsIterator::new(&store, new_finalized_state).map(|res| { - res.map(|(block_root, state_root, slot)| { - (slot, (block_root.into(), state_root.into())) + + let state_summaries_dag = { + let state_summaries = store + .load_hot_state_summaries()? + .into_iter() + .map(|(state_root, summary)| { + let block_root = summary.latest_block_root; + // This error should never happen unless we break a DB invariant + let block = store + .get_blinded_block(&block_root)? 
+ .ok_or(PruningError::MissingBlindedBlock(block_root))?; + Ok(( + state_root, + DAGStateSummaryV22 { + slot: summary.slot, + latest_block_root: summary.latest_block_root, + block_slot: block.slot(), + block_parent_root: block.parent_root(), + }, + )) }) - })) - .take_while(|res| { - res.as_ref() - .map_or(true, |(slot, _)| *slot >= old_finalized_slot) - }) - .collect::>()?; + .collect::, BeaconChainError>>()?; + + // De-duplicate block roots to reduce block reads below + let summary_block_roots = HashSet::::from_iter( + state_summaries + .iter() + .map(|(_, summary)| summary.latest_block_root), + ); + + // Sanity check, there is at least one summary with the new finalized block root + if !summary_block_roots.contains(&new_finalized_checkpoint.root) { + return Err(BeaconChainError::PruningError( + PruningError::MissingSummaryForFinalizedCheckpoint( + new_finalized_checkpoint.root, + ), + )); + } + + StateSummariesDAG::new_from_v22(state_summaries) + .map_err(|e| PruningError::SummariesDagError("new StateSumariesDAG", e))? + }; + + // To debug faulty trees log if we unexpectedly have more than one root. These trees may not + // result in an error, as they may not be queried in the codepaths below. + let state_summaries_dag_roots = state_summaries_dag.tree_roots(); + if state_summaries_dag_roots.len() > 1 { + warn!( + state_summaries_dag_roots = ?state_summaries_dag_roots, + "Prune state summaries dag found more than one root" + ); + } + + // `new_finalized_state_root` is the *state at the slot of the finalized epoch*, + // rather than the state of the latest finalized block. These two values will only + // differ when the first slot of the finalized epoch is a skip slot. 
+ let finalized_and_descendant_state_roots_of_finalized_checkpoint = + HashSet::::from_iter( + std::iter::once(new_finalized_state_root).chain( + state_summaries_dag + .descendants_of(&new_finalized_state_root) + .map_err(|e| PruningError::SummariesDagError("descendants of", e))?, + ), + ); + + // Collect all `latest_block_roots` of the + // finalized_and_descendant_state_roots_of_finalized_checkpoint set. Includes the finalized + // block as `new_finalized_state_root` always has a latest block root equal to the finalized + // block. + let finalized_and_descendant_block_roots_of_finalized_checkpoint = + HashSet::::from_iter( + state_summaries_dag + .blocks_of_states( + finalized_and_descendant_state_roots_of_finalized_checkpoint.iter(), + ) + // should never error, we just constructed + // finalized_and_descendant_state_roots_of_finalized_checkpoint from the + // state_summaries_dag + .map_err(|e| PruningError::SummariesDagError("blocks of descendant", e))? + .into_iter() + .map(|(block_root, _)| block_root), + ); + + // Note: ancestors_of includes the finalized state root + let newly_finalized_state_summaries = state_summaries_dag + .ancestors_of(new_finalized_state_root) + .map_err(|e| PruningError::SummariesDagError("ancestors of", e))?; + let newly_finalized_state_roots = newly_finalized_state_summaries + .iter() + .map(|(root, _)| *root) + .collect::>(); + let newly_finalized_states_min_slot = *newly_finalized_state_summaries + .iter() + .map(|(_, slot)| slot) + .min() + .ok_or(PruningError::EmptyFinalizedStates)?; + + // Note: ancestors_of includes the finalized block + let newly_finalized_blocks = state_summaries_dag + .blocks_of_states(newly_finalized_state_roots.iter()) + .map_err(|e| PruningError::SummariesDagError("blocks of newly finalized", e))?; // We don't know which blocks are shared among abandoned chains, so we buffer and delete // everything in one fell swoop. 
- let mut abandoned_blocks: HashSet = HashSet::new(); - let mut abandoned_states: HashSet<(Slot, BeaconStateHash)> = HashSet::new(); - let mut abandoned_heads: HashSet = HashSet::new(); + let mut blocks_to_prune: HashSet = HashSet::new(); + let mut states_to_prune: HashSet<(Slot, Hash256)> = HashSet::new(); - let heads = head_tracker.heads(); - debug!( - old_finalized_root = ?old_finalized_checkpoint.root, - new_finalized_root = ?new_finalized_checkpoint.root, - head_count = heads.len(), - "Extra pruning information" - ); + // Consider the following block tree where we finalize block `[0]` at the checkpoint `(f)`. + // There's a block `[3]` that descendends from the finalized block but NOT from the + // finalized checkpoint. The block tree rooted in `[3]` conflicts with finality and must be + // pruned. Therefore we collect all state summaries descendant of `(f)`. + // + // finalize epoch boundary + // | /-------[2]----- + // [0]-------|--(f)--[1]---------- + // \---[3]--|-----------------[4] + // | - for (head_hash, head_slot) in heads { - // Load head block. If it fails with a decode error, it's likely a reverted block, - // so delete it from the head tracker but leave it and its states in the database - // This is suboptimal as it wastes disk space, but it's difficult to fix. A re-sync - // can be used to reclaim the space. 
- let head_state_root = match store.get_blinded_block(&head_hash) { - Ok(Some(block)) => block.state_root(), - Ok(None) => { - return Err(BeaconStateError::MissingBeaconBlock(head_hash.into()).into()) + for (_, summaries) in state_summaries_dag.summaries_by_slot_ascending() { + for (state_root, summary) in summaries { + let should_prune = if finalized_and_descendant_state_roots_of_finalized_checkpoint + .contains(&state_root) + { + // This state is a viable descendant of the finalized checkpoint, so does not + // conflict with finality and can be built on or become a head + false + } else { + // Everything else, prune + true + }; + + if should_prune { + // States are migrated into the cold DB in the migrate step. All hot states + // prior to finalized can be pruned from the hot DB columns + states_to_prune.insert((summary.slot, state_root)); } - Err(Error::SszDecodeError(e)) => { - warn!( - block_root = ?head_hash, - error = ?e, - "Forgetting invalid head block" - ); - abandoned_heads.insert(head_hash); - continue; - } - Err(e) => return Err(e.into()), + } + } + + for (block_root, slot) in state_summaries_dag.iter_blocks() { + // Blocks both finalized and unfinalized are in the same DB column. We must only + // prune blocks from abandoned forks. Note that block pruning and state pruning differ. + // The blocks DB column is shared for hot and cold data, while the states have different + // columns. Thus, we only prune unviable blocks or from abandoned forks. + let should_prune = if finalized_and_descendant_block_roots_of_finalized_checkpoint + .contains(&block_root) + { + // Keep unfinalized blocks descendant of finalized checkpoint + finalized block + // itself Note that we anchor this set on the finalized checkpoint instead of the + // finalized block. A diagram above shows a relevant example. 
+ false + } else if newly_finalized_blocks.contains(&(block_root, slot)) { + // Keep recently finalized blocks + false + } else if slot < newly_finalized_states_min_slot { + // Keep recently finalized blocks that we know are canonical. Blocks with slots < + // that `newly_finalized_blocks_min_slot` we don't have canonical information so we + // assume they are part of the finalized pruned chain + // + // Pruning these would risk breaking the DB by deleting canonical blocks once the + // HDiff grid advances. If the pruning routine is correct this condition should + // never be hit. + false + } else { + // Everything else, prune + true }; - let mut potentially_abandoned_head = Some(head_hash); - let mut potentially_abandoned_blocks = vec![]; - - // Iterate backwards from this head, staging blocks and states for deletion. - let iter = std::iter::once(Ok((head_hash, head_state_root, head_slot))) - .chain(RootsIterator::from_block(&store, head_hash)?); - - for maybe_tuple in iter { - let (block_root, state_root, slot) = maybe_tuple?; - let block_root = SignedBeaconBlockHash::from(block_root); - let state_root = BeaconStateHash::from(state_root); - - match newly_finalized_chain.get(&slot) { - // If there's no information about a slot on the finalized chain, then - // it should be because it's ahead of the new finalized slot. Stage - // the fork's block and state for possible deletion. - None => { - if slot > new_finalized_slot { - potentially_abandoned_blocks.push(( - slot, - Some(block_root), - Some(state_root), - )); - } else if slot >= old_finalized_slot { - return Err(PruningError::MissingInfoForCanonicalChain { slot }.into()); - } else { - // We must assume here any candidate chains include the old finalized - // checkpoint, i.e. there aren't any forks starting at a block that is a - // strict ancestor of old_finalized_checkpoint. 
- warn!( - head_block_root = ?head_hash, - %head_slot, - "Found a chain that should already have been pruned" - ); - potentially_abandoned_head.take(); - break; - } - } - Some((finalized_block_root, finalized_state_root)) => { - // This fork descends from a newly finalized block, we can stop. - if block_root == *finalized_block_root { - // Sanity check: if the slot and block root match, then the - // state roots should match too. - if state_root != *finalized_state_root { - return Err(PruningError::UnexpectedUnequalStateRoots.into()); - } - - // If the fork descends from the whole finalized chain, - // do not prune it. Otherwise continue to delete all - // of the blocks and states that have been staged for - // deletion so far. - if slot == new_finalized_slot { - potentially_abandoned_blocks.clear(); - potentially_abandoned_head.take(); - } - // If there are skipped slots on the fork to be pruned, then - // we will have just staged the common block for deletion. - // Unstage it. - else { - for (_, block_root, _) in - potentially_abandoned_blocks.iter_mut().rev() - { - if block_root.as_ref() == Some(finalized_block_root) { - *block_root = None; - } else { - break; - } - } - } - break; - } else { - if state_root == *finalized_state_root { - return Err(PruningError::UnexpectedEqualStateRoots.into()); - } - potentially_abandoned_blocks.push(( - slot, - Some(block_root), - Some(state_root), - )); - } - } - } - } - - if let Some(abandoned_head) = potentially_abandoned_head { - debug!( - head_block_root = ?abandoned_head, - %head_slot, - "Pruning head" - ); - abandoned_heads.insert(abandoned_head); - abandoned_blocks.extend( - potentially_abandoned_blocks - .iter() - .filter_map(|(_, maybe_block_hash, _)| *maybe_block_hash), - ); - abandoned_states.extend(potentially_abandoned_blocks.iter().filter_map( - |(slot, _, maybe_state_hash)| maybe_state_hash.map(|sr| (*slot, sr)), - )); + if should_prune { + blocks_to_prune.insert(block_root); } } - // Update the head tracker 
before the database, so that we maintain the invariant - // that a block present in the head tracker is present in the database. - // See https://github.com/sigp/lighthouse/issues/1557 - let mut head_tracker_lock = head_tracker.0.write(); + // Sort states to prune to make it more readable + let mut states_to_prune = states_to_prune.into_iter().collect::>(); + states_to_prune.sort_by_key(|(slot, _)| *slot); - // Check that all the heads to be deleted are still present. The absence of any - // head indicates a race, that will likely resolve itself, so we defer pruning until - // later. - for head_hash in &abandoned_heads { - if !head_tracker_lock.contains_key(head_hash) { - return Ok(PruningOutcome::DeferredConcurrentHeadTrackerMutation); - } + debug!( + new_finalized_checkpoint = ?new_finalized_checkpoint, + newly_finalized_blocks = newly_finalized_blocks.len(), + newly_finalized_state_roots = newly_finalized_state_roots.len(), + newly_finalized_states_min_slot = %newly_finalized_states_min_slot, + state_summaries_count = state_summaries_dag.summaries_count(), + state_summaries_dag_roots = ?state_summaries_dag_roots, + finalized_and_descendant_state_roots_of_finalized_checkpoint = finalized_and_descendant_state_roots_of_finalized_checkpoint.len(), + finalized_and_descendant_state_roots_of_finalized_checkpoint = finalized_and_descendant_state_roots_of_finalized_checkpoint.len(), + blocks_to_prune = blocks_to_prune.len(), + states_to_prune = states_to_prune.len(), + "Extra pruning information" + ); + // Don't log the full `states_to_prune` in the log statement above as it can result in a + // single log line of +1Kb and break logging setups. + for block_root in &blocks_to_prune { + debug!( + block_root = ?block_root, + "Pruning block" + ); + } + for (slot, state_root) in &states_to_prune { + debug!( + ?state_root, + %slot, + "Pruning hot state" + ); } - // Then remove them for real. 
- for head_hash in abandoned_heads { - head_tracker_lock.remove(&head_hash); - } - - let mut batch: Vec> = abandoned_blocks + let mut batch: Vec> = blocks_to_prune .into_iter() - .map(Into::into) - .flat_map(|block_root: Hash256| { + .flat_map(|block_root| { [ StoreOp::DeleteBlock(block_root), StoreOp::DeleteExecutionPayload(block_root), @@ -750,43 +739,87 @@ impl, Cold: ItemStore> BackgroundMigrator>, + ) { + for (block_root, slot) in finalized_blocks { + // Delete the execution payload if payload pruning is enabled. At a skipped slot we may + // delete the payload for the finalized block itself, but that's OK as we only guarantee + // that payloads are present for slots >= the split slot. + if *slot < new_finalized_slot { + hot_db_ops.push(StoreOp::DeleteExecutionPayload(*block_root)); + } + } + } + + fn prune_non_checkpoint_sync_committee_branches( + finalized_blocks_desc: &[(Hash256, Slot)], + hot_db_ops: &mut Vec>, + ) { + let mut epoch_boundary_blocks = HashSet::new(); + let mut non_checkpoint_block_roots = HashSet::new(); + + // Then, iterate states in slot ascending order, as they are stored wrt previous states. + for (block_root, slot) in finalized_blocks_desc.iter().rev() { + // At a missed slot, `state_root_iter` will return the block root + // from the previous non-missed slot. This ensures that the block root at an + // epoch boundary is always a checkpoint block root. We keep track of block roots + // at epoch boundaries by storing them in the `epoch_boundary_blocks` hash set. + // We then ensure that block roots at the epoch boundary aren't included in the + // `non_checkpoint_block_roots` hash set. + if *slot % E::slots_per_epoch() == 0 { + epoch_boundary_blocks.insert(block_root); + } else { + non_checkpoint_block_roots.insert(block_root); + } + + if epoch_boundary_blocks.contains(&block_root) { + non_checkpoint_block_roots.remove(&block_root); + } + } + + // Prune sync committee branch data for all non checkpoint block roots. 
+ // Note that `non_checkpoint_block_roots` should only contain non checkpoint block roots + // as long as `finalized_state.slot()` is at an epoch boundary. If this were not the case + // we risk the chance of pruning a `sync_committee_branch` for a checkpoint block root. + // E.g. if `current_split_slot` = (Epoch A slot 0) and `finalized_state.slot()` = (Epoch C slot 31) + // and (Epoch D slot 0) is a skipped slot, we will have pruned a `sync_committee_branch` + // for a checkpoint block root. + non_checkpoint_block_roots + .into_iter() + .for_each(|block_root| { + hot_db_ops.push(StoreOp::DeleteSyncCommitteeBranch(*block_root)); + }); + } + /// Compact the database if it has been more than `COMPACTION_PERIOD_SECONDS` since it /// was last compacted. pub fn run_compaction( diff --git a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs index adb68def0d..83affb0dcd 100644 --- a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs +++ b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs @@ -1,24 +1,11 @@ -use crate::head_tracker::SszHeadTracker; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use store::{DBColumn, Error as StoreError, StoreItem}; use types::Hash256; -/// Dummy value to use for the canonical head block root, see below. 
-pub const DUMMY_CANONICAL_HEAD_BLOCK_ROOT: Hash256 = Hash256::repeat_byte(0xff); - #[derive(Clone, Encode, Decode)] pub struct PersistedBeaconChain { - /// This value is ignored to resolve the issue described here: - /// - /// https://github.com/sigp/lighthouse/pull/1639 - /// - /// Its removal is tracked here: - /// - /// https://github.com/sigp/lighthouse/issues/1784 - pub _canonical_head_block_root: Hash256, pub genesis_block_root: Hash256, - pub ssz_head_tracker: SszHeadTracker, } impl StoreItem for PersistedBeaconChain { diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index ccfae1b182..49aa116f6c 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -2,6 +2,7 @@ mod migration_schema_v20; mod migration_schema_v21; mod migration_schema_v22; +mod migration_schema_v23; use crate::beacon_chain::BeaconChainTypes; use std::sync::Arc; @@ -57,6 +58,14 @@ pub fn migrate_schema( // bumped inside the upgrade_to_v22 fn migration_schema_v22::upgrade_to_v22::(db.clone(), genesis_state_root) } + (SchemaVersion(22), SchemaVersion(23)) => { + let ops = migration_schema_v23::upgrade_to_v23::(db.clone())?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(23), SchemaVersion(22)) => { + let ops = migration_schema_v23::downgrade_from_v23::(db.clone())?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. 
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs new file mode 100644 index 0000000000..e66178df53 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs @@ -0,0 +1,147 @@ +use crate::beacon_chain::BeaconChainTypes; +use crate::persisted_fork_choice::PersistedForkChoice; +use crate::schema_change::StoreError; +use crate::test_utils::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY, FORK_CHOICE_DB_KEY}; +use crate::BeaconForkChoiceStore; +use fork_choice::{ForkChoice, ResetPayloadStatuses}; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use std::sync::Arc; +use store::{DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem}; +use types::{Hash256, Slot}; + +/// Dummy value to use for the canonical head block root, see below. +pub const DUMMY_CANONICAL_HEAD_BLOCK_ROOT: Hash256 = Hash256::repeat_byte(0xff); + +pub fn upgrade_to_v23( + db: Arc>, +) -> Result, Error> { + // 1) Set the head-tracker to empty + let Some(persisted_beacon_chain_v22) = + db.get_item::(&BEACON_CHAIN_DB_KEY)? + else { + return Err(Error::MigrationError( + "No persisted beacon chain found in DB. Datadir could be incorrect or DB could be corrupt".to_string() + )); + }; + + let persisted_beacon_chain = PersistedBeaconChain { + genesis_block_root: persisted_beacon_chain_v22.genesis_block_root, + }; + + let mut ops = vec![persisted_beacon_chain.as_kv_store_op(BEACON_CHAIN_DB_KEY)]; + + // 2) Wipe out all state temporary flags. While un-used in V23, if there's a rollback we could + // end-up with an inconsistent DB. 
+ for state_root_result in db + .hot_db + .iter_column_keys::(DBColumn::BeaconStateTemporary) + { + ops.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconStateTemporary, + state_root_result?.as_slice().to_vec(), + )); + } + + Ok(ops) +} + +pub fn downgrade_from_v23( + db: Arc>, +) -> Result, Error> { + let Some(persisted_beacon_chain) = db.get_item::(&BEACON_CHAIN_DB_KEY)? + else { + // The `PersistedBeaconChain` must exist if fork choice exists. + return Err(Error::MigrationError( + "No persisted beacon chain found in DB. Datadir could be incorrect or DB could be corrupt".to_string(), + )); + }; + + // Recreate head-tracker from fork choice. + let Some(persisted_fork_choice) = db.get_item::(&FORK_CHOICE_DB_KEY)? + else { + // Fork choice should exist if the database exists. + return Err(Error::MigrationError( + "No fork choice found in DB".to_string(), + )); + }; + + let fc_store = + BeaconForkChoiceStore::from_persisted(persisted_fork_choice.fork_choice_store, db.clone()) + .map_err(|e| { + Error::MigrationError(format!( + "Error loading fork choise store from persisted: {e:?}" + )) + })?; + + // Doesn't matter what policy we use for invalid payloads, as our head calculation just + // considers descent from finalization. 
+ let reset_payload_statuses = ResetPayloadStatuses::OnlyWithInvalidPayload; + let fork_choice = ForkChoice::from_persisted( + persisted_fork_choice.fork_choice, + reset_payload_statuses, + fc_store, + &db.spec, + ) + .map_err(|e| { + Error::MigrationError(format!("Error loading fork choice from persisted: {e:?}")) + })?; + + let heads = fork_choice + .proto_array() + .heads_descended_from_finalization::(); + + let head_roots = heads.iter().map(|node| node.root).collect(); + let head_slots = heads.iter().map(|node| node.slot).collect(); + + let persisted_beacon_chain_v22 = PersistedBeaconChainV22 { + _canonical_head_block_root: DUMMY_CANONICAL_HEAD_BLOCK_ROOT, + genesis_block_root: persisted_beacon_chain.genesis_block_root, + ssz_head_tracker: SszHeadTracker { + roots: head_roots, + slots: head_slots, + }, + }; + + let ops = vec![persisted_beacon_chain_v22.as_kv_store_op(BEACON_CHAIN_DB_KEY)]; + + Ok(ops) +} + +/// Helper struct that is used to encode/decode the state of the `HeadTracker` as SSZ bytes. +/// +/// This is used when persisting the state of the `BeaconChain` to disk. 
+#[derive(Encode, Decode, Clone)] +pub struct SszHeadTracker { + roots: Vec, + slots: Vec, +} + +#[derive(Clone, Encode, Decode)] +pub struct PersistedBeaconChainV22 { + /// This value is ignored to resolve the issue described here: + /// + /// https://github.com/sigp/lighthouse/pull/1639 + /// + /// Its removal is tracked here: + /// + /// https://github.com/sigp/lighthouse/issues/1784 + pub _canonical_head_block_root: Hash256, + pub genesis_block_root: Hash256, + /// DEPRECATED + pub ssz_head_tracker: SszHeadTracker, +} + +impl StoreItem for PersistedBeaconChainV22 { + fn db_column() -> DBColumn { + DBColumn::BeaconChain + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Self::from_ssz_bytes(bytes).map_err(Into::into) + } +} diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index f4216ef76d..9135c3ce88 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -23,7 +23,6 @@ use std::sync::{ atomic::{AtomicBool, Ordering}, Arc, }; -use store::KeyValueStore; use task_executor::TaskExecutor; use tokio::time::{sleep, sleep_until, Instant}; use tracing::{debug, error, warn}; @@ -297,7 +296,7 @@ fn advance_head(beacon_chain: &Arc>) -> Resu // Protect against advancing a state more than a single slot. // // Advancing more than one slot without storing the intermediate state would corrupt the - // database. Future works might store temporary, intermediate states inside this function. + // database. Future works might store intermediate states inside this function. 
match state.slot().cmp(&state.latest_block_header().slot) { std::cmp::Ordering::Equal => (), std::cmp::Ordering::Greater => { @@ -432,20 +431,13 @@ fn advance_head(beacon_chain: &Arc>) -> Resu ); } - // Write the advanced state to the database with a temporary flag that will be deleted when - // a block is imported on top of this state. We should delete this once we bring in the DB - // changes from tree-states that allow us to prune states without temporary flags. + // Write the advanced state to the database. + // We no longer use a transaction lock here when checking whether the state exists, because + // even if we race with the deletion of this state by the finalization pruning code, the worst + // case is we end up with a finalized state stored, that will get pruned the next time pruning + // runs. let advanced_state_root = state.update_tree_hash_cache()?; - let txn_lock = beacon_chain.store.hot_db.begin_rw_transaction(); - let state_already_exists = beacon_chain - .store - .load_hot_state_summary(&advanced_state_root)? 
- .is_some(); - let temporary = !state_already_exists; - beacon_chain - .store - .put_state_possibly_temporary(&advanced_state_root, &state, temporary)?; - drop(txn_lock); + beacon_chain.store.put_state(&advanced_state_root, &state)?; debug!( ?head_block_root, diff --git a/beacon_node/beacon_chain/src/summaries_dag.rs b/beacon_node/beacon_chain/src/summaries_dag.rs new file mode 100644 index 0000000000..ab379d1eb2 --- /dev/null +++ b/beacon_node/beacon_chain/src/summaries_dag.rs @@ -0,0 +1,464 @@ +use itertools::Itertools; +use std::{ + cmp::Ordering, + collections::{btree_map::Entry, BTreeMap, HashMap}, +}; +use types::{Hash256, Slot}; + +#[derive(Debug, Clone, Copy)] +pub struct DAGStateSummary { + pub slot: Slot, + pub latest_block_root: Hash256, + pub latest_block_slot: Slot, + pub previous_state_root: Hash256, +} + +#[derive(Debug, Clone, Copy)] +pub struct DAGStateSummaryV22 { + pub slot: Slot, + pub latest_block_root: Hash256, + pub block_slot: Slot, + pub block_parent_root: Hash256, +} + +pub struct StateSummariesDAG { + // state_root -> state_summary + state_summaries_by_state_root: HashMap, + // block_root -> state slot -> (state_root, state summary) + state_summaries_by_block_root: HashMap>, + // parent_state_root -> Vec + // cached value to prevent having to recompute in each recursive call into `descendants_of` + child_state_roots: HashMap>, +} + +#[derive(Debug)] +pub enum Error { + DuplicateStateSummary { + block_root: Hash256, + existing_state_summary: Box<(Slot, Hash256)>, + new_state_summary: (Slot, Hash256), + }, + MissingStateSummary(Hash256), + MissingStateSummaryByBlockRoot { + state_root: Hash256, + latest_block_root: Hash256, + }, + StateSummariesNotContiguous { + state_root: Hash256, + state_slot: Slot, + latest_block_root: Hash256, + parent_block_root: Box, + parent_block_latest_state_summary: Box>, + }, + MissingChildStateRoot(Hash256), + RequestedSlotAboveSummary { + starting_state_root: Hash256, + ancestor_slot: Slot, + state_root: 
Hash256, + state_slot: Slot, + }, + RootUnknownPreviousStateRoot(Slot, Hash256), + RootUnknownAncestorStateRoot { + starting_state_root: Hash256, + ancestor_slot: Slot, + root_state_root: Hash256, + root_state_slot: Slot, + }, +} + +impl StateSummariesDAG { + pub fn new(state_summaries: Vec<(Hash256, DAGStateSummary)>) -> Result { + // Group them by latest block root, and sorted state slot + let mut state_summaries_by_state_root = HashMap::new(); + let mut state_summaries_by_block_root = HashMap::<_, BTreeMap<_, _>>::new(); + let mut child_state_roots = HashMap::<_, Vec<_>>::new(); + + for (state_root, summary) in state_summaries.into_iter() { + let summaries = state_summaries_by_block_root + .entry(summary.latest_block_root) + .or_default(); + + // Sanity check to ensure no duplicate summaries for the tuple (block_root, state_slot) + match summaries.entry(summary.slot) { + Entry::Vacant(entry) => { + entry.insert((state_root, summary)); + } + Entry::Occupied(existing) => { + return Err(Error::DuplicateStateSummary { + block_root: summary.latest_block_root, + existing_state_summary: (summary.slot, state_root).into(), + new_state_summary: (*existing.key(), existing.get().0), + }) + } + } + + state_summaries_by_state_root.insert(state_root, summary); + + child_state_roots + .entry(summary.previous_state_root) + .or_default() + .push(state_root); + // Add empty entry for the child state + child_state_roots.entry(state_root).or_default(); + } + + Ok(Self { + state_summaries_by_state_root, + state_summaries_by_block_root, + child_state_roots, + }) + } + + /// Computes a DAG from a sequence of state summaries, including their parent block + /// relationships. + /// + /// - Expects summaries to be contiguous per slot: there must exist a summary at every slot + /// of each tree branch + /// - Maybe include multiple disjoint trees. The root of each tree will have a ZERO parent state + /// root, which will error later when calling `previous_state_root`. 
+ pub fn new_from_v22( + state_summaries_v22: Vec<(Hash256, DAGStateSummaryV22)>, + ) -> Result { + // Group them by latest block root, and sorted state slot + let mut state_summaries_by_block_root = HashMap::<_, BTreeMap<_, _>>::new(); + for (state_root, summary) in state_summaries_v22.iter() { + let summaries = state_summaries_by_block_root + .entry(summary.latest_block_root) + .or_default(); + + // Sanity check to ensure no duplicate summaries for the tuple (block_root, state_slot) + match summaries.entry(summary.slot) { + Entry::Vacant(entry) => { + entry.insert((state_root, summary)); + } + Entry::Occupied(existing) => { + return Err(Error::DuplicateStateSummary { + block_root: summary.latest_block_root, + existing_state_summary: (summary.slot, *state_root).into(), + new_state_summary: (*existing.key(), *existing.get().0), + }) + } + } + } + + let state_summaries = state_summaries_v22 + .iter() + .map(|(state_root, summary)| { + let previous_state_root = if summary.slot == 0 { + Hash256::ZERO + } else { + let previous_slot = summary.slot - 1; + + // Check the set of states in the same state's block root + let same_block_root_summaries = state_summaries_by_block_root + .get(&summary.latest_block_root) + // Should never error: we construct the HashMap here and must have at least + // one entry per block root + .ok_or(Error::MissingStateSummaryByBlockRoot { + state_root: *state_root, + latest_block_root: summary.latest_block_root, + })?; + if let Some((state_root, _)) = same_block_root_summaries.get(&previous_slot) { + // Skipped slot: block root at previous slot is the same as latest block root. + **state_root + } else { + // Common case: not a skipped slot. 
+ let parent_block_root = summary.block_parent_root; + if let Some(parent_block_summaries) = + state_summaries_by_block_root.get(&parent_block_root) + { + *parent_block_summaries + .get(&previous_slot) + // Should never error: summaries are contiguous, so if there's an + // entry it must contain at least one summary at the previous slot. + .ok_or(Error::StateSummariesNotContiguous { + state_root: *state_root, + state_slot: summary.slot, + latest_block_root: summary.latest_block_root, + parent_block_root: parent_block_root.into(), + parent_block_latest_state_summary: parent_block_summaries + .iter() + .max_by(|a, b| a.0.cmp(b.0)) + .map(|(slot, (state_root, _))| (*slot, **state_root)) + .into(), + })? + .0 + } else { + // We don't know of any summary with this parent block root. We'll + // consider this summary to be a root of `state_summaries_v22` + // collection and mark it as zero. + // The test store_tests::finalizes_non_epoch_start_slot manages to send two + // disjoint trees on its second migration. 
+ Hash256::ZERO + } + } + }; + + Ok(( + *state_root, + DAGStateSummary { + slot: summary.slot, + latest_block_root: summary.latest_block_root, + latest_block_slot: summary.block_slot, + previous_state_root, + }, + )) + }) + .collect::, _>>()?; + + Self::new(state_summaries) + } + + // Returns all non-unique latest block roots of a given set of states + pub fn blocks_of_states<'a, I: Iterator>( + &self, + state_roots: I, + ) -> Result, Error> { + state_roots + .map(|state_root| { + let summary = self + .state_summaries_by_state_root + .get(state_root) + .ok_or(Error::MissingStateSummary(*state_root))?; + Ok((summary.latest_block_root, summary.latest_block_slot)) + }) + .collect() + } + + // Returns all unique latest blocks of this DAG's summaries + pub fn iter_blocks(&self) -> impl Iterator + '_ { + self.state_summaries_by_state_root + .values() + .map(|summary| (summary.latest_block_root, summary.latest_block_slot)) + .unique() + } + + /// Returns a vec of state summaries that have an unknown parent when forming the DAG tree + pub fn tree_roots(&self) -> Vec<(Hash256, DAGStateSummary)> { + self.state_summaries_by_state_root + .iter() + .filter_map(|(state_root, summary)| { + if self + .state_summaries_by_state_root + .contains_key(&summary.previous_state_root) + { + // Summaries with a known parent are not roots + None + } else { + Some((*state_root, *summary)) + } + }) + .collect() + } + + pub fn summaries_count(&self) -> usize { + self.state_summaries_by_block_root + .values() + .map(|s| s.len()) + .sum() + } + + pub fn summaries_by_slot_ascending(&self) -> BTreeMap> { + let mut summaries = BTreeMap::>::new(); + for (state_root, summary) in self.state_summaries_by_state_root.iter() { + summaries + .entry(summary.slot) + .or_default() + .push((*state_root, *summary)); + } + summaries + } + + pub fn previous_state_root(&self, state_root: Hash256) -> Result { + let summary = self + .state_summaries_by_state_root + .get(&state_root) + 
.ok_or(Error::MissingStateSummary(state_root))?; + if summary.previous_state_root == Hash256::ZERO { + Err(Error::RootUnknownPreviousStateRoot( + summary.slot, + state_root, + )) + } else { + Ok(summary.previous_state_root) + } + } + + pub fn ancestor_state_root_at_slot( + &self, + starting_state_root: Hash256, + ancestor_slot: Slot, + ) -> Result { + let mut state_root = starting_state_root; + // Walk backwards until we reach the state at `ancestor_slot`. + loop { + let summary = self + .state_summaries_by_state_root + .get(&state_root) + .ok_or(Error::MissingStateSummary(state_root))?; + + // Assumes all summaries are contiguous + match summary.slot.cmp(&ancestor_slot) { + Ordering::Less => { + return Err(Error::RequestedSlotAboveSummary { + starting_state_root, + ancestor_slot, + state_root, + state_slot: summary.slot, + }) + } + Ordering::Equal => { + return Ok(state_root); + } + Ordering::Greater => { + if summary.previous_state_root == Hash256::ZERO { + return Err(Error::RootUnknownAncestorStateRoot { + starting_state_root, + ancestor_slot, + root_state_root: state_root, + root_state_slot: summary.slot, + }); + } else { + state_root = summary.previous_state_root; + } + } + } + } + } + + /// Returns all ancestors of `state_root` INCLUDING `state_root` until the next parent is not + /// known. + pub fn ancestors_of(&self, mut state_root: Hash256) -> Result, Error> { + // Sanity check that the first summary exists + if !self.state_summaries_by_state_root.contains_key(&state_root) { + return Err(Error::MissingStateSummary(state_root)); + } + + let mut ancestors = vec![]; + loop { + if let Some(summary) = self.state_summaries_by_state_root.get(&state_root) { + ancestors.push((state_root, summary.slot)); + state_root = summary.previous_state_root + } else { + return Ok(ancestors); + } + } + } + + /// Returns of the descendant state summaries roots given an initiail state root. 
+ pub fn descendants_of(&self, query_state_root: &Hash256) -> Result, Error> { + let mut descendants = vec![]; + for child_root in self + .child_state_roots + .get(query_state_root) + .ok_or(Error::MissingChildStateRoot(*query_state_root))? + { + descendants.push(*child_root); + descendants.extend(self.descendants_of(child_root)?); + } + Ok(descendants) + } +} + +#[cfg(test)] +mod tests { + use super::{DAGStateSummaryV22, Error, StateSummariesDAG}; + use bls::FixedBytesExtended; + use types::{Hash256, Slot}; + + fn root(n: u64) -> Hash256 { + Hash256::from_low_u64_le(n) + } + + #[test] + fn new_from_v22_empty() { + StateSummariesDAG::new_from_v22(vec![]).unwrap(); + } + + fn assert_previous_state_root_is_zero(dag: &StateSummariesDAG, root: Hash256) { + assert!(matches!( + dag.previous_state_root(root).unwrap_err(), + Error::RootUnknownPreviousStateRoot { .. } + )); + } + + #[test] + fn new_from_v22_one_state() { + let root_a = root(0xa); + let root_1 = root(1); + let root_2 = root(2); + let summary_1 = DAGStateSummaryV22 { + slot: Slot::new(1), + latest_block_root: root_1, + block_parent_root: root_2, + block_slot: Slot::new(1), + }; + + let dag = StateSummariesDAG::new_from_v22(vec![(root_a, summary_1)]).unwrap(); + + // The parent of the root summary is ZERO + assert_previous_state_root_is_zero(&dag, root_a); + } + + #[test] + fn new_from_v22_multiple_states() { + let dag = StateSummariesDAG::new_from_v22(vec![ + ( + root(0xa), + DAGStateSummaryV22 { + slot: Slot::new(3), + latest_block_root: root(3), + block_parent_root: root(1), + block_slot: Slot::new(3), + }, + ), + ( + root(0xb), + DAGStateSummaryV22 { + slot: Slot::new(4), + latest_block_root: root(4), + block_parent_root: root(3), + block_slot: Slot::new(4), + }, + ), + // fork 1 + ( + root(0xc), + DAGStateSummaryV22 { + slot: Slot::new(5), + latest_block_root: root(5), + block_parent_root: root(4), + block_slot: Slot::new(5), + }, + ), + // fork 2 + // skipped slot + ( + root(0xd), + DAGStateSummaryV22 { 
+ slot: Slot::new(5), + latest_block_root: root(4), + block_parent_root: root(3), + block_slot: Slot::new(4), + }, + ), + // normal slot + ( + root(0xe), + DAGStateSummaryV22 { + slot: Slot::new(6), + latest_block_root: root(6), + block_parent_root: root(4), + block_slot: Slot::new(6), + }, + ), + ]) + .unwrap(); + + // The parent of the root summary is ZERO + assert_previous_state_root_is_zero(&dag, root(0xa)); + assert_eq!(dag.previous_state_root(root(0xc)).unwrap(), root(0xb)); + assert_eq!(dag.previous_state_root(root(0xd)).unwrap(), root(0xb)); + assert_eq!(dag.previous_state_root(root(0xe)).unwrap(), root(0xd)); + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index beff95eb77..fe78d83c03 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -893,6 +893,28 @@ where state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() } + pub fn knows_head(&self, block_hash: &SignedBeaconBlockHash) -> bool { + self.chain + .heads() + .iter() + .any(|(head, _)| *head == Hash256::from(*block_hash)) + } + + pub fn assert_knows_head(&self, head_block_root: Hash256) { + let heads = self.chain.heads(); + if !heads.iter().any(|(head, _)| *head == head_block_root) { + let fork_choice = self.chain.canonical_head.fork_choice_read_lock(); + if heads.is_empty() { + let nodes = &fork_choice.proto_array().core_proto_array().nodes; + panic!("Expected to know head block root {head_block_root:?}, but heads is empty. 
Nodes: {nodes:#?}"); + } else { + panic!( + "Expected to know head block root {head_block_root:?}, known heads {heads:#?}" + ); + } + } + } + pub async fn make_blinded_block( &self, state: BeaconState, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index ac7627b0b1..4c4f0d8c6a 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1419,8 +1419,8 @@ async fn recover_from_invalid_head_after_persist_and_reboot() { let slot_clock = rig.harness.chain.slot_clock.clone(); - // Forcefully persist the head and fork choice. - rig.harness.chain.persist_head_and_fork_choice().unwrap(); + // Forcefully persist fork choice. + rig.harness.chain.persist_fork_choice().unwrap(); let resumed = BeaconChainHarness::builder(MainnetEthSpec) .default_spec() diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 38ff87d0c8..e41f547fb5 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -31,7 +31,6 @@ use store::{ BlobInfo, DBColumn, HotColdDB, StoreConfig, }; use tempfile::{tempdir, TempDir}; -use tokio::time::sleep; use types::test_utils::{SeedableRng, XorShiftRng}; use types::*; @@ -120,6 +119,17 @@ fn get_harness_generic( harness } +fn count_states_descendant_of_block( + store: &HotColdDB, BeaconNodeBackend>, + block_root: Hash256, +) -> usize { + let summaries = store.load_hot_state_summaries().unwrap(); + summaries + .iter() + .filter(|(_, s)| s.latest_block_root == block_root) + .count() +} + #[tokio::test] async fn light_client_bootstrap_test() { let spec = test_spec::(); @@ -1225,7 +1235,7 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { assert_eq!(rig.get_finalized_checkpoints(), hashset! 
{}); - assert!(rig.chain.knows_head(&stray_head)); + rig.assert_knows_head(stray_head.into()); // Trigger finalization let finalization_slots: Vec = ((canonical_chain_slot + 1) @@ -1273,7 +1283,7 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { ); } - assert!(!rig.chain.knows_head(&stray_head)); + assert!(!rig.knows_head(&stray_head)); } #[tokio::test] @@ -1399,7 +1409,7 @@ async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { ); } - assert!(!rig.chain.knows_head(&stray_head)); + assert!(!rig.knows_head(&stray_head)); let chain_dump = rig.chain.chain_dump().unwrap(); assert!(get_blocks(&chain_dump).contains(&shared_head)); } @@ -1492,7 +1502,7 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { ); } - assert!(rig.chain.knows_head(&stray_head)); + rig.assert_knows_head(stray_head.into()); } #[tokio::test] @@ -1576,7 +1586,7 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { // Precondition: Nothing is finalized yet assert_eq!(rig.get_finalized_checkpoints(), hashset! {},); - assert!(rig.chain.knows_head(&stray_head)); + rig.assert_knows_head(stray_head.into()); // Trigger finalization let canonical_slots: Vec = (rig.epoch_start_slot(2)..=rig.epoch_start_slot(6)) @@ -1631,7 +1641,7 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { ); } - assert!(!rig.chain.knows_head(&stray_head)); + assert!(!rig.knows_head(&stray_head)); } // This is to check if state outside of normal block processing are pruned correctly. @@ -2150,64 +2160,6 @@ async fn pruning_test( check_no_blocks_exist(&harness, stray_blocks.values()); } -#[tokio::test] -async fn garbage_collect_temp_states_from_failed_block_on_startup() { - let db_path = tempdir().unwrap(); - - // Wrap these functions to ensure the variables are dropped before we try to open another - // instance of the store. 
- let mut store = { - let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - - let slots_per_epoch = E::slots_per_epoch(); - - let genesis_state = harness.get_current_state(); - let block_slot = Slot::new(2 * slots_per_epoch); - let ((signed_block, _), state) = harness.make_block(genesis_state, block_slot).await; - - let (mut block, _) = (*signed_block).clone().deconstruct(); - - // Mutate the block to make it invalid, and re-sign it. - *block.state_root_mut() = Hash256::repeat_byte(0xff); - let proposer_index = block.proposer_index() as usize; - let block = Arc::new(block.sign( - &harness.validator_keypairs[proposer_index].sk, - &state.fork(), - state.genesis_validators_root(), - &harness.spec, - )); - - // The block should be rejected, but should store a bunch of temporary states. - harness.set_current_slot(block_slot); - harness - .process_block_result((block, None)) - .await - .unwrap_err(); - - assert_eq!( - store.iter_temporary_state_roots().count(), - block_slot.as_usize() - 1 - ); - store - }; - - // Wait until all the references to the store have been dropped, this helps ensure we can - // re-open the store later. - loop { - store = if let Err(store_arc) = Arc::try_unwrap(store) { - sleep(Duration::from_millis(500)).await; - store_arc - } else { - break; - } - } - - // On startup, the store should garbage collect all the temporary states. - let store = get_store(&db_path); - assert_eq!(store.iter_temporary_state_roots().count(), 0); -} - #[tokio::test] async fn garbage_collect_temp_states_from_failed_block_on_finalization() { let db_path = tempdir().unwrap(); @@ -2222,6 +2174,7 @@ async fn garbage_collect_temp_states_from_failed_block_on_finalization() { let ((signed_block, _), state) = harness.make_block(genesis_state, block_slot).await; let (mut block, _) = (*signed_block).clone().deconstruct(); + let bad_block_parent_root = block.parent_root(); // Mutate the block to make it invalid, and re-sign it. 
*block.state_root_mut() = Hash256::repeat_byte(0xff); @@ -2240,9 +2193,11 @@ async fn garbage_collect_temp_states_from_failed_block_on_finalization() { .await .unwrap_err(); + // The bad block parent root is the genesis block root. There's `block_slot - 1` temporary + // states to remove + the genesis state = block_slot. assert_eq!( - store.iter_temporary_state_roots().count(), - block_slot.as_usize() - 1 + count_states_descendant_of_block(&store, bad_block_parent_root), + block_slot.as_usize(), ); // Finalize the chain without the block, which should result in pruning of all temporary states. @@ -2259,8 +2214,12 @@ async fn garbage_collect_temp_states_from_failed_block_on_finalization() { // Check that the finalization migration ran. assert_ne!(store.get_split_slot(), 0); - // Check that temporary states have been pruned. - assert_eq!(store.iter_temporary_state_roots().count(), 0); + // Check that temporary states have been pruned. The genesis block is not a descendant of the + // latest finalized checkpoint, so all its states have been pruned from the hot DB, = 0. + assert_eq!( + count_states_descendant_of_block(&store, bad_block_parent_root), + 0 + ); } #[tokio::test] @@ -2785,8 +2744,8 @@ async fn finalizes_after_resuming_from_db() { harness .chain - .persist_head_and_fork_choice() - .expect("should persist the head and fork choice"); + .persist_fork_choice() + .expect("should persist fork choice"); harness .chain .persist_op_pool() @@ -2999,11 +2958,13 @@ async fn revert_minority_fork_on_resume() { resumed_harness.chain.recompute_head_at_current_slot().await; assert_eq!(resumed_harness.head_slot(), fork_slot - 1); - // Head track should know the canonical head and the rogue head. - assert_eq!(resumed_harness.chain.heads().len(), 2); - assert!(resumed_harness - .chain - .knows_head(&resumed_harness.head_block_root().into())); + // Fork choice should only know the canonical head. 
When we reverted the head we also should + // have called `reset_fork_choice_to_finalization` which rebuilds fork choice from scratch + // without the reverted block. + assert_eq!( + resumed_harness.chain.heads(), + vec![(resumed_harness.head_block_root(), fork_slot - 1)] + ); // Apply blocks from the majority chain and trigger finalization. let initial_split_slot = resumed_harness.chain.store.get_split_slot(); diff --git a/beacon_node/store/src/database/leveldb_impl.rs b/beacon_node/store/src/database/leveldb_impl.rs index 3d8bbe1473..81d6d1d4bd 100644 --- a/beacon_node/store/src/database/leveldb_impl.rs +++ b/beacon_node/store/src/database/leveldb_impl.rs @@ -195,7 +195,6 @@ impl LevelDB { }; for (start_key, end_key) in [ - endpoints(DBColumn::BeaconStateTemporary), endpoints(DBColumn::BeaconState), endpoints(DBColumn::BeaconStateSummary), ] { diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 41fd17ef43..ed6154da80 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -25,7 +25,7 @@ pub enum Error { NoContinuationData, SplitPointModified(Slot, Slot), ConfigError(StoreConfigError), - SchemaMigrationError(String), + MigrationError(String), /// The store's `anchor_info` was mutated concurrently, the latest modification wasn't applied. AnchorInfoConcurrentMutation, /// The store's `blob_info` was mutated concurrently, the latest modification wasn't applied. diff --git a/beacon_node/store/src/garbage_collection.rs b/beacon_node/store/src/garbage_collection.rs deleted file mode 100644 index 586db44c89..0000000000 --- a/beacon_node/store/src/garbage_collection.rs +++ /dev/null @@ -1,36 +0,0 @@ -//! Garbage collection process that runs at start-up to clean up the database. 
-use crate::database::interface::BeaconNodeBackend; -use crate::hot_cold_store::HotColdDB; -use crate::{DBColumn, Error}; -use tracing::debug; -use types::EthSpec; - -impl HotColdDB, BeaconNodeBackend> -where - E: EthSpec, -{ - /// Clean up the database by performing one-off maintenance at start-up. - pub fn remove_garbage(&self) -> Result<(), Error> { - self.delete_temp_states()?; - Ok(()) - } - - /// Delete the temporary states that were leftover by failed block imports. - pub fn delete_temp_states(&self) -> Result<(), Error> { - let mut ops = vec![]; - self.iter_temporary_state_roots().for_each(|state_root| { - if let Ok(state_root) = state_root { - ops.push(state_root); - } - }); - if !ops.is_empty() { - debug!("Garbage collecting {} temporary states", ops.len()); - - self.delete_batch(DBColumn::BeaconState, ops.clone())?; - self.delete_batch(DBColumn::BeaconStateSummary, ops.clone())?; - self.delete_batch(DBColumn::BeaconStateTemporary, ops)?; - } - - Ok(()) - } -} diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 6a30d8a428..362c5d8014 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -14,8 +14,8 @@ use crate::metadata::{ }; use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ - get_data_column_key, metrics, parse_data_column_key, BlobSidecarListFromRoot, ColumnKeyIter, - DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, + get_data_column_key, metrics, parse_data_column_key, BlobSidecarListFromRoot, DBColumn, + DatabaseBlock, Error, ItemStore, KeyValueStoreOp, StoreItem, StoreOp, }; use itertools::{process_results, Itertools}; use lru::LruCache; @@ -36,7 +36,7 @@ use std::num::NonZeroUsize; use std::path::Path; use std::sync::Arc; use std::time::Duration; -use tracing::{debug, error, info, trace, warn}; +use tracing::{debug, error, info, warn}; use types::data_column_sidecar::{ColumnIndex, 
DataColumnSidecar, DataColumnSidecarList}; use types::*; use zstd::{Decoder, Encoder}; @@ -80,7 +80,7 @@ pub struct HotColdDB, Cold: ItemStore> { /// HTTP API. historic_state_cache: Mutex>, /// Chain spec. - pub(crate) spec: Arc, + pub spec: Arc, /// Mere vessel for E. _phantom: PhantomData, } @@ -161,7 +161,7 @@ pub enum HotColdDBError { MissingRestorePoint(Hash256), MissingColdStateSummary(Hash256), MissingHotStateSummary(Hash256), - MissingEpochBoundaryState(Hash256), + MissingEpochBoundaryState(Hash256, Hash256), MissingPrevState(Hash256), MissingSplitState(Hash256, Slot), MissingStateDiff(Hash256), @@ -390,8 +390,11 @@ impl HotColdDB, BeaconNodeBackend> { } db.store_config()?; - // Run a garbage collection pass. - db.remove_garbage()?; + // TODO(tree-states): Here we can choose to prune advanced states to reclaim disk space. As + // it's a foreground task there's no risk of race condition that can corrupt the DB. + // Advanced states for invalid blocks that were never written to the DB, or descendants of + // heads can be safely pruned at the expense of potentially having to recompute them in the + // future. However this would require a new dedicated pruning routine. // If configured, run a foreground compaction pass. if db.config.compact_on_init { @@ -402,12 +405,6 @@ impl HotColdDB, BeaconNodeBackend> { Ok(db) } - - /// Return an iterator over the state roots of all temporary states. - pub fn iter_temporary_state_roots(&self) -> ColumnKeyIter { - self.hot_db - .iter_column_keys::(DBColumn::BeaconStateTemporary) - } } impl, Cold: ItemStore> HotColdDB { @@ -903,26 +900,11 @@ impl, Cold: ItemStore> HotColdDB /// Store a state in the store. pub fn put_state(&self, state_root: &Hash256, state: &BeaconState) -> Result<(), Error> { - self.put_state_possibly_temporary(state_root, state, false) - } - - /// Store a state in the store. - /// - /// The `temporary` flag indicates whether this state should be considered canonical. 
- pub fn put_state_possibly_temporary( - &self, - state_root: &Hash256, - state: &BeaconState, - temporary: bool, - ) -> Result<(), Error> { let mut ops: Vec = Vec::new(); if state.slot() < self.get_split_slot() { self.store_cold_state(state_root, state, &mut ops)?; self.cold_db.do_atomically(ops) } else { - if temporary { - ops.push(TemporaryFlag.as_kv_store_op(*state_root)); - } self.store_hot_state(state_root, state, &mut ops)?; self.hot_db.do_atomically(ops) } @@ -1138,6 +1120,7 @@ impl, Cold: ItemStore> HotColdDB .load_hot_state(&epoch_boundary_state_root, true)? .ok_or(HotColdDBError::MissingEpochBoundaryState( epoch_boundary_state_root, + *state_root, ))?; Ok(Some(state)) } else { @@ -1201,17 +1184,6 @@ impl, Cold: ItemStore> HotColdDB key_value_batch.push(summary.as_kv_store_op(state_root)); } - StoreOp::PutStateTemporaryFlag(state_root) => { - key_value_batch.push(TemporaryFlag.as_kv_store_op(state_root)); - } - - StoreOp::DeleteStateTemporaryFlag(state_root) => { - key_value_batch.push(KeyValueStoreOp::DeleteKey( - TemporaryFlag::db_column(), - state_root.as_slice().to_vec(), - )); - } - StoreOp::DeleteBlock(block_root) => { key_value_batch.push(KeyValueStoreOp::DeleteKey( DBColumn::BeaconBlock, @@ -1241,13 +1213,6 @@ impl, Cold: ItemStore> HotColdDB state_root.as_slice().to_vec(), )); - // Delete the state temporary flag (if any). Temporary flags are commonly - // created by the state advance routine. 
- key_value_batch.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconStateTemporary, - state_root.as_slice().to_vec(), - )); - if slot.is_none_or(|slot| slot % E::slots_per_epoch() == 0) { key_value_batch.push(KeyValueStoreOp::DeleteKey( DBColumn::BeaconState, @@ -1408,10 +1373,6 @@ impl, Cold: ItemStore> HotColdDB StoreOp::PutStateSummary(_, _) => (), - StoreOp::PutStateTemporaryFlag(_) => (), - - StoreOp::DeleteStateTemporaryFlag(_) => (), - StoreOp::DeleteBlock(block_root) => { guard.delete_block(&block_root); self.state_cache.lock().delete_block_states(&block_root); @@ -1492,8 +1453,8 @@ impl, Cold: ItemStore> HotColdDB // On the epoch boundary, store the full state. if state.slot() % E::slots_per_epoch() == 0 { - trace!( - slot = %state.slot().as_u64(), + debug!( + slot = %state.slot(), ?state_root, "Storing full state on epoch boundary" ); @@ -1571,12 +1532,6 @@ impl, Cold: ItemStore> HotColdDB ) -> Result, Hash256)>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT); - // If the state is marked as temporary, do not return it. It will become visible - // only once its transaction commits and deletes its temporary flag. - if self.load_state_temporary_flag(state_root)?.is_some() { - return Ok(None); - } - if let Some(HotStateSummary { slot, latest_block_root, @@ -1585,7 +1540,10 @@ impl, Cold: ItemStore> HotColdDB { let mut boundary_state = get_full_state(&self.hot_db, &epoch_boundary_state_root, &self.spec)?.ok_or( - HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root), + HotColdDBError::MissingEpochBoundaryState( + epoch_boundary_state_root, + *state_root, + ), )?; // Immediately rebase the state from disk on the finalized state so that we can reuse @@ -2545,15 +2503,16 @@ impl, Cold: ItemStore> HotColdDB self.hot_db.get(state_root) } - /// Load the temporary flag for a state root, if one exists. 
- /// - /// Returns `Some` if the state is temporary, or `None` if the state is permanent or does not - /// exist -- you should call `load_hot_state_summary` to find out which. - pub fn load_state_temporary_flag( - &self, - state_root: &Hash256, - ) -> Result, Error> { - self.hot_db.get(state_root) + /// Load all hot state summaries present in the hot DB + pub fn load_hot_state_summaries(&self) -> Result, Error> { + self.hot_db + .iter_column::(DBColumn::BeaconStateSummary) + .map(|res| { + let (state_root, value) = res?; + let summary = HotStateSummary::from_ssz_bytes(&value)?; + Ok((state_root, summary)) + }) + .collect() } /// Run a compaction pass to free up space used by deleted states. @@ -2985,54 +2944,13 @@ impl, Cold: ItemStore> HotColdDB Ok(()) } - - /// Prune states from the hot database which are prior to the split. - /// - /// This routine is important for cleaning up advanced states which are stored in the database - /// with a temporary flag. - pub fn prune_old_hot_states(&self) -> Result<(), Error> { - let split = self.get_split_info(); - debug!( - %split.slot, - "Database state pruning started" - ); - let mut state_delete_batch = vec![]; - for res in self - .hot_db - .iter_column::(DBColumn::BeaconStateSummary) - { - let (state_root, summary_bytes) = res?; - let summary = HotStateSummary::from_ssz_bytes(&summary_bytes)?; - - if summary.slot <= split.slot { - let old = summary.slot < split.slot; - let non_canonical = summary.slot == split.slot - && state_root != split.state_root - && !split.state_root.is_zero(); - if old || non_canonical { - let reason = if old { - "old dangling state" - } else { - "non-canonical" - }; - debug!( - ?state_root, - slot = %summary.slot, - %reason, - "Deleting state" - ); - state_delete_batch.push(StoreOp::DeleteState(state_root, Some(summary.slot))); - } - } - } - let num_deleted_states = state_delete_batch.len(); - self.do_atomically_with_block_and_blobs_cache(state_delete_batch)?; - debug!(%num_deleted_states, 
"Database state pruning complete"); - Ok(()) - } } -/// Advance the split point of the store, moving new finalized states to the freezer. +/// Advance the split point of the store, copying new finalized states to the freezer. +/// +/// This function previously did a combination of freezer migration alongside pruning. Now it is +/// *just* responsible for copying relevant data to the freezer, while pruning is implemented +/// in `prune_hot_db`. pub fn migrate_database, Cold: ItemStore>( store: Arc>, finalized_state_root: Hash256, @@ -3064,29 +2982,17 @@ pub fn migrate_database, Cold: ItemStore>( return Err(HotColdDBError::FreezeSlotUnaligned(finalized_state.slot()).into()); } - let mut hot_db_ops = vec![]; let mut cold_db_block_ops = vec![]; - let mut epoch_boundary_blocks = HashSet::new(); - let mut non_checkpoint_block_roots = HashSet::new(); // Iterate in descending order until the current split slot - let state_roots = RootsIterator::new(&store, finalized_state) - .take_while(|result| match result { - Ok((_, _, slot)) => *slot >= current_split_slot, - Err(_) => true, - }) - .collect::, _>>()?; + let state_roots: Vec<_> = + process_results(RootsIterator::new(&store, finalized_state), |iter| { + iter.take_while(|(_, _, slot)| *slot >= current_split_slot) + .collect() + })?; // Then, iterate states in slot ascending order, as they are stored wrt previous states. for (block_root, state_root, slot) in state_roots.into_iter().rev() { - // Delete the execution payload if payload pruning is enabled. At a skipped slot we may - // delete the payload for the finalized block itself, but that's OK as we only guarantee - // that payloads are present for slots >= the split slot. The payload fetching code is also - // forgiving of missing payloads. - if store.config.prune_payloads { - hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root)); - } - // Store the slot to block root mapping. 
cold_db_block_ops.push(KeyValueStoreOp::PutKeyValue( DBColumn::BeaconBlockRoots, @@ -3094,44 +3000,27 @@ pub fn migrate_database, Cold: ItemStore>( block_root.as_slice().to_vec(), )); - // At a missed slot, `state_root_iter` will return the block root - // from the previous non-missed slot. This ensures that the block root at an - // epoch boundary is always a checkpoint block root. We keep track of block roots - // at epoch boundaries by storing them in the `epoch_boundary_blocks` hash set. - // We then ensure that block roots at the epoch boundary aren't included in the - // `non_checkpoint_block_roots` hash set. - if slot % E::slots_per_epoch() == 0 { - epoch_boundary_blocks.insert(block_root); - } else { - non_checkpoint_block_roots.insert(block_root); - } - - if epoch_boundary_blocks.contains(&block_root) { - non_checkpoint_block_roots.remove(&block_root); - } - - // Delete the old summary, and the full state if we lie on an epoch boundary. - hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot))); - // Do not try to store states if a restore point is yet to be stored, or will never be // stored (see `STATE_UPPER_LIMIT_NO_RETAIN`). Make an exception for the genesis state // which always needs to be copied from the hot DB to the freezer and should not be deleted. if slot != 0 && slot < anchor_info.state_upper_limit { - debug!(%slot, "Pruning finalized state"); continue; } - let mut cold_db_ops = vec![]; + let mut cold_db_state_ops = vec![]; // Only store the cold state if it's on a diff boundary. // Calling `store_cold_state_summary` instead of `store_cold_state` for those allows us // to skip loading many hot states. - if matches!( - store.hierarchy.storage_strategy(slot)?, - StorageStrategy::ReplayFrom(..) - ) { + if let StorageStrategy::ReplayFrom(from) = store.hierarchy.storage_strategy(slot)? { // Store slot -> state_root and state_root -> slot mappings. 
- store.store_cold_state_summary(&state_root, slot, &mut cold_db_ops)?; + debug!( + strategy = "replay", + from_slot = %from, + %slot, + "Storing cold state" + ); + store.store_cold_state_summary(&state_root, slot, &mut cold_db_state_ops)?; } else { // This is some state that we want to migrate to the freezer db. // There is no reason to cache this state. @@ -3139,36 +3028,22 @@ pub fn migrate_database, Cold: ItemStore>( .get_hot_state(&state_root, false)? .ok_or(HotColdDBError::MissingStateToFreeze(state_root))?; - store.store_cold_state(&state_root, &state, &mut cold_db_ops)?; + store.store_cold_state(&state_root, &state, &mut cold_db_state_ops)?; } // Cold states are diffed with respect to each other, so we need to finish writing previous // states before storing new ones. - store.cold_db.do_atomically(cold_db_ops)?; + store.cold_db.do_atomically(cold_db_state_ops)?; } - // Prune sync committee branch data for all non checkpoint block roots. - // Note that `non_checkpoint_block_roots` should only contain non checkpoint block roots - // as long as `finalized_state.slot()` is at an epoch boundary. If this were not the case - // we risk the chance of pruning a `sync_committee_branch` for a checkpoint block root. - // E.g. if `current_split_slot` = (Epoch A slot 0) and `finalized_state.slot()` = (Epoch C slot 31) - // and (Epoch D slot 0) is a skipped slot, we will have pruned a `sync_committee_branch` - // for a checkpoint block root. - non_checkpoint_block_roots - .into_iter() - .for_each(|block_root| { - hot_db_ops.push(StoreOp::DeleteSyncCommitteeBranch(block_root)); - }); - - // Warning: Critical section. We have to take care not to put any of the two databases in an + // Warning: Critical section. We have to take care not to put any of the two databases in an // inconsistent state if the OS process dies at any point during the freezing // procedure. 
// // Since it is pretty much impossible to be atomic across more than one database, we trade - // losing track of states to delete, for consistency. In other words: We should be safe to die - // at any point below but it may happen that some states won't be deleted from the hot database - // and will remain there forever. Since dying in these particular few lines should be an - // exceedingly rare event, this should be an acceptable tradeoff. + // potentially re-doing the migration to copy data to the freezer, for consistency. If we crash + // after writing all new block & state data to the freezer but before updating the split, then + // in the worst case we will restart with the old split and re-run the migration. store.cold_db.do_atomically(cold_db_block_ops)?; store.cold_db.sync()?; { @@ -3181,7 +3056,7 @@ pub fn migrate_database, Cold: ItemStore>( error!( previous_split_slot = %current_split_slot, current_split_slot = %latest_split_slot, - "Race condition detected: Split point changed while moving states to the freezer" + "Race condition detected: Split point changed while copying states to the freezer" ); // Assume the freezing procedure will be retried in case this happens. @@ -3206,9 +3081,6 @@ pub fn migrate_database, Cold: ItemStore>( *split_guard = split; } - // Delete the blocks and states from the hot database if we got this far. - store.do_atomically_with_block_and_blobs_cache(hot_db_ops)?; - // Update the cache's view of the finalized state. 
store.update_finalized_state( finalized_state_root, @@ -3325,23 +3197,6 @@ impl StoreItem for ColdStateSummary { } } -#[derive(Debug, Clone, Copy, Default)] -pub struct TemporaryFlag; - -impl StoreItem for TemporaryFlag { - fn db_column() -> DBColumn { - DBColumn::BeaconStateTemporary - } - - fn as_store_bytes(&self) -> Vec { - vec![] - } - - fn from_store_bytes(_: &[u8]) -> Result { - Ok(TemporaryFlag) - } -} - #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] pub struct BytesKey { pub key: Vec, diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 2b5be03489..5b30971fd8 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -14,7 +14,6 @@ pub mod config; pub mod consensus_context; pub mod errors; mod forwards_iter; -mod garbage_collection; pub mod hdiff; pub mod historic_state_cache; pub mod hot_cold_store; @@ -241,8 +240,6 @@ pub enum StoreOp<'a, E: EthSpec> { PutBlobs(Hash256, BlobSidecarList), PutDataColumns(Hash256, DataColumnSidecarList), PutStateSummary(Hash256, HotStateSummary), - PutStateTemporaryFlag(Hash256), - DeleteStateTemporaryFlag(Hash256), DeleteBlock(Hash256), DeleteBlobs(Hash256), DeleteDataColumns(Hash256, Vec), @@ -287,8 +284,10 @@ pub enum DBColumn { /// Mapping from state root to `ColdStateSummary` in the cold DB. #[strum(serialize = "bcs")] BeaconColdStateSummary, - /// For the list of temporary states stored during block import, - /// and then made non-temporary by the deletion of their state root from this column. + /// DEPRECATED. + /// + /// Previously used for the list of temporary states stored during block import, and then made + /// non-temporary by the deletion of their state root from this column. #[strum(serialize = "bst")] BeaconStateTemporary, /// Execution payloads for blocks more recent than the finalized checkpoint. 
diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 1d70e105b9..55c64bf850 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(22); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(23); // All the keys that get stored under the `BeaconMeta` column. // diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index cf6ebb3b00..cbae54bd36 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1041,6 +1041,21 @@ impl ProtoArray { }) .map(|node| node.root) } + + /// Returns all nodes that have zero children and are descended from the finalized checkpoint. + /// + /// For informational purposes like the beacon HTTP API, we use this as the list of known heads, + /// even though some of them might not be viable. We do this to maintain consistency between the + /// definition of "head" used by pruning (which does not consider viability) and fork choice. + pub fn heads_descended_from_finalization(&self) -> Vec<&ProtoNode> { + self.nodes + .iter() + .filter(|node| { + node.best_child.is_none() + && self.is_finalized_checkpoint_or_descendant::(node.root) + }) + .collect() + } } /// A helper method to calculate the proposer boost based on the given `justified_balances`. 
diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 4da632bf58..880c93d5c9 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -885,6 +885,11 @@ impl ProtoArrayForkChoice { pub fn core_proto_array_mut(&mut self) -> &mut ProtoArray { &mut self.proto_array } + + /// Returns all nodes that have zero children and are descended from the finalized checkpoint. + pub fn heads_descended_from_finalization(&self) -> Vec<&ProtoNode> { + self.proto_array.heads_descended_from_finalization::() + } } /// Returns a list of `deltas`, where there is one delta for each of the indices in From 47a85cd1186d94c2aa8f00c67cd9ccd7e4a88178 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 7 Apr 2025 16:01:20 +1000 Subject: [PATCH 17/35] Bump version to v7.1.0-beta.0 (not a release) (#7269) Having merged the drop-headtracker PR we now have a DB schema change in `unstable` compared to `release-v7.0.0`: - https://github.com/sigp/lighthouse/pull/6744 There is a DB downgrade available, however this needs to be applied manually and it's usually a bit of a hassle. This PR bumps the version on `unstable` to `v7.1.0-beta.0` _without_ actually cutting a `v7.1.0-beta.0` release, so that we can tell at a glance which schema version a node is using. 
--- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 6 +++--- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 10761f08bb..86019c913d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -808,7 +808,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "7.0.0-beta.5" +version = "7.1.0-beta.0" dependencies = [ "account_utils", "beacon_chain", @@ -1046,7 +1046,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "7.0.0-beta.5" +version = "7.1.0-beta.0" dependencies = [ "beacon_node", "bytes", @@ -4704,7 +4704,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "7.0.0-beta.5" +version = "7.1.0-beta.0" dependencies = [ "account_utils", "beacon_chain", @@ -5266,7 +5266,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "7.0.0-beta.5" +version = "7.1.0-beta.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index cf963535c7..30d6846964 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "7.0.0-beta.5" +version = "7.1.0-beta.0" authors = [ "Paul Hauner ", "Age Manning "] edition = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index bd5e31e3ab..b20708e7b0 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v7.0.0-beta.5-", - fallback = "Lighthouse/v7.0.0-beta.5" + prefix = "Lighthouse/v7.1.0-beta.0-", + fallback = "Lighthouse/v7.1.0-beta.0" ); /// Returns the first eight characters of the 
latest commit hash for this build. @@ -54,7 +54,7 @@ pub fn version_with_platform() -> String { /// /// `1.5.1` pub fn version() -> &'static str { - "7.0.0-beta.5" + "7.1.0-beta.0" } /// Returns the name of the current client running. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 22b19f7413..9acbe2569c 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "7.0.0-beta.5" +version = "7.1.0-beta.0" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 3774a9c458..04c8efcdba 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "7.0.0-beta.5" +version = "7.1.0-beta.0" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false From e924264e17b8917ef077639edaa6043610347f20 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 8 Apr 2025 13:20:31 +1000 Subject: [PATCH 18/35] Fullnodes to publish data columns from EL `getBlobs` (#7258) Previously only supernode contributes to data column publishing in Lighthouse. Recently we've [updated the spec](https://github.com/ethereum/consensus-specs/pull/4183) to have full nodes publishing data columns as well, to ensure all nodes contributes to propagation. This also prevents already imported data columns from being imported again (because we don't "observe" them), and ensures columns that are observed in the [gossip seen cache](https://github.com/sigp/lighthouse/blob/d60c24ef1cc0b5dfa930e1dd4fc85abc29e5fc4c/beacon_node/beacon_chain/src/data_column_verification.rs#L492) are forwarded to its peers, rather than being ignored. 
--- .../src/network_beacon_processor/mod.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 3431c1abb9..cdcbe1bb8d 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -843,19 +843,19 @@ impl NetworkBeaconProcessor { block_root: Hash256, publish_blobs: bool, ) { + let custody_columns = self.network_globals.sampling_columns.clone(); let is_supernode = self.network_globals.is_supernode(); - let self_cloned = self.clone(); let publish_fn = move |blobs_or_data_column| { - // At the moment non supernodes are not required to publish any columns. - // TODO(das): we could experiment with having full nodes publish their custodied - // columns here. - if publish_blobs && is_supernode { + if publish_blobs { match blobs_or_data_column { BlobsOrDataColumns::Blobs(blobs) => { self_cloned.publish_blobs_gradually(blobs, block_root); } - BlobsOrDataColumns::DataColumns(columns) => { + BlobsOrDataColumns::DataColumns(mut columns) => { + if !is_supernode { + columns.retain(|col| custody_columns.contains(&col.index)); + } self_cloned.publish_data_columns_gradually(columns, block_root); } }; @@ -1055,7 +1055,7 @@ impl NetworkBeaconProcessor { /// /// This is an optimisation to reduce outbound bandwidth and ensures each column is published /// by some nodes on the network as soon as possible. Our hope is that some columns arrive from - /// other supernodes in the meantime, obviating the need for us to publish them. If no other + /// other nodes in the meantime, obviating the need for us to publish them. If no other /// publisher exists for a column, it will eventually get published here. 
fn publish_data_columns_gradually( self: &Arc, @@ -1080,9 +1080,9 @@ impl NetworkBeaconProcessor { }); }; - // If this node is a super node, permute the columns and split them into batches. + // Permute the columns and split them into batches. // The hope is that we won't need to publish some columns because we will receive them - // on gossip from other supernodes. + // on gossip from other nodes. data_columns_to_publish.shuffle(&mut rand::thread_rng()); let blob_publication_batch_interval = chain.config.blob_publication_batch_interval; From 759b0612b37f354b70bd00298082e6aacb20ba89 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 8 Apr 2025 17:37:16 +1000 Subject: [PATCH 19/35] Offloading KZG Proof Computation from the beacon node (#7117) Addresses #7108 - Add EL integration for `getPayloadV5` and `getBlobsV2` - Offload proof computation and use proofs from EL RPC APIs --- beacon_node/beacon_chain/benches/benches.rs | 12 +- beacon_node/beacon_chain/src/beacon_chain.rs | 137 ++++---- .../beacon_chain/src/block_verification.rs | 15 +- beacon_node/beacon_chain/src/builder.rs | 59 +++- .../src/data_availability_checker.rs | 64 ++-- .../overflow_lru_cache.rs | 82 +---- .../src/data_column_verification.rs | 43 ++- .../beacon_chain/src/early_attester_cache.rs | 4 - beacon_node/beacon_chain/src/fetch_blobs.rs | 301 +++++++++++------- .../beacon_chain/src/fulu_readiness.rs | 8 +- .../beacon_chain/src/historical_blocks.rs | 2 +- beacon_node/beacon_chain/src/kzg_utils.rs | 143 +++++++-- beacon_node/beacon_chain/src/metrics.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 6 +- beacon_node/execution_layer/src/engine_api.rs | 13 +- .../execution_layer/src/engine_api/http.rs | 37 ++- .../src/engine_api/json_structures.rs | 15 +- beacon_node/execution_layer/src/lib.rs | 35 +- .../test_utils/execution_block_generator.rs | 126 +++++--- .../fixtures/mainnet/test_blobs_bundle_v2.ssz | Bin 0 -> 137276 bytes .../src/test_utils/handle_rpc.rs | 21 +- 
.../src/test_utils/mock_builder.rs | 6 +- .../execution_layer/src/test_utils/mod.rs | 1 + beacon_node/http_api/src/publish_blocks.rs | 5 +- .../gossip_methods.rs | 3 +- .../src/network_beacon_processor/mod.rs | 7 +- consensus/types/src/data_column_sidecar.rs | 4 +- consensus/types/src/eth_spec.rs | 12 +- consensus/types/src/lib.rs | 9 +- crypto/kzg/src/lib.rs | 13 +- scripts/local_testnet/network_params_das.yaml | 12 +- 31 files changed, 721 insertions(+), 476 deletions(-) create mode 100644 beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle_v2.ssz diff --git a/beacon_node/beacon_chain/benches/benches.rs b/beacon_node/beacon_chain/benches/benches.rs index c09af00be6..aae627da13 100644 --- a/beacon_node/beacon_chain/benches/benches.rs +++ b/beacon_node/beacon_chain/benches/benches.rs @@ -5,16 +5,16 @@ use beacon_chain::test_utils::get_kzg; use criterion::{black_box, criterion_group, criterion_main, Criterion}; use bls::Signature; -use kzg::KzgCommitment; +use kzg::{KzgCommitment, KzgProof}; use types::{ beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, Blob, BlobsList, ChainSpec, - EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, + EmptyBlock, EthSpec, KzgProofs, MainnetEthSpec, SignedBeaconBlock, }; fn create_test_block_and_blobs( num_of_blobs: usize, spec: &ChainSpec, -) -> (SignedBeaconBlock, BlobsList) { +) -> (SignedBeaconBlock, BlobsList, KzgProofs) { let mut block = BeaconBlock::Deneb(BeaconBlockDeneb::empty(spec)); let mut body = block.body_mut(); let blob_kzg_commitments = body.blob_kzg_commitments_mut().unwrap(); @@ -27,8 +27,9 @@ fn create_test_block_and_blobs( .map(|_| Blob::::default()) .collect::>() .into(); + let proofs = vec![KzgProof::empty(); num_of_blobs * spec.number_of_columns as usize].into(); - (signed_block, blobs) + (signed_block, blobs, proofs) } fn all_benches(c: &mut Criterion) { @@ -37,10 +38,11 @@ fn all_benches(c: &mut Criterion) { let kzg = get_kzg(&spec); for blob_count in [1, 2, 3, 
6] { - let (signed_block, blobs) = create_test_block_and_blobs::(blob_count, &spec); + let (signed_block, blobs, proofs) = create_test_block_and_blobs::(blob_count, &spec); let column_sidecars = blobs_to_data_column_sidecars( &blobs.iter().collect::>(), + proofs.to_vec(), &signed_block, &kzg, &spec, diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d9ac2fa6ea..d6475de243 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -31,6 +31,7 @@ use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::eth1_finalization_cache::{Eth1FinalizationCache, Eth1FinalizationData}; use crate::events::ServerSentEventHandler; use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, PreparePayloadHandle}; +use crate::fetch_blobs::EngineGetBlobsOutput; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::graffiti_calculator::GraffitiCalculator; use crate::kzg_utils::reconstruct_blobs; @@ -121,7 +122,6 @@ use store::{ KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; use task_executor::{ShutdownReason, TaskExecutor}; -use tokio::sync::oneshot; use tokio_stream::Stream; use tracing::{debug, error, info, trace, warn}; use tree_hash::TreeHash; @@ -3137,16 +3137,11 @@ impl BeaconChain { } /// Process blobs retrieved from the EL and returns the `AvailabilityProcessingStatus`. - /// - /// `data_column_recv`: An optional receiver for `DataColumnSidecarList`. - /// If PeerDAS is enabled, this receiver will be provided and used to send - /// the `DataColumnSidecar`s once they have been successfully computed. 
pub async fn process_engine_blobs( self: &Arc, slot: Slot, block_root: Hash256, - blobs: FixedBlobSidecarList, - data_column_recv: Option>>, + engine_get_blobs_output: EngineGetBlobsOutput, ) -> Result { // If this block has already been imported to forkchoice it must have been available, so // we don't need to process its blobs again. @@ -3160,15 +3155,12 @@ impl BeaconChain { // process_engine_blobs is called for both pre and post PeerDAS. However, post PeerDAS // consumers don't expect the blobs event to fire erratically. - if !self - .spec - .is_peer_das_enabled_for_epoch(slot.epoch(T::EthSpec::slots_per_epoch())) - { + if let EngineGetBlobsOutput::Blobs(blobs) = &engine_get_blobs_output { self.emit_sse_blob_sidecar_events(&block_root, blobs.iter().flatten().map(Arc::as_ref)); } let r = self - .check_engine_blob_availability_and_import(slot, block_root, blobs, data_column_recv) + .check_engine_blobs_availability_and_import(slot, block_root, engine_get_blobs_output) .await; self.remove_notified(&block_root, r) } @@ -3618,20 +3610,24 @@ impl BeaconChain { .await } - async fn check_engine_blob_availability_and_import( + async fn check_engine_blobs_availability_and_import( self: &Arc, slot: Slot, block_root: Hash256, - blobs: FixedBlobSidecarList, - data_column_recv: Option>>, + engine_get_blobs_output: EngineGetBlobsOutput, ) -> Result { - self.check_blobs_for_slashability(block_root, &blobs)?; - let availability = self.data_availability_checker.put_engine_blobs( - block_root, - slot.epoch(T::EthSpec::slots_per_epoch()), - blobs, - data_column_recv, - )?; + let availability = match engine_get_blobs_output { + EngineGetBlobsOutput::Blobs(blobs) => { + self.check_blobs_for_slashability(block_root, &blobs)?; + self.data_availability_checker + .put_engine_blobs(block_root, blobs)? 
+ } + EngineGetBlobsOutput::CustodyColumns(data_columns) => { + self.check_columns_for_slashability(block_root, &data_columns)?; + self.data_availability_checker + .put_engine_data_columns(block_root, data_columns)? + } + }; self.process_availability(slot, availability, || Ok(())) .await @@ -3645,27 +3641,7 @@ impl BeaconChain { block_root: Hash256, custody_columns: DataColumnSidecarList, ) -> Result { - // Need to scope this to ensure the lock is dropped before calling `process_availability` - // Even an explicit drop is not enough to convince the borrow checker. - { - let mut slashable_cache = self.observed_slashable.write(); - // Assumes all items in custody_columns are for the same block_root - if let Some(column) = custody_columns.first() { - let header = &column.signed_block_header; - if verify_header_signature::(self, header).is_ok() { - slashable_cache - .observe_slashable( - header.message.slot, - header.message.proposer_index, - block_root, - ) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; - if let Some(slasher) = self.slasher.as_ref() { - slasher.accept_block_header(header.clone()); - } - } - } - } + self.check_columns_for_slashability(block_root, &custody_columns)?; // This slot value is purely informative for the consumers of // `AvailabilityProcessingStatus::MissingComponents` to log an error with a slot. 
@@ -3677,6 +3653,31 @@ impl BeaconChain { .await } + fn check_columns_for_slashability( + self: &Arc, + block_root: Hash256, + custody_columns: &DataColumnSidecarList, + ) -> Result<(), BlockError> { + let mut slashable_cache = self.observed_slashable.write(); + // Assumes all items in custody_columns are for the same block_root + if let Some(column) = custody_columns.first() { + let header = &column.signed_block_header; + if verify_header_signature::(self, header).is_ok() { + slashable_cache + .observe_slashable( + header.message.slot, + header.message.proposer_index, + block_root, + ) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; + if let Some(slasher) = self.slasher.as_ref() { + slasher.accept_block_header(header.clone()); + } + } + } + Ok(()) + } + /// Imports a fully available block. Otherwise, returns `AvailabilityProcessingStatus::MissingComponents` /// /// An error is returned if the block was unable to be imported. It may be partially imported @@ -5798,15 +5799,26 @@ impl BeaconChain { let kzg_proofs = Vec::from(proofs); let kzg = self.kzg.as_ref(); - - // TODO(fulu): we no longer need blob proofs from PeerDAS and could avoid computing. 
- kzg_utils::validate_blobs::( - kzg, - expected_kzg_commitments, - blobs.iter().collect(), - &kzg_proofs, - ) - .map_err(BlockProductionError::KzgError)?; + if self + .spec + .is_peer_das_enabled_for_epoch(slot.epoch(T::EthSpec::slots_per_epoch())) + { + kzg_utils::validate_blobs_and_cell_proofs::( + kzg, + blobs.iter().collect(), + &kzg_proofs, + expected_kzg_commitments, + ) + .map_err(BlockProductionError::KzgError)?; + } else { + kzg_utils::validate_blobs::( + kzg, + expected_kzg_commitments, + blobs.iter().collect(), + &kzg_proofs, + ) + .map_err(BlockProductionError::KzgError)?; + } Some((kzg_proofs.into(), blobs)) } @@ -7118,27 +7130,6 @@ impl BeaconChain { ); Ok(Some(StoreOp::PutDataColumns(block_root, data_columns))) } - AvailableBlockData::DataColumnsRecv(data_column_recv) => { - // Blobs were available from the EL, in this case we wait for the data columns to be computed (blocking). - let _column_recv_timer = - metrics::start_timer(&metrics::BLOCK_PROCESSING_DATA_COLUMNS_WAIT); - // Unable to receive data columns from sender, sender is either dropped or - // failed to compute data columns from blobs. We restore fork choice here and - // return to avoid inconsistency in database. 
- let computed_data_columns = data_column_recv - .blocking_recv() - .map_err(|e| format!("Did not receive data columns from sender: {e:?}"))?; - debug!( - %block_root, - count = computed_data_columns.len(), - "Writing data columns to store" - ); - // TODO(das): Store only this node's custody columns - Ok(Some(StoreOp::PutDataColumns( - block_root, - computed_data_columns, - ))) - } } } } diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 39bad34cd6..46ba1bc992 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -97,8 +97,8 @@ use tracing::{debug, error}; use types::{ data_column_sidecar::DataColumnSidecarError, BeaconBlockRef, BeaconState, BeaconStateError, BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecutionBlockHash, FullPayload, - Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, - SignedBeaconBlockHeader, Slot, + Hash256, InconsistentFork, KzgProofs, PublicKey, PublicKeyBytes, RelativeEpoch, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; pub const POS_PANDA_BANNER: &str = r#" @@ -755,6 +755,7 @@ pub fn build_blob_data_column_sidecars( chain: &BeaconChain, block: &SignedBeaconBlock>, blobs: BlobsList, + kzg_cell_proofs: KzgProofs, ) -> Result, DataColumnSidecarError> { // Only attempt to build data columns if blobs is non empty to avoid skewing the metrics. 
if blobs.is_empty() { @@ -766,8 +767,14 @@ pub fn build_blob_data_column_sidecars( &[&blobs.len().to_string()], ); let blob_refs = blobs.iter().collect::>(); - let sidecars = blobs_to_data_column_sidecars(&blob_refs, block, &chain.kzg, &chain.spec) - .discard_timer_on_break(&mut timer)?; + let sidecars = blobs_to_data_column_sidecars( + &blob_refs, + kzg_cell_proofs.to_vec(), + block, + &chain.kzg, + &chain.spec, + ) + .discard_timer_on_break(&mut timer)?; drop(timer); Ok(sidecars) } diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 6f8a0dcb7c..975be33f0b 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -8,7 +8,7 @@ use crate::eth1_finalization_cache::Eth1FinalizationCache; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::graffiti_calculator::{GraffitiCalculator, GraffitiOrigin}; -use crate::kzg_utils::blobs_to_data_column_sidecars; +use crate::kzg_utils::build_data_column_sidecars; use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::observed_data_sidecars::ObservedDataSidecars; @@ -30,6 +30,7 @@ use logging::crit; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{Mutex, RwLock}; use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; +use rayon::prelude::*; use slasher::Slasher; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::{per_slot_processing, AllCaches}; @@ -40,8 +41,8 @@ use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; use tracing::{debug, error, info}; use types::{ - BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Checkpoint, Epoch, EthSpec, - FixedBytesExtended, Hash256, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconState, 
BlobSidecarList, ChainSpec, Checkpoint, DataColumnSidecarList, Epoch, + EthSpec, FixedBytesExtended, Hash256, Signature, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing @@ -546,15 +547,8 @@ where { // After PeerDAS recompute columns from blobs to not force the checkpointz server // into exposing another route. - let blobs = blobs - .iter() - .map(|blob_sidecar| &blob_sidecar.blob) - .collect::>(); let data_columns = - blobs_to_data_column_sidecars(&blobs, &weak_subj_block, &self.kzg, &self.spec) - .map_err(|e| { - format!("Failed to compute weak subjectivity data_columns: {e:?}") - })?; + build_data_columns_from_blobs(&weak_subj_block, &blobs, &self.kzg, &self.spec)?; // TODO(das): only persist the columns under custody store .put_data_columns(&weak_subj_block_root, data_columns) @@ -1138,6 +1132,49 @@ fn descriptive_db_error(item: &str, error: &StoreError) -> String { ) } +/// Build data columns and proofs from blobs. 
+fn build_data_columns_from_blobs( + block: &SignedBeaconBlock, + blobs: &BlobSidecarList, + kzg: &Kzg, + spec: &ChainSpec, +) -> Result, String> { + let blob_cells_and_proofs_vec = blobs + .into_par_iter() + .map(|blob_sidecar| { + let kzg_blob_ref = blob_sidecar + .blob + .as_ref() + .try_into() + .map_err(|e| format!("Failed to convert blob to kzg blob: {e:?}"))?; + let cells_and_proofs = kzg + .compute_cells_and_proofs(kzg_blob_ref) + .map_err(|e| format!("Failed to compute cell kzg proofs: {e:?}"))?; + Ok(cells_and_proofs) + }) + .collect::, String>>()?; + + let data_columns = { + let beacon_block_body = block.message().body(); + let kzg_commitments = beacon_block_body + .blob_kzg_commitments() + .cloned() + .map_err(|e| format!("Unexpected pre Deneb block: {e:?}"))?; + let kzg_commitments_inclusion_proof = beacon_block_body + .kzg_commitments_merkle_proof() + .map_err(|e| format!("Failed to compute kzg commitments merkle proof: {e:?}"))?; + build_data_column_sidecars( + kzg_commitments, + kzg_commitments_inclusion_proof, + block.signed_block_header(), + blob_cells_and_proofs_vec, + spec, + ) + .map_err(|e| format!("Failed to compute weak subjectivity data_columns: {e:?}"))? 
+ }; + Ok(data_columns) +} + #[cfg(not(debug_assertions))] #[cfg(test)] mod test { diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 2b7ae9e4d1..033b472da0 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -14,7 +14,6 @@ use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; -use tokio::sync::oneshot; use tracing::{debug, error, info_span, Instrument}; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{ @@ -226,27 +225,45 @@ impl DataAvailabilityChecker { pub fn put_engine_blobs( &self, block_root: Hash256, - block_epoch: Epoch, blobs: FixedBlobSidecarList, - data_columns_recv: Option>>, ) -> Result, AvailabilityCheckError> { - // `data_columns_recv` is always Some if block_root is post-PeerDAS - if let Some(data_columns_recv) = data_columns_recv { - self.availability_cache.put_computed_data_columns_recv( - block_root, - block_epoch, - data_columns_recv, - ) - } else { - let seen_timestamp = self - .slot_clock - .now_duration() - .ok_or(AvailabilityCheckError::SlotClockError)?; - self.availability_cache.put_kzg_verified_blobs( - block_root, - KzgVerifiedBlobList::from_verified(blobs.iter().flatten().cloned(), seen_timestamp), - ) - } + let seen_timestamp = self + .slot_clock + .now_duration() + .ok_or(AvailabilityCheckError::SlotClockError)?; + self.availability_cache.put_kzg_verified_blobs( + block_root, + KzgVerifiedBlobList::from_verified(blobs.iter().flatten().cloned(), seen_timestamp), + ) + } + + /// Put a list of data columns computed from blobs received from the EL pool into the + /// availability cache. + /// + /// This DOES NOT perform KZG proof and inclusion proof verification because + /// - The KZG proofs should have been verified by the trusted EL. 
+ /// - The KZG commitments inclusion proof should have been constructed immediately prior to + /// calling this function so they are assumed to be valid. + /// + /// This method is used if the EL already has the blobs and returns them via the `getBlobsV2` + /// engine method. + /// More details in [fetch_blobs.rs](https://github.com/sigp/lighthouse/blob/44f8add41ea2252769bb967864af95b3c13af8ca/beacon_node/beacon_chain/src/fetch_blobs.rs). + pub fn put_engine_data_columns( + &self, + block_root: Hash256, + data_columns: DataColumnSidecarList, + ) -> Result, AvailabilityCheckError> { + let kzg_verified_custody_columns = data_columns + .into_iter() + .map(|d| { + KzgVerifiedCustodyDataColumn::from_asserted_custody( + KzgVerifiedDataColumn::from_verified(d), + ) + }) + .collect::>(); + + self.availability_cache + .put_kzg_verified_data_columns(block_root, kzg_verified_custody_columns) } /// Check if we've cached other blobs for this block. If it completes a set and we also @@ -704,9 +721,6 @@ pub enum AvailableBlockData { Blobs(BlobSidecarList), /// Block is post-PeerDAS and has more than zero blobs DataColumns(DataColumnSidecarList), - /// Block is post-PeerDAS, has more than zero blobs and we recomputed the columns from the EL's - /// mempool blobs - DataColumnsRecv(oneshot::Receiver>), } /// A fully available block that is ready to be imported into fork choice. @@ -756,7 +770,6 @@ impl AvailableBlock { AvailableBlockData::NoData => false, AvailableBlockData::Blobs(..) 
=> true, AvailableBlockData::DataColumns(_) => false, - AvailableBlockData::DataColumnsRecv(_) => false, } } @@ -782,9 +795,6 @@ impl AvailableBlock { AvailableBlockData::DataColumns(data_columns) => { AvailableBlockData::DataColumns(data_columns.clone()) } - AvailableBlockData::DataColumnsRecv(_) => { - return Err("Can't clone DataColumnsRecv".to_owned()) - } }, blobs_available_timestamp: self.blobs_available_timestamp, spec: self.spec.clone(), diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 4359d7fbdb..f5fd24483a 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -13,13 +13,11 @@ use parking_lot::RwLock; use std::cmp::Ordering; use std::num::NonZeroUsize; use std::sync::Arc; -use tokio::sync::oneshot; use tracing::debug; use types::blob_sidecar::BlobIdentifier; use types::{ - BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, - DataColumnSidecarList, Epoch, EthSpec, Hash256, RuntimeFixedVector, RuntimeVariableList, - SignedBeaconBlock, + BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, + Hash256, RuntimeFixedVector, RuntimeVariableList, SignedBeaconBlock, }; /// This represents the components of a partially available block @@ -32,12 +30,6 @@ pub struct PendingComponents { pub verified_data_columns: Vec>, pub executed_block: Option>, pub reconstruction_started: bool, - /// Receiver for data columns that are computed asynchronously; - /// - /// If `data_column_recv` is `Some`, it means data column computation or reconstruction has been - /// started. This can happen either via engine blobs fetching or data column reconstruction - /// (triggered when >= 50% columns are received via gossip). 
- pub data_column_recv: Option>>, } impl PendingComponents { @@ -202,13 +194,8 @@ impl PendingComponents { Some(AvailableBlockData::DataColumns(data_columns)) } Ordering::Less => { - // The data_columns_recv is an infallible promise that we will receive all expected - // columns, so we consider the block available. - // We take the receiver as it can't be cloned, and make_available should never - // be called again once it returns `Some`. - self.data_column_recv - .take() - .map(AvailableBlockData::DataColumnsRecv) + // Not enough data columns received yet + None } } } else { @@ -261,7 +248,6 @@ impl PendingComponents { .max(), // TODO(das): To be fixed with https://github.com/sigp/lighthouse/pull/6850 AvailableBlockData::DataColumns(_) => None, - AvailableBlockData::DataColumnsRecv(_) => None, }; let AvailabilityPendingExecutedBlock { @@ -293,7 +279,6 @@ impl PendingComponents { verified_data_columns: vec![], executed_block: None, reconstruction_started: false, - data_column_recv: None, } } @@ -331,17 +316,11 @@ impl PendingComponents { } else { "?" }; - let data_column_recv_count = if self.data_column_recv.is_some() { - 1 - } else { - 0 - }; format!( - "block {} data_columns {}/{} data_columns_recv {}", + "block {} data_columns {}/{}", block_count, self.verified_data_columns.len(), custody_columns_count, - data_column_recv_count, ) } else { let num_expected_blobs = if let Some(block) = self.get_cached_block() { @@ -498,7 +477,6 @@ impl DataAvailabilityCheckerInner { self.state_cache.recover_pending_executed_block(block) })? { // We keep the pending components in the availability cache during block import (#5845). - // `data_column_recv` is returned as part of the available block and is no longer needed here. write_lock.put(block_root, pending_components); drop(write_lock); Ok(Availability::Available(Box::new(available_block))) @@ -551,55 +529,6 @@ impl DataAvailabilityCheckerInner { self.state_cache.recover_pending_executed_block(block) })? 
{ // We keep the pending components in the availability cache during block import (#5845). - // `data_column_recv` is returned as part of the available block and is no longer needed here. - write_lock.put(block_root, pending_components); - drop(write_lock); - Ok(Availability::Available(Box::new(available_block))) - } else { - write_lock.put(block_root, pending_components); - Ok(Availability::MissingComponents(block_root)) - } - } - - /// The `data_column_recv` parameter is a `Receiver` for data columns that are computed - /// asynchronously. This method is used if the EL already has the blobs and returns them via the - /// `getBlobsV1` engine method. More details in [fetch_blobs.rs](https://github.com/sigp/lighthouse/blob/44f8add41ea2252769bb967864af95b3c13af8ca/beacon_node/beacon_chain/src/fetch_blobs.rs). - pub fn put_computed_data_columns_recv( - &self, - block_root: Hash256, - block_epoch: Epoch, - data_column_recv: oneshot::Receiver>, - ) -> Result, AvailabilityCheckError> { - let mut write_lock = self.critical.write(); - - // Grab existing entry or create a new entry. - let mut pending_components = write_lock - .pop_entry(&block_root) - .map(|(_, v)| v) - .unwrap_or_else(|| { - PendingComponents::empty( - block_root, - self.spec.max_blobs_per_block(block_epoch) as usize, - ) - }); - - // We have all the blobs from engine, and have started computing data columns. We store the - // receiver in `PendingComponents` for later use when importing the block. - // TODO(das): Error or log if we overwrite a prior receiver https://github.com/sigp/lighthouse/issues/6764 - pending_components.data_column_recv = Some(data_column_recv); - - debug!( - component = "data_columns_recv", - ?block_root, - status = pending_components.status_str(block_epoch, &self.spec), - "Component added to data availability checker" - ); - - if let Some(available_block) = pending_components.make_available(&self.spec, |block| { - self.state_cache.recover_pending_executed_block(block) - })? 
{ - // We keep the pending components in the availability cache during block import (#5845). - // `data_column_recv` is returned as part of the available block and is no longer needed here. write_lock.put(block_root, pending_components); drop(write_lock); Ok(Availability::Available(Box::new(available_block))) @@ -694,7 +623,6 @@ impl DataAvailabilityCheckerInner { self.state_cache.recover_pending_executed_block(block) })? { // We keep the pending components in the availability cache during block import (#5845). - // `data_column_recv` is returned as part of the available block and is no longer needed here. write_lock.put(block_root, pending_components); drop(write_lock); Ok(Availability::Available(Box::new(available_block))) diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 2f95d834b5..57efbb0a77 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -141,13 +141,23 @@ pub enum GossipDataColumnError { /// /// The column sidecar is invalid and the peer is faulty UnexpectedDataColumn, - /// The data column length must be equal to the number of commitments/proofs, otherwise the + /// The data column length must be equal to the number of commitments, otherwise the /// sidecar is invalid. /// /// ## Peer scoring /// /// The column sidecar is invalid and the peer is faulty - InconsistentCommitmentsOrProofLength, + InconsistentCommitmentsLength { + cells_len: usize, + commitments_len: usize, + }, + /// The data column length must be equal to the number of proofs, otherwise the + /// sidecar is invalid. 
+ /// + /// ## Peer scoring + /// + /// The column sidecar is invalid and the peer is faulty + InconsistentProofsLength { cells_len: usize, proofs_len: usize }, } impl From for GossipDataColumnError { @@ -240,6 +250,14 @@ impl KzgVerifiedDataColumn { verify_kzg_for_data_column(data_column, kzg) } + /// Create a `KzgVerifiedDataColumn` from `data_column` that are already KZG verified. + /// + /// This should be used with caution, as used incorrectly it could result in KZG verification + /// being skipped and invalid data_columns being deemed valid. + pub fn from_verified(data_column: Arc>) -> Self { + Self { data: data_column } + } + pub fn from_batch( data_columns: Vec>>, kzg: &Kzg, @@ -473,10 +491,23 @@ fn verify_data_column_sidecar( if data_column.kzg_commitments.is_empty() { return Err(GossipDataColumnError::UnexpectedDataColumn); } - if data_column.column.len() != data_column.kzg_commitments.len() - || data_column.column.len() != data_column.kzg_proofs.len() - { - return Err(GossipDataColumnError::InconsistentCommitmentsOrProofLength); + + let cells_len = data_column.column.len(); + let commitments_len = data_column.kzg_commitments.len(); + let proofs_len = data_column.kzg_proofs.len(); + + if cells_len != commitments_len { + return Err(GossipDataColumnError::InconsistentCommitmentsLength { + cells_len, + commitments_len, + }); + } + + if cells_len != proofs_len { + return Err(GossipDataColumnError::InconsistentProofsLength { + cells_len, + proofs_len, + }); } Ok(()) diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index f4810e7b4a..5665ef3775 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -74,10 +74,6 @@ impl EarlyAttesterCache { AvailableBlockData::NoData => (None, None), AvailableBlockData::Blobs(blobs) => (Some(blobs.clone()), None), AvailableBlockData::DataColumns(data_columns) => (None, 
Some(data_columns.clone())), - // TODO(das): Once the columns are received, they will not be available in - // the early attester cache. If someone does a query to us via RPC we - // will get downscored. - AvailableBlockData::DataColumnsRecv(_) => (None, None), }; let item = CacheItem { diff --git a/beacon_node/beacon_chain/src/fetch_blobs.rs b/beacon_node/beacon_chain/src/fetch_blobs.rs index 3c28ac9a44..3b576da1c7 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs.rs @@ -7,34 +7,52 @@ //! on P2P gossip to the network. From PeerDAS onwards, together with the increase in blob count, //! broadcasting blobs requires a much higher bandwidth, and is only done by high capacity //! supernodes. + use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::observed_data_sidecars::DoNotObserve; -use crate::{metrics, AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError}; -use execution_layer::json_structures::BlobAndProofV1; +use crate::{ + metrics, AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, + BlockError, +}; +use execution_layer::json_structures::{BlobAndProofV1, BlobAndProofV2}; use execution_layer::Error as ExecutionLayerError; use metrics::{inc_counter, TryExt}; use ssz_types::FixedVector; use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; +use std::collections::HashSet; use std::sync::Arc; -use tokio::sync::oneshot; -use tracing::{debug, error}; +use tracing::debug; use types::blob_sidecar::{BlobSidecarError, FixedBlobSidecarList}; +use types::data_column_sidecar::DataColumnSidecarError; use types::{ - BeaconStateError, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnSidecarList, EthSpec, - FullPayload, Hash256, SignedBeaconBlock, SignedBeaconBlockHeader, + BeaconStateError, Blob, BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecarList, EthSpec, + 
FullPayload, Hash256, KzgProofs, SignedBeaconBlock, SignedBeaconBlockHeader, VersionedHash, }; +/// Blobs or data column to be published to the gossip network. pub enum BlobsOrDataColumns { Blobs(Vec>), DataColumns(DataColumnSidecarList), } +/// Result from engine get blobs to be passed onto `DataAvailabilityChecker`. +/// +/// The blobs are retrieved from a trusted EL and columns are computed locally, therefore they are +/// considered valid without requiring extra validation. +pub enum EngineGetBlobsOutput { + Blobs(FixedBlobSidecarList), + /// A filtered list of custody data columns to be imported into the `DataAvailabilityChecker`. + CustodyColumns(DataColumnSidecarList), +} + #[derive(Debug)] pub enum FetchEngineBlobError { BeaconStateError(BeaconStateError), + BeaconChainError(BeaconChainError), BlobProcessingError(BlockError), BlobSidecarError(BlobSidecarError), + DataColumnSidecarError(DataColumnSidecarError), ExecutionLayerMissing, InternalError(String), GossipBlob(GossipBlobError), @@ -48,6 +66,7 @@ pub async fn fetch_and_process_engine_blobs( chain: Arc>, block_root: Hash256, block: Arc>>, + custody_columns: HashSet, publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, ) -> Result, FetchEngineBlobError> { let versioned_hashes = if let Some(kzg_commitments) = block @@ -66,8 +85,34 @@ pub async fn fetch_and_process_engine_blobs( return Ok(None); }; - let num_expected_blobs = versioned_hashes.len(); + debug!( + num_expected_blobs = versioned_hashes.len(), + "Fetching blobs from the EL" + ); + if chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + fetch_and_process_blobs_v2( + chain, + block_root, + block, + versioned_hashes, + custody_columns, + publish_fn, + ) + .await + } else { + fetch_and_process_blobs_v1(chain, block_root, block, versioned_hashes, publish_fn).await + } +} + +async fn fetch_and_process_blobs_v1( + chain: Arc>, + block_root: Hash256, + block: Arc>, + versioned_hashes: Vec, + publish_fn: impl Fn(BlobsOrDataColumns) + Send + 
Sized, +) -> Result, FetchEngineBlobError> { + let num_expected_blobs = versioned_hashes.len(); let execution_layer = chain .execution_layer .as_ref() @@ -76,7 +121,7 @@ pub async fn fetch_and_process_engine_blobs( metrics::observe(&metrics::BLOBS_FROM_EL_EXPECTED, num_expected_blobs as f64); debug!(num_expected_blobs, "Fetching blobs from the EL"); let response = execution_layer - .get_blobs(versioned_hashes) + .get_blobs_v1(versioned_hashes) .await .inspect_err(|_| { inc_counter(&metrics::BLOBS_FROM_EL_ERROR_TOTAL); @@ -125,59 +170,9 @@ pub async fn fetch_and_process_engine_blobs( .collect::, _>>() .map_err(FetchEngineBlobError::GossipBlob)?; - let peer_das_enabled = chain.spec.is_peer_das_enabled_for_epoch(block.epoch()); - - let data_columns_receiver_opt = if peer_das_enabled { - // Partial blobs response isn't useful for PeerDAS, so we don't bother building and publishing data columns. - if num_fetched_blobs != num_expected_blobs { - debug!( - info = "Unable to compute data columns", - num_fetched_blobs, num_expected_blobs, "Not all blobs fetched from the EL" - ); - return Ok(None); - } - - if chain - .canonical_head - .fork_choice_read_lock() - .contains_block(&block_root) - { - // Avoid computing columns if block has already been imported. - debug!( - info = "block has already been imported", - "Ignoring EL blobs response" - ); - return Ok(None); - } - - if chain - .canonical_head - .fork_choice_read_lock() - .contains_block(&block_root) - { - // Avoid computing columns if block has already been imported. 
- debug!( - info = "block has already been imported", - "Ignoring EL blobs response" - ); - return Ok(None); - } - - let data_columns_receiver = spawn_compute_and_publish_data_columns_task( - &chain, - block.clone(), - fixed_blob_sidecar_list.clone(), - publish_fn, - ); - - Some(data_columns_receiver) - } else { - if !blobs_to_import_and_publish.is_empty() { - publish_fn(BlobsOrDataColumns::Blobs(blobs_to_import_and_publish)); - } - - None - }; + if !blobs_to_import_and_publish.is_empty() { + publish_fn(BlobsOrDataColumns::Blobs(blobs_to_import_and_publish)); + } debug!(num_fetched_blobs, "Processing engine blobs"); @@ -185,8 +180,7 @@ pub async fn fetch_and_process_engine_blobs( .process_engine_blobs( block.slot(), block_root, - fixed_blob_sidecar_list.clone(), - data_columns_receiver_opt, + EngineGetBlobsOutput::Blobs(fixed_blob_sidecar_list.clone()), ) .await .map_err(FetchEngineBlobError::BlobProcessingError)?; @@ -194,67 +188,140 @@ pub async fn fetch_and_process_engine_blobs( Ok(Some(availability_processing_status)) } -/// Spawn a blocking task here for long computation tasks, so it doesn't block processing, and it -/// allows blobs / data columns to propagate without waiting for processing. -/// -/// An `mpsc::Sender` is then used to send the produced data columns to the `beacon_chain` for it -/// to be persisted, **after** the block is made attestable. -/// -/// The reason for doing this is to make the block available and attestable as soon as possible, -/// while maintaining the invariant that block and data columns are persisted atomically. 
-fn spawn_compute_and_publish_data_columns_task( +async fn fetch_and_process_blobs_v2( + chain: Arc>, + block_root: Hash256, + block: Arc>, + versioned_hashes: Vec, + custody_columns_indices: HashSet, + publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, +) -> Result, FetchEngineBlobError> { + let num_expected_blobs = versioned_hashes.len(); + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(FetchEngineBlobError::ExecutionLayerMissing)?; + + metrics::observe(&metrics::BLOBS_FROM_EL_EXPECTED, num_expected_blobs as f64); + debug!(num_expected_blobs, "Fetching blobs from the EL"); + let response = execution_layer + .get_blobs_v2(versioned_hashes) + .await + .inspect_err(|_| { + inc_counter(&metrics::BLOBS_FROM_EL_ERROR_TOTAL); + }) + .map_err(FetchEngineBlobError::RequestFailed)?; + + let (blobs, proofs): (Vec<_>, Vec<_>) = response + .into_iter() + .filter_map(|blob_and_proof_opt| { + blob_and_proof_opt.map(|blob_and_proof| { + let BlobAndProofV2 { blob, proofs } = blob_and_proof; + (blob, proofs) + }) + }) + .unzip(); + + let num_fetched_blobs = blobs.len(); + metrics::observe(&metrics::BLOBS_FROM_EL_RECEIVED, num_fetched_blobs as f64); + + // Partial blobs response isn't useful for PeerDAS, so we don't bother building and publishing data columns. + if num_fetched_blobs != num_expected_blobs { + debug!( + info = "Unable to compute data columns", + num_fetched_blobs, num_expected_blobs, "Not all blobs fetched from the EL" + ); + inc_counter(&metrics::BLOBS_FROM_EL_MISS_TOTAL); + return Ok(None); + } else { + inc_counter(&metrics::BLOBS_FROM_EL_HIT_TOTAL); + } + + if chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + // Avoid computing columns if block has already been imported. 
+ debug!( + info = "block has already been imported", + "Ignoring EL blobs response" + ); + return Ok(None); + } + + let custody_columns = compute_and_publish_data_columns( + &chain, + block.clone(), + blobs, + proofs, + custody_columns_indices, + publish_fn, + ) + .await?; + + debug!(num_fetched_blobs, "Processing engine blobs"); + + let availability_processing_status = chain + .process_engine_blobs( + block.slot(), + block_root, + EngineGetBlobsOutput::CustodyColumns(custody_columns), + ) + .await + .map_err(FetchEngineBlobError::BlobProcessingError)?; + + Ok(Some(availability_processing_status)) +} + +/// Offload the data column computation to a blocking task to avoid holding up the async runtime. +async fn compute_and_publish_data_columns( chain: &Arc>, block: Arc>>, - blobs: FixedBlobSidecarList, + blobs: Vec>, + proofs: Vec>, + custody_columns_indices: HashSet, publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, -) -> oneshot::Receiver>>> { +) -> Result, FetchEngineBlobError> { let chain_cloned = chain.clone(); - let (data_columns_sender, data_columns_receiver) = oneshot::channel(); + chain + .spawn_blocking_handle( + move || { + let mut timer = metrics::start_timer_vec( + &metrics::DATA_COLUMN_SIDECAR_COMPUTATION, + &[&blobs.len().to_string()], + ); - chain.task_executor.spawn_blocking( - move || { - let mut timer = metrics::start_timer_vec( - &metrics::DATA_COLUMN_SIDECAR_COMPUTATION, - &[&blobs.len().to_string()], - ); - let blob_refs = blobs - .iter() - .filter_map(|b| b.as_ref().map(|b| &b.blob)) - .collect::>(); - let data_columns_result = blobs_to_data_column_sidecars( - &blob_refs, - &block, - &chain_cloned.kzg, - &chain_cloned.spec, - ) - .discard_timer_on_break(&mut timer); - drop(timer); + let blob_refs = blobs.iter().collect::>(); + let cell_proofs = proofs.into_iter().flatten().collect(); + let data_columns_result = blobs_to_data_column_sidecars( + &blob_refs, + cell_proofs, + &block, + &chain_cloned.kzg, + &chain_cloned.spec, + ) + 
.discard_timer_on_break(&mut timer); + drop(timer); - let all_data_columns = match data_columns_result { - Ok(d) => d, - Err(e) => { - error!( - error = ?e, - "Failed to build data column sidecars from blobs" - ); - return; - } - }; + // This filtering ensures we only import and publish the custody columns. + // `DataAvailabilityChecker` requires a strict match on custody columns count to + // consider a block available. + let custody_columns = data_columns_result + .map(|mut data_columns| { + data_columns.retain(|col| custody_columns_indices.contains(&col.index)); + data_columns + }) + .map_err(FetchEngineBlobError::DataColumnSidecarError)?; - if data_columns_sender.send(all_data_columns.clone()).is_err() { - // Data column receiver have been dropped - block may have already been imported. - // This race condition exists because gossip columns may arrive and trigger block - // import during the computation. Here we just drop the computed columns. - debug!("Failed to send computed data columns"); - return; - }; - - publish_fn(BlobsOrDataColumns::DataColumns(all_data_columns)); - }, - "compute_and_publish_data_columns", - ); - - data_columns_receiver + publish_fn(BlobsOrDataColumns::DataColumns(custody_columns.clone())); + Ok(custody_columns) + }, + "compute_and_publish_data_columns", + ) + .await + .map_err(FetchEngineBlobError::BeaconChainError) + .and_then(|r| r) } fn build_blob_sidecars( diff --git a/beacon_node/beacon_chain/src/fulu_readiness.rs b/beacon_node/beacon_chain/src/fulu_readiness.rs index 872fe58f2b..1107acad74 100644 --- a/beacon_node/beacon_chain/src/fulu_readiness.rs +++ b/beacon_node/beacon_chain/src/fulu_readiness.rs @@ -1,7 +1,7 @@ //! Provides tools for checking if a node is ready for the Fulu upgrade. 
use crate::{BeaconChain, BeaconChainTypes}; -use execution_layer::http::{ENGINE_GET_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V4}; +use execution_layer::http::{ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V4}; use serde::{Deserialize, Serialize}; use std::fmt; use std::time::Duration; @@ -87,12 +87,12 @@ impl BeaconChain { Ok(capabilities) => { let mut missing_methods = String::from("Required Methods Unsupported:"); let mut all_good = true; - // TODO(fulu) switch to v5 when the EL is ready - if !capabilities.get_payload_v4 { + if !capabilities.get_payload_v5 { missing_methods.push(' '); - missing_methods.push_str(ENGINE_GET_PAYLOAD_V4); + missing_methods.push_str(ENGINE_GET_PAYLOAD_V5); all_good = false; } + // TODO(fulu) switch to v5 when the EL is ready if !capabilities.new_payload_v4 { missing_methods.push(' '); missing_methods.push_str(ENGINE_NEW_PAYLOAD_V4); diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index ee51964910..348e6d52a6 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -132,7 +132,7 @@ impl BeaconChain { AvailableBlockData::Blobs(..) 
=> { new_oldest_blob_slot = Some(block.slot()); } - AvailableBlockData::DataColumns(_) | AvailableBlockData::DataColumnsRecv(_) => { + AvailableBlockData::DataColumns(_) => { new_oldest_data_column_slot = Some(block.slot()); } } diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 06cce14144..eaaa23130d 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -1,14 +1,15 @@ use kzg::{ - Blob as KzgBlob, Bytes48, CellRef as KzgCellRef, CellsAndKzgProofs, Error as KzgError, Kzg, + Blob as KzgBlob, Bytes48, Cell as KzgCell, CellRef as KzgCellRef, CellsAndKzgProofs, + Error as KzgError, Kzg, CELLS_PER_EXT_BLOB, }; use rayon::prelude::*; -use ssz_types::FixedVector; +use ssz_types::{FixedVector, VariableList}; use std::sync::Arc; use types::beacon_block_body::KzgCommitments; use types::data_column_sidecar::{Cell, DataColumn, DataColumnSidecarError}; use types::{ Blob, BlobSidecar, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecar, - DataColumnSidecarList, EthSpec, Hash256, KzgCommitment, KzgProof, KzgProofs, SignedBeaconBlock, + DataColumnSidecarList, EthSpec, Hash256, KzgCommitment, KzgProof, SignedBeaconBlock, SignedBeaconBlockHeader, SignedBlindedBeaconBlock, }; @@ -43,6 +44,33 @@ pub fn validate_blob( kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) } +/// Validates a list of blobs along with their corresponding KZG commitments and +/// cell proofs for the extended blobs. 
+pub fn validate_blobs_and_cell_proofs( + kzg: &Kzg, + blobs: Vec<&Blob>, + cell_proofs: &[KzgProof], + kzg_commitments: &KzgCommitments, +) -> Result<(), KzgError> { + let cells = compute_cells::(&blobs, kzg)?; + let cell_refs = cells.iter().map(|cell| cell.as_ref()).collect::>(); + let cell_indices = (0..blobs.len()) + .flat_map(|_| 0..CELLS_PER_EXT_BLOB as u64) + .collect::>(); + + let proofs = cell_proofs + .iter() + .map(|&proof| Bytes48::from(proof)) + .collect::>(); + + let commitments = kzg_commitments + .iter() + .flat_map(|&commitment| std::iter::repeat_n(Bytes48::from(commitment), CELLS_PER_EXT_BLOB)) + .collect::>(); + + kzg.verify_cell_proof_batch(&cell_refs, &proofs, cell_indices, &commitments) +} + /// Validate a batch of `DataColumnSidecar`. pub fn validate_data_columns<'a, E: EthSpec, I>( kzg: &Kzg, @@ -148,6 +176,7 @@ pub fn verify_kzg_proof( /// Build data column sidecars from a signed beacon block and its blobs. pub fn blobs_to_data_column_sidecars( blobs: &[&Blob], + cell_proofs: Vec, block: &SignedBeaconBlock, kzg: &Kzg, spec: &ChainSpec, @@ -164,15 +193,28 @@ pub fn blobs_to_data_column_sidecars( let kzg_commitments_inclusion_proof = block.message().body().kzg_commitments_merkle_proof()?; let signed_block_header = block.signed_block_header(); + let proof_chunks = cell_proofs + .chunks_exact(spec.number_of_columns as usize) + .collect::>(); + // NOTE: assumes blob sidecars are ordered by index let blob_cells_and_proofs_vec = blobs .into_par_iter() - .map(|blob| { + .zip(proof_chunks.into_par_iter()) + .map(|(blob, proofs)| { let blob = blob .as_ref() .try_into() .expect("blob should have a guaranteed size due to FixedVector"); - kzg.compute_cells_and_proofs(blob) + + kzg.compute_cells(blob).map(|cells| { + ( + cells, + proofs + .try_into() + .expect("proof chunks should have exactly `number_of_columns` proofs"), + ) + }) }) .collect::, KzgError>>()?; @@ -186,6 +228,23 @@ pub fn blobs_to_data_column_sidecars( 
.map_err(DataColumnSidecarError::BuildSidecarFailed) } +pub fn compute_cells(blobs: &[&Blob], kzg: &Kzg) -> Result, KzgError> { + let cells_vec = blobs + .into_par_iter() + .map(|blob| { + let blob = blob + .as_ref() + .try_into() + .expect("blob should have a guaranteed size due to FixedVector"); + + kzg.compute_cells(blob) + }) + .collect::, KzgError>>()?; + + let cells_flattened: Vec = cells_vec.into_iter().flatten().collect(); + Ok(cells_flattened) +} + pub(crate) fn build_data_column_sidecars( kzg_commitments: KzgCommitments, kzg_commitments_inclusion_proof: FixedVector, @@ -236,7 +295,7 @@ pub(crate) fn build_data_column_sidecars( index: index as u64, column: DataColumn::::from(col), kzg_commitments: kzg_commitments.clone(), - kzg_proofs: KzgProofs::::from(proofs), + kzg_proofs: VariableList::from(proofs), signed_block_header: signed_block_header.clone(), kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), }) @@ -300,12 +359,7 @@ pub fn reconstruct_blobs( .collect(); let blob = Blob::::new(blob_bytes).map_err(|e| format!("{e:?}"))?; - let kzg_commitment = first_data_column - .kzg_commitments - .get(row_index) - .ok_or(format!("Missing KZG commitment for blob {row_index}"))?; - let kzg_proof = compute_blob_kzg_proof::(kzg, &blob, *kzg_commitment) - .map_err(|e| format!("{e:?}"))?; + let kzg_proof = KzgProof::empty(); BlobSidecar::::new_with_existing_proof( row_index, @@ -373,14 +427,15 @@ pub fn reconstruct_data_columns( mod test { use crate::kzg_utils::{ blobs_to_data_column_sidecars, reconstruct_blobs, reconstruct_data_columns, + validate_blobs_and_cell_proofs, }; use bls::Signature; use eth2::types::BlobsBundle; use execution_layer::test_utils::generate_blobs; use kzg::{trusted_setup::get_trusted_setup, Kzg, KzgCommitment, TrustedSetup}; use types::{ - beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, BlobsList, ChainSpec, - EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, + beacon_block_body::KzgCommitments, 
BeaconBlock, BeaconBlockFulu, BlobsList, ChainSpec, + EmptyBlock, EthSpec, ForkName, FullPayload, KzgProofs, MainnetEthSpec, SignedBeaconBlock, }; type E = MainnetEthSpec; @@ -389,32 +444,52 @@ mod test { // only load it once. #[test] fn test_build_data_columns_sidecars() { - let spec = E::default_spec(); + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); let kzg = get_kzg(); test_build_data_columns_empty(&kzg, &spec); test_build_data_columns(&kzg, &spec); test_reconstruct_data_columns(&kzg, &spec); test_reconstruct_blobs_from_data_columns(&kzg, &spec); + test_verify_blob_and_cell_proofs(&kzg); + } + + #[track_caller] + fn test_verify_blob_and_cell_proofs(kzg: &Kzg) { + let (blobs_bundle, _) = generate_blobs::(3, ForkName::Fulu).unwrap(); + let BlobsBundle { + blobs, + commitments, + proofs, + } = blobs_bundle; + + let result = + validate_blobs_and_cell_proofs::(kzg, blobs.iter().collect(), &proofs, &commitments); + + assert!(result.is_ok()); } #[track_caller] fn test_build_data_columns_empty(kzg: &Kzg, spec: &ChainSpec) { let num_of_blobs = 0; - let (signed_block, blobs) = create_test_block_and_blobs::(num_of_blobs, spec); + let (signed_block, blobs, proofs) = + create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); let column_sidecars = - blobs_to_data_column_sidecars(&blob_refs, &signed_block, kzg, spec).unwrap(); + blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec) + .unwrap(); assert!(column_sidecars.is_empty()); } #[track_caller] fn test_build_data_columns(kzg: &Kzg, spec: &ChainSpec) { let num_of_blobs = 6; - let (signed_block, blobs) = create_test_block_and_blobs::(num_of_blobs, spec); + let (signed_block, blobs, proofs) = + create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); let column_sidecars = - blobs_to_data_column_sidecars(&blob_refs, &signed_block, kzg, spec).unwrap(); + blobs_to_data_column_sidecars(&blob_refs, 
proofs.to_vec(), &signed_block, kzg, spec) + .unwrap(); let block_kzg_commitments = signed_block .message() @@ -448,10 +523,12 @@ mod test { #[track_caller] fn test_reconstruct_data_columns(kzg: &Kzg, spec: &ChainSpec) { let num_of_blobs = 6; - let (signed_block, blobs) = create_test_block_and_blobs::(num_of_blobs, spec); + let (signed_block, blobs, proofs) = + create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); let column_sidecars = - blobs_to_data_column_sidecars(&blob_refs, &signed_block, kzg, spec).unwrap(); + blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec) + .unwrap(); // Now reconstruct let reconstructed_columns = reconstruct_data_columns( @@ -469,10 +546,12 @@ mod test { #[track_caller] fn test_reconstruct_blobs_from_data_columns(kzg: &Kzg, spec: &ChainSpec) { let num_of_blobs = 6; - let (signed_block, blobs) = create_test_block_and_blobs::(num_of_blobs, spec); + let (signed_block, blobs, proofs) = + create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); let column_sidecars = - blobs_to_data_column_sidecars(&blob_refs, &signed_block, kzg, spec).unwrap(); + blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec) + .unwrap(); // Now reconstruct let signed_blinded_block = signed_block.into(); @@ -504,11 +583,15 @@ mod test { Kzg::new_from_trusted_setup_das_enabled(trusted_setup).expect("should create kzg") } - fn create_test_block_and_blobs( + fn create_test_fulu_block_and_blobs( num_of_blobs: usize, spec: &ChainSpec, - ) -> (SignedBeaconBlock, BlobsList) { - let mut block = BeaconBlock::Deneb(BeaconBlockDeneb::empty(spec)); + ) -> ( + SignedBeaconBlock>, + BlobsList, + KzgProofs, + ) { + let mut block = BeaconBlock::Fulu(BeaconBlockFulu::empty(spec)); let mut body = block.body_mut(); let blob_kzg_commitments = body.blob_kzg_commitments_mut().unwrap(); *blob_kzg_commitments = @@ -516,12 +599,12 @@ mod test 
{ .unwrap(); let mut signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); - - let (blobs_bundle, _) = generate_blobs::(num_of_blobs).unwrap(); + let fork = signed_block.fork_name_unchecked(); + let (blobs_bundle, _) = generate_blobs::(num_of_blobs, fork).unwrap(); let BlobsBundle { blobs, commitments, - proofs: _, + proofs, } = blobs_bundle; *signed_block @@ -530,6 +613,6 @@ mod test { .blob_kzg_commitments_mut() .unwrap() = commitments; - (signed_block, blobs) + (signed_block, blobs, proofs) } } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 871721b4d8..57012161ec 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1663,7 +1663,7 @@ pub static BLOBS_FROM_EL_HIT_TOTAL: LazyLock> = LazyLock::new pub static BLOBS_FROM_EL_MISS_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter( "beacon_blobs_from_el_miss_total", - "Number of empty blob responses from the execution layer", + "Number of empty or incomplete blob responses from the execution layer", ) }); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index fe78d83c03..bcab512a4b 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -3194,7 +3194,7 @@ pub fn generate_rand_block_and_blobs( NumBlobs::None => 0, }; let (bundle, transactions) = - execution_layer::test_utils::generate_blobs::(num_blobs).unwrap(); + execution_layer::test_utils::generate_blobs::(num_blobs, fork_name).unwrap(); payload.execution_payload.transactions = <_>::default(); for tx in Vec::from(transactions) { @@ -3214,7 +3214,7 @@ pub fn generate_rand_block_and_blobs( NumBlobs::None => 0, }; let (bundle, transactions) = - execution_layer::test_utils::generate_blobs::(num_blobs).unwrap(); + execution_layer::test_utils::generate_blobs::(num_blobs, fork_name).unwrap(); payload.execution_payload.transactions = 
<_>::default(); for tx in Vec::from(transactions) { payload.execution_payload.transactions.push(tx).unwrap(); @@ -3233,7 +3233,7 @@ pub fn generate_rand_block_and_blobs( NumBlobs::None => 0, }; let (bundle, transactions) = - execution_layer::test_utils::generate_blobs::(num_blobs).unwrap(); + execution_layer::test_utils::generate_blobs::(num_blobs, fork_name).unwrap(); payload.execution_payload.transactions = <_>::default(); for tx in Vec::from(transactions) { payload.execution_payload.transactions.push(tx).unwrap(); diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index aed6cdba67..4bfee223ff 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,10 +1,11 @@ use crate::engines::ForkchoiceState; use crate::http::{ ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, - ENGINE_GET_BLOBS_V1, ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, - ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, - ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V1, - ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V5, + ENGINE_GET_BLOBS_V1, ENGINE_GET_BLOBS_V2, ENGINE_GET_CLIENT_VERSION_V1, + ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, + ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, + ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, + ENGINE_NEW_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V5, }; use eth2::types::{ BlobsBundle, SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2, @@ -553,6 +554,7 @@ pub struct EngineCapabilities { pub get_payload_v5: bool, pub get_client_version_v1: bool, pub get_blobs_v1: bool, + pub get_blobs_v2: bool, } impl EngineCapabilities { @@ -609,6 +611,9 
@@ impl EngineCapabilities { if self.get_blobs_v1 { response.push(ENGINE_GET_BLOBS_V1); } + if self.get_blobs_v2 { + response.push(ENGINE_GET_BLOBS_V2); + } response } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 747383754a..bf4c391a8d 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -61,6 +61,7 @@ pub const ENGINE_GET_CLIENT_VERSION_V1: &str = "engine_getClientVersionV1"; pub const ENGINE_GET_CLIENT_VERSION_TIMEOUT: Duration = Duration::from_secs(1); pub const ENGINE_GET_BLOBS_V1: &str = "engine_getBlobsV1"; +pub const ENGINE_GET_BLOBS_V2: &str = "engine_getBlobsV2"; pub const ENGINE_GET_BLOBS_TIMEOUT: Duration = Duration::from_secs(1); /// This error is returned during a `chainId` call by Geth. @@ -87,6 +88,7 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_BLOBS_V1, + ENGINE_GET_BLOBS_V2, ]; /// We opt to initialize the JsonClientVersionV1 rather than the ClientVersionV1 @@ -708,7 +710,7 @@ impl HttpJsonRpc { } } - pub async fn get_blobs( + pub async fn get_blobs_v1( &self, versioned_hashes: Vec, ) -> Result>>, Error> { @@ -722,6 +724,20 @@ impl HttpJsonRpc { .await } + pub async fn get_blobs_v2( + &self, + versioned_hashes: Vec, + ) -> Result>>, Error> { + let params = json!([versioned_hashes]); + + self.rpc_request( + ENGINE_GET_BLOBS_V2, + params, + ENGINE_GET_BLOBS_TIMEOUT * self.execution_timeout_multiplier, + ) + .await + } + pub async fn get_block_by_number( &self, query: BlockByNumberQuery<'_>, @@ -963,19 +979,6 @@ impl HttpJsonRpc { .try_into() .map_err(Error::BadResponse) } - // TODO(fulu): remove when v5 method is ready. 
- ForkName::Fulu => { - let response: JsonGetPayloadResponseV5 = self - .rpc_request( - ENGINE_GET_PAYLOAD_V4, - params, - ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, - ) - .await?; - JsonGetPayloadResponse::V5(response) - .try_into() - .map_err(Error::BadResponse) - } _ => Err(Error::UnsupportedForkVariant(format!( "called get_payload_v4 with {}", fork_name @@ -1148,6 +1151,7 @@ impl HttpJsonRpc { get_payload_v5: capabilities.contains(ENGINE_GET_PAYLOAD_V5), get_client_version_v1: capabilities.contains(ENGINE_GET_CLIENT_VERSION_V1), get_blobs_v1: capabilities.contains(ENGINE_GET_BLOBS_V1), + get_blobs_v2: capabilities.contains(ENGINE_GET_BLOBS_V2), }) } @@ -1320,9 +1324,8 @@ impl HttpJsonRpc { } } ForkName::Fulu => { - // TODO(fulu): switch to v5 when the EL is ready - if engine_capabilities.get_payload_v4 { - self.get_payload_v4(fork_name, payload_id).await + if engine_capabilities.get_payload_v5 { + self.get_payload_v5(fork_name, payload_id).await } else { Err(Error::RequiredMethodUnsupported("engine_getPayloadv5")) } diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 96615297d8..30d30481ea 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -717,12 +717,23 @@ impl From> for BlobsBundle { } } +#[superstruct( + variants(V1, V2), + variant_attributes( + derive(Debug, Clone, PartialEq, Serialize, Deserialize), + serde(bound = "E: EthSpec", rename_all = "camelCase") + ) +)] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(bound = "E: EthSpec", rename_all = "camelCase")] -pub struct BlobAndProofV1 { +pub struct BlobAndProof { #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] pub blob: Blob, + /// KZG proof for the blob (Deneb) + #[superstruct(only(V1))] pub proof: KzgProof, + /// KZG cell proofs for the extended blob (PeerDAS) + 
#[superstruct(only(V2))] + pub proofs: KzgProofs, } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 944a8e083b..ee326f22cd 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -4,7 +4,7 @@ //! This crate only provides useful functionality for "The Merge", it does not provide any of the //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. -use crate::json_structures::BlobAndProofV1; +use crate::json_structures::{BlobAndProofV1, BlobAndProofV2}; use crate::payload_cache::PayloadCache; use arc_swap::ArcSwapOption; use auth::{strip_prefix, Auth, JwtKey}; @@ -16,8 +16,8 @@ pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; use engines::{Engine, EngineError}; pub use engines::{EngineState, ForkchoiceState}; -use eth2::types::FullPayloadContents; -use eth2::types::{builder_bid::SignedBuilderBid, BlobsBundle, ForkVersionedResponse}; +use eth2::types::{builder_bid::SignedBuilderBid, ForkVersionedResponse}; +use eth2::types::{BlobsBundle, FullPayloadContents}; use ethers_core::types::Transaction as EthersTransaction; use fixed_bytes::UintExtended; use fork_choice::ForkchoiceUpdateParameters; @@ -596,13 +596,7 @@ impl ExecutionLayer { let (payload_ref, maybe_json_blobs_bundle) = payload_and_blobs; let payload = payload_ref.clone_from_ref(); - let maybe_blobs_bundle = maybe_json_blobs_bundle - .cloned() - .map(|blobs_bundle| BlobsBundle { - commitments: blobs_bundle.commitments, - proofs: blobs_bundle.proofs, - blobs: blobs_bundle.blobs, - }); + let maybe_blobs_bundle = maybe_json_blobs_bundle.cloned(); self.inner .payload_cache @@ -1846,7 +1840,7 @@ impl ExecutionLayer { } } - pub async fn get_blobs( + pub async fn get_blobs_v1( &self, query: Vec, ) -> Result>>, Error> { @@ -1854,7 +1848,24 @@ impl ExecutionLayer { if capabilities.get_blobs_v1 { 
self.engine() - .request(|engine| async move { engine.api.get_blobs(query).await }) + .request(|engine| async move { engine.api.get_blobs_v1(query).await }) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } else { + Err(Error::GetBlobsNotSupported) + } + } + + pub async fn get_blobs_v2( + &self, + query: Vec, + ) -> Result>>, Error> { + let capabilities = self.get_engine_capabilities(None).await?; + + if capabilities.get_blobs_v2 { + self.engine() + .request(|engine| async move { engine.api.get_blobs_v2(query).await }) .await .map_err(Box::new) .map_err(Error::EngineError) diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 81fb9bd7b8..b057abe887 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -20,13 +20,14 @@ use tree_hash_derive::TreeHash; use types::{ Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, - ExecutionPayloadHeader, FixedBytesExtended, ForkName, Hash256, Transaction, Transactions, - Uint256, + ExecutionPayloadHeader, FixedBytesExtended, ForkName, Hash256, KzgProofs, Transaction, + Transactions, Uint256, }; use super::DEFAULT_TERMINAL_BLOCK; const TEST_BLOB_BUNDLE: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle.ssz"); +const TEST_BLOB_BUNDLE_V2: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle_v2.ssz"); pub const DEFAULT_GAS_LIMIT: u64 = 30_000_000; const GAS_USED: u64 = DEFAULT_GAS_LIMIT - 1; @@ -697,15 +698,13 @@ impl ExecutionBlockGenerator { }, }; - if execution_payload.fork_name().deneb_enabled() { + let fork_name = execution_payload.fork_name(); + if fork_name.deneb_enabled() { // get random number between 0 and Max Blobs let mut rng = 
self.rng.lock(); - let max_blobs = self - .spec - .max_blobs_per_block_by_fork(execution_payload.fork_name()) - as usize; + let max_blobs = self.spec.max_blobs_per_block_by_fork(fork_name) as usize; let num_blobs = rng.gen::() % (max_blobs + 1); - let (bundle, transactions) = generate_blobs(num_blobs)?; + let (bundle, transactions) = generate_blobs(num_blobs, fork_name)?; for tx in Vec::from(transactions) { execution_payload .transactions_mut() @@ -721,7 +720,8 @@ impl ExecutionBlockGenerator { } } -pub fn load_test_blobs_bundle() -> Result<(KzgCommitment, KzgProof, Blob), String> { +pub fn load_test_blobs_bundle_v1() -> Result<(KzgCommitment, KzgProof, Blob), String> +{ let BlobsBundle:: { commitments, proofs, @@ -745,32 +745,56 @@ pub fn load_test_blobs_bundle() -> Result<(KzgCommitment, KzgProof, )) } +pub fn load_test_blobs_bundle_v2( +) -> Result<(KzgCommitment, KzgProofs, Blob), String> { + let BlobsBundle:: { + commitments, + proofs, + blobs, + } = BlobsBundle::from_ssz_bytes(TEST_BLOB_BUNDLE_V2) + .map_err(|e| format!("Unable to decode ssz: {:?}", e))?; + + Ok(( + commitments + .first() + .cloned() + .ok_or("commitment missing in test bundle")?, + // there's only one blob in the test bundle, hence we take all the cell proofs here. 
+ proofs, + blobs + .first() + .cloned() + .ok_or("blob missing in test bundle")?, + )) +} + pub fn generate_blobs( n_blobs: usize, + fork_name: ForkName, ) -> Result<(BlobsBundle, Transactions), String> { - let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::()?; + let tx = static_valid_tx::() + .map_err(|e| format!("error creating valid tx SSZ bytes: {:?}", e))?; + let transactions = vec![tx; n_blobs]; - let mut bundle = BlobsBundle::::default(); - let mut transactions = vec![]; - - for blob_index in 0..n_blobs { - let tx = static_valid_tx::() - .map_err(|e| format!("error creating valid tx SSZ bytes: {:?}", e))?; - - transactions.push(tx); - bundle - .blobs - .push(blob.clone()) - .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; - bundle - .commitments - .push(kzg_commitment) - .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; - bundle - .proofs - .push(kzg_proof) - .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; - } + let bundle = if fork_name.fulu_enabled() { + let (kzg_commitment, kzg_proofs, blob) = load_test_blobs_bundle_v2::()?; + BlobsBundle { + commitments: vec![kzg_commitment; n_blobs].into(), + proofs: vec![kzg_proofs.to_vec(); n_blobs] + .into_iter() + .flatten() + .collect::>() + .into(), + blobs: vec![blob; n_blobs].into(), + } + } else { + let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle_v1::()?; + BlobsBundle { + commitments: vec![kzg_commitment; n_blobs].into(), + proofs: vec![kzg_proof; n_blobs].into(), + blobs: vec![blob; n_blobs].into(), + } + }; Ok((bundle, transactions.into())) } @@ -905,7 +929,7 @@ pub fn generate_pow_block( #[cfg(test)] mod test { use super::*; - use kzg::{trusted_setup::get_trusted_setup, TrustedSetup}; + use kzg::{trusted_setup::get_trusted_setup, Bytes48, CellRef, KzgBlobRef, TrustedSetup}; use types::{MainnetEthSpec, MinimalEthSpec}; #[test] @@ -974,20 +998,28 @@ mod test { } #[test] - fn valid_test_blobs() { + fn 
valid_test_blobs_bundle_v1() { assert!( - validate_blob::().is_ok(), + validate_blob_bundle_v1::().is_ok(), "Mainnet preset test blobs bundle should contain valid proofs" ); assert!( - validate_blob::().is_ok(), + validate_blob_bundle_v1::().is_ok(), "Minimal preset test blobs bundle should contain valid proofs" ); } - fn validate_blob() -> Result<(), String> { + #[test] + fn valid_test_blobs_bundle_v2() { + validate_blob_bundle_v2::() + .expect("Mainnet preset test blobs bundle v2 should contain valid proofs"); + validate_blob_bundle_v2::() + .expect("Minimal preset test blobs bundle v2 should contain valid proofs"); + } + + fn validate_blob_bundle_v1() -> Result<(), String> { let kzg = load_kzg()?; - let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::()?; + let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle_v1::()?; let kzg_blob = kzg::Blob::from_bytes(blob.as_ref()) .map(Box::new) .map_err(|e| format!("Error converting blob to kzg blob: {e:?}"))?; @@ -995,6 +1027,26 @@ mod test { .map_err(|e| format!("Invalid blobs bundle: {e:?}")) } + fn validate_blob_bundle_v2() -> Result<(), String> { + let kzg = load_kzg()?; + let (kzg_commitments, kzg_proofs, cells) = + load_test_blobs_bundle_v2::().map(|(commitment, proofs, blob)| { + let kzg_blob: KzgBlobRef = blob.as_ref().try_into().unwrap(); + ( + vec![Bytes48::from(commitment); proofs.len()], + proofs.into_iter().map(|p| p.into()).collect::>(), + kzg.compute_cells(kzg_blob).unwrap(), + ) + })?; + let (cell_indices, cell_refs): (Vec, Vec) = cells + .iter() + .enumerate() + .map(|(cell_idx, cell)| (cell_idx as u64, CellRef::try_from(cell.as_ref()).unwrap())) + .unzip(); + kzg.verify_cell_proof_batch(&cell_refs, &kzg_proofs, cell_indices, &kzg_commitments) + .map_err(|e| format!("Invalid blobs bundle: {e:?}")) + } + fn load_kzg() -> Result { let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) diff --git 
a/beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle_v2.ssz b/beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle_v2.ssz new file mode 100644 index 0000000000000000000000000000000000000000..e57096c0766a553be2dfbea67b7b1a2570290305 GIT binary patch literal 137276 zcmWifV|$p37DV5uv2EM7ZQHh;#_!SdIk;v0KWep766nG zaQ_XO*E7Hqi{+1rY>$2u9E=pBIotihcW7IaTOM0dkV76tm3OAVjHYE?C^ph#sXRf0Do z+t0B<<358WT+r50G zlp*NjYIUtOqmcPz z@4Nip`|Ns}1t#f04L@>3h+wB@Nt{=NY!7K%!hl>9HsiGFYp2}AA9urE_4-pGN>Yz! z8E%gDOaqVO*zu!a&u8-}5dB!;oy^WQT=)eoWF2RdD- z3i>fpAKHug8c%+~s?x7tsw(^f1=ow^5Sw@^T5KeM*jtMM+3{bdE%hu2@(Pymiud$o zQ1O3|C2L0h+4>7av{;6m7EGPS@yX<>hn@F9v)J&~s7uc6UvpxB^I6=yhgDVEcrBw~ zVEo3Q1`~wYT7cf{ixKhlqi1LF7`0UW33h}@x6)Ot+2~%)wr3NQUms`2AR?3Y)ksZz_L~4{Gr%3j|fS=`0bIeDhXAQBmL}W zts#>$V(TjcTYhDM?N+;X5m7{ZJ|t9uWe zh-f}?Enk7PCYOoJ-yG*S4HUPKh(Zj_t~D4d9jp=Ms(imAv~+@sWH2w-Rg|z6?01Ne zFi_6tm)o##HmIf|$x;)9bo-G;Ra=hVT-47)$jdnKWB4Z5&5{_=+{QVN!oB$Qp+G7x zFSB!LR?c>e5XOj5_}!#6%Wxq`svBn^^uBe$OF5V6&mPK_vwV=gx_+_rWK=A{lsD?> z`u2MgUPE-xhFt9~U9hJvvhB6jDt-K)R%w|^dN?>kBBD?4TKz_q2+G>pi$$Js^u(-LTOJb5U{wdK=97EGk+y8)E3P*ox%5R z)Sii1Q~xeR1l{@}j3ce*{i~5H1TC0dX5%Ttha~Iq*A}P7zsW_cw-Vd+mbA`Q#56gN z`PzLpWD{JM!d&zXNKJYKJB_z5nV^8Efc!JZ~@zhng;@wm331y6s$$IUJw= zlxhWxh@MN#Ky0>Xv@WBw@>uX9JspsFey*F(HG!W{Q4}x|GT!5YK!WBdC{VBQr7#`e zXpMAL#VK*>EdRkF_xNEL`qK*6b%F7hwpd=w>7UZSl-%oE9-ynamQ(DAEe4CQ1n0@i z6W`GDb4WVGDbTi9zjpt{6ykz<=#)4jG<#LaMH@CMg|nMBVEL5wy?1gwq!L@E#a{xt zANT5aP-wc~oL|CIwFNvc``5O|IH?7p=-WZ=*d$6gIA@ytpAYEK-zb4AO<-O?TI;`e zK5;R1SEXfMFp)kes7p?iaj!@_j9@UIs;`(S$&9hTvQ%HerkV~i&CZ`Lt{^vb*;2I+8^zgohsFGUx z1O~BqGE^Ju%chV+20bK=8tGX9pfnz`bIXdyEGvD0DNNE`Zog;@WlA3NWX^?10e_4| zP(41i@{~YnlCcgb*s7hwlnjjLx*J?1)5{oj_ezFeGQz;PjN_X{#Of=}w^vZ~sFTj` zK+>-sDmP7!MESE?`j=v|J?dYrq--8IA50}Y6K$k}e60dTvAUjiX5kUIp$cvOc;IR( zVqo0#Ak~#pw!>rLe+>1yw0>+QBYM1Ct^O2$<{RX9v{r-Fz4}xCnW~%VZROYG1RThxg;J zR%*_;cA)(9T`4#)c8aH7F+~ozQ2ICx5bY+Xcb9cwlPc`{PXo66~A#z%~(a+-N 
zE4T3X*_INTGwO!HMDWp~O(OvFFBJ3)mCTRyD_ZQPC*8OUW->LpOO;__jZL^8?M*`I zd@|LVgp)Pu<(A1IWcDw%(9t&9p>i$xzqI^)Y8WHu!S3#HD<4X^us^mt-Nc#j20I~0 zE{4fo0Ci$MlEy-s@9HMuyr zCFVLRFjPiCm@bz*cmhBvnau|Ks0?GDHNolUL;2HS6Tz3?(g@;4|5%2ywNgf^wG!!6 zjdE<<38a=(P%2I_69nkRP;DE{ zGnbn1QPtIa&uf842`sAbPN%TVo9$vZs7Gaz?aD*JxCHx(0wc!{rZgpS* z6-nJ={3H_CrGW1RBpbvDI(hCStsA#s1gE8!;OKL+*vnO7yj(&Kc|v_Z~!SBoo|@SRb9o6$jRVerP)- z*`BAm*=lPLtAr@e@qrvcygXW`x|qVEe*ddkW-rAUTVG5lTljeA{PKFH4=*iA&PyC9 zn&pL~>bvdp@Cl=uaZ}6VgfA2o)w-TCbpI3S>Je`|48>si*Lvx4Or`8^zTMz z2H0DqZP7ME3%5w-l12M={?69lO?ASZ-@fLc-wo|kUtD$wS8_$>*`3R;nUY|4XC!Hw z!8a`B#3?3=6>VFZ^%kv-rDmwF*6Gg4%&QL7>xEnuI+4a-9oc)T4feVSdOPBFSWSFD z#o_LTy+^30uR~d33s3S-h4t^EOB1f8fxef&_J}+iQ!q!mB@m?VA$iphLo5R@Z%*wd8+;Pv)pGC+#M}>hv}K zX^dPR0q4fCuPnPX^e%doAW%*97>;aV;-1m;sNik(NZ}kx7+-_f2$|3RA!loC=Vf*b zk1T*$9J}a(?o*W`h90e65Mt%i@o4RN(ygetS*oipcxchI7&XkW_8oZq3-0J11bRNu z@Qk)1YN!ZzJ^eXXA*v&0oOCgcX7F~M*o|hp`i~@QZ5`H2EzCJ=cE|O&{ROql_P9~Dfa|87rP0h%s*oqkVFvYI zw1p3$xpsUCPr@y=M1v?|xs@3`Ce{+AFTAO(-3Cp{43YjycpdMKwW}YrG zKB7UJX8X5Cs;}R_DE84acTp^YY8B@clB&-%S)jw0(L{4b=TO*)ebyqTQB0LBWNyw#w!92^-n3>jC~&7{NCzz zB=VUfm`ZYYsbI2@w81f_J)$ft-89oQX5nk~Z-`qMCn|K~-gO6FUyieBH>Whl^@x6z zx;vnrepQ9A)rnxr9b?@Wf&8$W!{7a4RrvK;NhuNqSz^axT6#N@jZc02W6&rtVeOyf z+;--BNEL(q>1O+ILUHIfHVTCEgpKeESs?yS6vvxBXl}5535JaQITWxgaD9{BDQ_TI zv?0)MgB5SObK50>Z?rTm!wgUvLOyVD?fw|qtW&HOslhq4Ej(7al=Hs`7txo69XT3NCiA1IrcOqYkfq%APoiTM%i510DE2X$%5X`GZFxDgehbo92a>%r} zmXFw?D0S-y#GJZEDO1N-Ws-KYBcsT|AR74D#{cx97=Vo0dyj68`b{}!L#s#AXuUb{q5xhZu$`(&9o{n!@I1dCuQrix@c^_X)>fnOk3v^M9hl@Vi=0ccl<4Oei`=zj}Y zc}B}`n-FTW)$QkJ!E0MFXde-%&g!61>OKw5(6RVD*uo`{^TI==aye-a-hcbEk0ir5dYd>dA{mOC4ot^lu)c_K8(+aLrjgb)ex;8z&jC(rM0G zP_d{G-X~BD^q%tORw+etCuB%>MX8p|uBG+pjr5(2*3Ote-BA&S#wa9w&(2(pop(u8 z5++G7>E)z{l|f>#AkJEGFFKLAjr(za@Yb8R!xMiAd0?!xn&pUpliiV*y=2q+H~>h_ zd^;(NTi?h^NUB$Tz6ut{o0UU(b-fJ~R)nnPx@%@apl*wl?xmb3myhoq|%QprG7OcA3Qax>A@G<$>%mWx9x9+;Xp%1I_i3?7-em%2IQ}lvN$^r*=x1{7=(Z|2^>{z_1F71 z@Tzf%F!#$R*G(9w)da>FmU;^1P>z}v^K3m$^%%6EIPXXTq4lJ 
zf?&TWZTUDH&x2DDTa=sv^DfJjmAQRN1`y92>O`D`HRy$o)S=`0cdcf_ZYf7yzRgj2 zh*_B%a^)lQQZ$L`to}(bI__38xm5h6;YbrlYH{kL70D9J&tT0y!1n}s@9G9oZe*gc zlZP+1$MwA?PsuHH49|=|JS*Asq5*G4eGhID(gMYxDWwiTR31(guEjc}n z)FxIVWR60TgzJla{WUxEU%cscUlH2NRJ`45{qc3o=>GT9j3dHzMxsRKmQn|<$&o59 zM#H*+A>#+hEZZCR`5;+ryq~!##H&4(Tb3>Ai}JfkudpK7eCM!+v;pzQa(S+FW&H>AD)c2bvvtl&>& z7EAW~bx@3X_^$)tpkI^4PyS1nPz!LTLw#7WgIxK(1p@c;Pd(M6m`SYN>4cv;2WK;C zV-X%i5q4tzOJ3t4V5u8b3^=^8lZJ!LfBelc)jX43p0%;+Ar?|$O)5nBLIUglaIaKP zr>~8D-1#jezpyw@5)-xMI{wxe`~IA`C26H{9bE$q(6b&pv^almJN`gB{oC2wGGi71 zDY!;ZrrWf5W4Y*wFZGykj6aMqCtmXi?rLiePg=E--O9j26y)K0S5Bks;BazPNh91@ z8}u;S^}GmSY!hCerd7!r%d;YMx`=vbA^nZ6yh9T3gsn!1sY=Ud`6J8bubXFT?N27U zh%n1%TSW4hzPML21w@ny;{HS9j(yWy_fEHap0^MRGdUwR<$i7yhoLg@%s!gjtgokR z31jL)l%!>=@60Icnb3cucPB5nUSg_V$Z!tJqXt?UfS3C~PQD9GM8ek7V9KB^&aio6 z(W}Y;p&YAmWVF%bRFukKS(G?LPk)BoSEF0<&d1Pk*`j1ueE7ZXhXz?di}k87c?o;x zMBwxjrY!5x-uYfDg$B9EK9^Q!b~KD`2(O!$)?*1%B0a?p?K){h`}zI z5fdq=8|3PhFQ=S}*^DBak-hq$CYCpZi{CnZN;O5e2~RnlqIj_Tk1wP9^A$xmp;nhV z5vAwHVp3@_$1edrfQ*vF)N?A_Pq|hQ73MB_o=thj+32>YiOJ*3i~iga_=Wts+b{uD z5rmjuoXcbL;RU6Awl>ueXekqU31YZS5= zKvIH$=Y?(23=p#o!O<4XRwS~|TAk^@qvNnd;=uV!j4w`r5wh4mkr)anpeP;|dW0$#A&|E_fir0|}6D z?Q#F~rqnNK620JZHHrDd3bMjE?YEV(K3fCv`2uJ@22@1etd|_`++&8c$F%r0KB$H- z4mB>xj>p-i&H~QAzCo|(Q+#zu*Gbih_|EdRKS83gOzbxJKG>e7&45*rgZAx09)dKA zN2i=qfsi%b1CkxvA~n8OIgWgNAE5r~N_jW_MO5a`Lq9-6OL>Rd^Y9bHvu`SsV)D;$ zBp{Shzy)iBhu!WSH%39OMsEr=c;Hzeu;)K6OI{j~1n?Pc!kygL4d{YNVPY^N`n?^t zedg%+Sg*AL~TY4G-b0IVdQt@lM)RKe9}|D!<@VVYT*TMx#Bw~;dE zEAz7*xEnqI)e@{9BIPs&p9oHIVff7Pozq2gj8kI-zmYcs!V}vJpz>Rv(aE3LnwM_g zd@F=a+kG$)sTRqPTO8K_D<|&Qi?Za!xcthZ9lbGwOSt(E`1nCfF;2SFlOz=YVcXYc zrilh7kx(#oes`h}^XAB~1$D2Y7i=E=OG+7_81K@ppCZFsYQ7taTa?z*tv%v{&bnd} zpS{o%F6jnfR!VbR?8O8HFDplW@RC5L+DA0_1P>sa91y;upFO2iF zW9E=qWA5hdo#`KFZ4OMyzyUxB$@U%~M+W{LkgR}q69;dk3jI7qgqI|95A zbiLENM^9@}vS4Z?9dRgeLsYE2;X_sZ9XNK@SOt(Kaj;Yb=>(C?Lm5vBo&px)PU6n^ zzI?n2(~*P6iGfSsdtC*pw;WC&9h$4`@YNYXYoRQRwXr%Db34D$2S7XVlJ~mCjAh;6 
zoF&4q`oj7Wr`OUN8G&(g14%d`0x%%Qd&3sZQhh5i(@rP7pyS>`1Yj1H@TbL1z;gUp zfg6w1PBUyojC*XhA#+GH3S7lUk6y-LLcQ)-3{5i<;E?~yJLOu`lu-L#5y`W`tvIPv zP_j{JnJJ$#p4u7@2=yR3yl|(j@)XApWAFVVEGpC#g?r|H0D7)e(hw;EASQ5gFUG_v z+tUb$)qS{_(Og#*<3i|65-%hjlIQsv7LFkKSwc5 za^FCJ*qM0POO~oo-wyK2f(c^6_xq43^5D=1(}z)scxO<6E4->X$&*v*a7eBtPPQJ! zo?wwif$eeY^&;U#d|4}v3w*b>)B6yM(Rz9Pj z{u=X5;afnmxO)xV^v0f%i3;N+t5cQ%)|w2rgjw8ycoa^6`YS-;HDX{tRk&OB#zMUV z6WHEsdtn|QhC1YWP*&hI*P0X%SYd&vP2fPL--)&G<@R;PpA{bIotXt%-JAgYS z-;n+k7FAwkELX-cOq#V5uqVGjfN2SK|A8^Mm5i;RMwME-X157{mR_P7&$%{vq#Ksv#BZ8WhlfC^RmqndSJ2ZBzdC?|*gpLF;SRq6?K z06s5P=|$hCJlD9cF-v=TJ(OEc)$!M0=iN@)wHBxtkoUtK`^m);9X)*3gJKRf`(0wL z(i}9|RU(grbQgLE2<{sE=}A;k!9L$Gqs=ZsB)IF|p3J0;F}#}Y?-S_kXQ@?)zD3g*C>{+{+?Xl&-+G9NB` z*JirG`i+B3vsd1<=jtNoXDxwi7AL`jXQ*MrL;#3!xL z4N@j7AQOg9>noNp&|}mtG>0^6Y3-P<*@f>6Zh;rsUi>%%i0aKMQdE#E>`Jb*D?24_ zW^G*P3mdP-p(qGYIOTp(DC`&AxSyKziz8wqW|pZSi9 zqkt|2`;$BJCGdqr=q3<=7KFm>EHxKGl$RdU<-0lWZ9YXb2+9L-d8Njo2&b_>*O=4^#< zoFnm4=o2^Cl-mGimD`uI$TZ-Ak!g3jC}xso<{n^TAt>(`y3Txe^$64{;=klq_z3ij zl;Wv7pzMx&Q@z9vSuewtF;FUk)d0VY!wC&nL8?o1oMk-dxLBzMPz%-sUXME`!%LJhvXL@unVM5{tiE#XHi+3QD&p@HR+N{&| zoIXLBA^Xkc50+VVw-YN0Z#a+jb|`=r(9&>sLzF3O*LeeiVoI8exI)oOaez6xuwts{ z-Us|#KFV=CuLjRAtr~Rl+=Vv?>o)(BkKlR^W9$xdtOMB1m#@bo!?n6xLyPt&er+U( zd{Yb?#*KDR5KC5GnE@Ojzjw>NK)BX@CgO~ECN4D6cCo3YBAT+_39Y{J_yO4ij9-ZC zQAwXp@0PJG0u|EGjScUsxpC?1S&G@QF&=X2~pFZbPl*4b>-$gJwhef4)oWal!@pM?8E0;>@;i6tx`i#r5wa?Dx1X z{o;%8nZ3GWedr1q0=6MEC`3`+j^E4HAg#Py6^DxcrR5t9R2}AjSZiB=^t8&EOidYfS@CL}$Koq;+$8=QCK@x}r{V&;UU3u!C-S z7Q^vHU2Vf}r!hyG-wwQXchM#rmC4JQW*o3Pcuqb%ux1Q^rDIiOpE*uAJG7*|S2pr` zP9(ZnMFTd8Y~OVMwnlLlE9;-3tzKWPqW&x*(5w**#7$1~HUdKGMn$X42N)!x4E#!AIpi#lg#+R@jPOT1#;voKZ>^P_Bmdp&<%I9$Cs#J+%oSrXKmxxD zY8JU>qT#WOZbAQZUG0NWZc^9EcUl>uqFqfHE!u{ z=#@=5KOyaU0q`>kT9&Im_MAuaQbJO%kxc4(oan1&l6lp?vL3oG9C&?)2Mwn{9OS@s3Od4(gapimga90tT z=NYIZRO_B3Rhn>RF5gk489!{Uk+dd&PT+*>=&9WruP3-(^DIQO2sE-w6FVmHNXFW#2%cGQ8wPxDs7!|$5{?w-o_ zxLo>0FQjJ6gcr~YtwsR|csB+1!`DwVt>0ijP>JV{v&s4nzEFZ%^36&Yjs<`z*b8N> 
za@zoUU7|ZGdYCh+yaqL-UhxF2={yej7i54wRxlI{X`M_{3sBnMI&h0{4ZbQh34G3& zVW*1MTm!t#rrsU#B2RHP#V!u!p&pm|+Srg{apfQ+V&c+4UV()wLzl6ceYY{t!)3gC zH9m@;CzeeGjv6K6%pLZ^71Y~UW_ zXLba{&@cPgnS|`ZF9W|F;jP|))fJ(}`|WqIssBhGSfi=O-{SgpJ0j~*hmcMuk`Fab zI5S9Or;|gk4WO8S>}W~ws1_kIcpnX(-eu}}1O|betQ2?gXc5Fu0ibbkENN3Ma@eAI z|6%-JO9o*-3YMmapo^IEi)Mum@Im?CfY@&hkI@SMAzak3lJUEEo#|!o06c(t@)Yffjf~onTUO&r27$|*=n_2c}y+p_*ePU$>(9iDJ%vqN*T; z@~r`0o4`cy^a=R^`O9aaxkSshN(jNNa{4x~X4a5&}>LdbnM^!&eTZ>NDq-ndE)tHF|GJQ?~uz zWRLko$$_0zSrm;4Wycx*e^7R4?(rryxcfC;+TInvkZ;TRGl4E?gIDz7L=63{DQ$1Z zx}0=V7q%#fdq37@3c5lQ#`UjL&m}2#R8M6EC3lhl}Q(n>0EEL=YcgSFvuk=Sn59@Eb2n?tjF&2F#}mH_7mxIos9& zowkV^+1HeadEURMxy;`+wzgxW@)q0qg2Q4HbT5p7*_u+hh%co#b1Mye^YURK?YQy= z@H7%6rZ5x4TILXdCAbU$ z>-2$Le>e8l*YaZcO+@oWGMo(WRPqdv|k{UsiE7$uc{W6VtEHuDe{R$W}6;$BWuJ(Jw<)HFpQyefI?XsR6olW8D zj_?bOssLGqU1Q|UpY{n&DWeP|D@|s0Umbi3GrLO_Avdl(-2g`1D;9h^?zys*Gs81d z@NL>I+)T2C6LLw?HitcuHsHzF`Xn67#HB!4W1f;HML`*>fZgLRDqQzn*m8oH45SV5 zYMFm&?)H!#;CO1nRIZJ|T9U3kUyC224OmP*0i-h0)|uq9wN=6Ltz>zc{@$p?q9Dc_ zWj=%BW#L%8fKApEVeXwH$MxF)6Jt;9GJh}j;pu(Mtu%G}wMl$1z-&!56Uro0ZwVtv zX49L`J0ORnqhu%=>m(#mK=8T>>;{patW}wr-Nt@iZ`f>Ief()-Xs;vhPuHo{j?Y^M zoWExpetaQ=3oli^Rcv6rTdQI z0@BMrctHudO|JNXv(3k`s0Lk9s!iK-!`zZh*^PJE&66gF*Kh0ThLe5(Du`n_$ws2~ z$MCUtxa-9Eb5CyJx%{2D4NYy9el!69=Qi`k^`8@0WET)G2_`dSfsr+q3RUS;KI!qb zqV@sClV|YYJrQNMzUB|)YUYXssYbVm{`rq8l$)-_HX{QL*eF&6g2O1hJb^vAt$MAe z7sF}AGBehLZbBTYsHwo|Dh~T!1Xafd@pobk6R~2^RMhtTUKbSew@7g=Ni_icp4jNy z!2L6UMY=1q;fqK2V}Ojjo({K_soe-Sk^|UHnp?UYENr_$k9wx!F|mQ8P>iW_etoJ! z9R4b$@B$34@;#oRe7SWR?Q?|(6>^BbDk;3F!)bAmXE@ZCT!84y(QuKeENMeEuKDF4 z49RqS6kr-ITAJC3m`wDb2*603RLn+!0n(DL|3=}fh^8Slf9N3p1A!V)*>JA-2hczG zAh!bXHn&wsZ5OnSx8FZ|Z5W|H9OZdBR3wvH0eezo)~6jrVAkC&eG3EA*ryK&L-A{? 
zRuNLDXtd4(m?p8)Oe`6D4C|}EJc8UcD1uh7>xweI!lLZk)ELBoz8Shutja%Px#QSJ zBRT4M*_6cXkLFEJSw+yb1K(x9-NO-MNf&bIO^}jRjFZfzogh7=q3$|ze_aM%-?J~U z)cW!bkt);5>c4$K&;ixP(h#khhY6F+hS0Z_yhs8J-Nc!~FLjJGEN@sTy>K?`3xL>V zKXD69qWLDT7^DECP1kJxgMH|*4;s4K5Hyg;%VL=;$KC26KUW!SMcRNO@Ju95pQM*o z*(^Mw3__nD*3#8m?=pqCQomK1^^1YI05Nv@m(2QJ@!v-?e-x<@DIQ=X{YSKU$D^LY zKHLBg%k(v5Nkg4*Q~o24({Da286AYGKHt3baoUp4qfDSs;KWCqGSdzJ>V6EUX!AVz zam)Ldf`(1h9ZpZ*RSQJ8zJ}}OIj2W4o4CVoNeAu4xNGxiam^ONV8{=xuK^?nS$(&s z+{Cq~_O3eb*SDrTvS-f52WVq|FlaZ3hyb%MZ6mf{n4k(l+-|K#!OxUPZjiRL!Fz@z z?8d`$e}R7I{=k%_!4GG%B*LgBWgmf0wJpK=7t`MOv67WWJOHN9WGTFpgy$0vc*H-m z7T?(pv|~Ah+5NyA`|yqT2jU!s426N4Qm$k)8z_X670fDB$j83yOGA_OrpT^9Ad+GC zRm>@a#OpUqc#QTw9B1xaU!BnmZiiQE)V<6Z@H=Klfdoe7I`!|C7C$UiLb4{_k9?6T zoH^oY!VAL+p!?I94%8TC?^hbRVC~GU?RcNAtv#o0@lnLn2_NOadsG~b+ajEs%6^yh?oy=j!60GDNbc zbc=;-!A!@PqKB^dI~B|E4r8kC4P)`V#u6WwxakGJb;xawPQGzv!N55{g_|2@upVqT zDutqPCpSMfCzA!9m5yMuo;0e67hG2Z!1&J^evOeOM`Js`|Kdih($4~n-c-WVm)mkl zKW5#KR-`(j!F@j6E5;k~PvA9qrOtt0`8_&s8$b8#S>8exkAi+-4a~$XnRWe6(G8LD z>J|qKMt$BzUX|J}{erS4HZ`o{TEc0CuQDld#KAY35>tUmYBq8FeR&=tR!BAwQfSdf zFeP=}*c0|v3r4M0)ob9wfp*2PJvU%{G@M!#1M>!McdFNP`n>@QKRqBs_!ki0N5x(G za9UFuEBXeBM=8g9lZkp_W$1h%mmj%GHVvfJXmv+O5wW_UG5?s0&uvh|cKGz%l*;}P zC(6p4odw>}>+%hk;CEJ$42KJXOjT|cPxITPQad42p?W-I(SXaf#64FgAC!|d{{JlE z9xX4&$lv)&Q*9-Vd)fepW~pp6-Eg*%U%M|efHabalH+b09q{dL z{)NA03K9nQZsA%?c@!RMi6PJo;9~riY!rD2Hq`qL1p8uvyPNlPizH#SY0WQ#Lu$AO z@V2#H1YfGsF7;;z!G6OT=TpKTlqprcB-b-iydlK^0ja*yiTP@uldfQok{7J*J2p{isc5$-@|xp=pS zebBgJNe~S|*Dsl%tL$$DIw@{QP0nxNJ&yxMj|0rF*SqJ3PQWke4WoY?DtxmJ-W3mNCUn;Hc4xOAm% z&uVni1&(cz2YEm8gxdb4a5_-Lb`PUuCHv6#k zcQGN#46lOn4KnfEYY@)P`Wvv#YWo58knj5LRb~&RvrF{;vcyJqR2< zn+#4ZnJ`p@YHD)!AIZ41-x1$Wl9ftxX;+}ARzwV07yNVf7yWG=oO5pTB;}36~W*8c_0M>W@TQ}fqs!D~*3Fg+2cu#mH zUM%7(setV4ED^c?0cg?gIr*SN zNY-qM|F;)+I4<#PwsM{jXd>;vg3(65pS|frTnWdd{+LFH9kU6IQ79WW&z*+N}VRv5r+Q$TBCX6aMO#vA1u0=vtXmn&;1@%AlVBH@+M8sd|*VumZaN z$`tp>40WvqEs~*UIoq@uFRL*?RbSN(iA=rZBFcQKDza)VHLW@>rRNiFx9(T*Ax{lZ 
zU~c`TItt8TDdUHkrdCUh(DiXL7CS=UJ-hZ{F3ADn!3qt^=j`lmEWO)+Se?@-25X_b z6hG7%u4^YV;#7ccvjYp~G!{C5(GM#Vmkk2pC3yrVNaROSUkr__M4*pO zJJZ`-`W&6j|MF*~7SN&uf_1$W?M?u_z>cD}UR2ZNmTD4x1lqObSC+_;1|%`RTplSy zL|$2QGw$tbrEsd;6$cVuGg4gW8{?>x0j?4t;J>aNXaYDI2`Nk%HYR5XmO;D2$)d%< ztIZPTfXg!!P;v`4rm9GbGZwnTNcrXSq$;o=6(O^Ev*RXRpocn86lRR@i$HtVK{;?n zt#U@-Z20;W7Yl6juQ&e!Fq&lBTG2VF^_<)-Qgw~aue4yngRa$x^^wO=!LA(y0EtH2 zh496QO!d`L$5fLb3_WRZ%)AO%cS_8bt_E*_a_jEnR4uTTSsboy&UmMTaMC6 z2EI$@JF+C;+g2s8_((U=%~z*Kb|vloJdNi=o%aCVMxlKc4(AUbphzkA=6*fL;*wm{ z1xJrM{)};b8p|d6_`S5s*U1Io#%+PKq=j@|NmSQ0b?d9JL8!CRQzGweaP0WsII;nK zJ|(=kxV+o9vEzFXzcfYs@$s0KR3UTc8`D%P@ri&vONp3`+%1%pfKi=z4?K_(hfk-L z9~+2X5J8)%aB53EH8 z9utreTSeZsb+PV$IC}Aoh3fmAicwP#LLcU(kXI6hoB|Xk<;Ch!Jv6L%FnNaeF(i{BZ_{>2^ z0y9OoD(4|h65AoQe~-oEhntC~a$v zcQtl~VGkOBD|F9^Y(0iB0JNFz6cY?>`IKIFdLyHH*|u?^>O)GzR+`cvbLy(QfM}gu zPiqsSe;Ol_8#mvxSEgm7{5N~`f1TY5rm){C;Nc@P!!sNU{@9;Emwlq#hc@Jg?t3SJ zKzH=|xGjxw~b-~UF$7GwgM)Sq3zYGJZcFuCMy7pwT)RC7Kz(QJ=d2C|#ytYkYpOx!Gh zz!M5n*ODv`&6gB-|Is)nBFf%>F_u#m4|eaJjsgNqK`@nu%7Wk9r3XFvWJ^kcYvN^$ z8W?V*IE|BFpY8y|#h|`UKOOi42S%pJV?;a~F~-}o=lzrzb6$Zh(G$R0Di`{eF|8;w zUyOGOrL}RMoWPW3pquzTg`>zy9uq*+(s+N-%{FeL7(*AhhpGZbk;??6Y4%K zxm-!exO@sa%D0BceSnF%&Hos<2d^-=Fbv=~d)ZjFm+fU^xfYg|xoq3Em$_^#TdQT) zvhjU?!#(Go`@Z*ienej5xg2~|KLUn(VzMcs;|TP`xK94omn2CxdMnt z^J|h`7K2_7(%};f(&3Qo3G+GOdw@-YVNI2bWhrrr{_CvQGqH&?B0q7(@d-EbHgX1D z0gQg0ku{s?7m`Dom*QLA$R2hp4eTX>WxqzH?foJY1&jt-=Xku+W;b{wpOXJfwTN-) z!$jVTi$dOJJ*-;vfYyRrd)&$8VaT)dTuLxDl*2<2QZ`0%4KHZ zOHWyujwtzmRrhz6bp?;3m6Q=b$YF*3`+v3d9mh4_{hkT2!NVWD))*Z=Y2d6UHsgW5 z?~QrgA8gdxYr=4Dl?{Ce8PJ+;7kBxNl`$`R-MaOe<9cwoW(WFpfK^jm$)BebhtC6b z$i*d;ef}bf(4v%h1cH4SQ&cbmP(w9BQl8bX1Zv;>F{^NxRbGFt<6PYuXAtU{MDwgQV)xG-aw;nQ|mtX$dSbDhbQr3@hBP-}Oa)9L27 zeec5$`3gFyu0JiKq3~f-`%8B(=trOtHMz5iQHVh+6jj>kwZGsOI&0p*GJ^+8*K(M= zfE_SYKqoy2CvCVN!MsiLI{hT_Lu`=R;o6b8b)*Wnl`=^$hGOKn1fW~};DB#iXOQ_q zCAYm&cRDrZ1EDh1=&)+L99=dp#(bFgBZw{}s}d&bF{0p{s)lg1V_++iK>M4lgf;Y} z-VyL4AC#ZH(DiO&kAJi@&5L_vmii)m*lv)t^eIIDXE6tSEO6G=DhV_!SC2X#snIA5 
z!nY8HWXB%=vz5{z93vVw3*u)j<3{)z$o#2?%OE~oHGj{{34IE6E|r;^e42_*1cTYS zQw6ZSlUzJ^j-qhB)Jd%&Fe8w6lN!mPDW;uGK-95RP98&}?Hc-@G~|M!||in&R)km60uvTx{MNepPCk6&G*AUhwdASP&n7qts5*k6G zrRbZ_-P^N&Tzfn&|A6I%yUmdQPLe6l%9NHt+Ei&sNt2vPmQJ0;;sk0H;sHEn-A1#=L!Is)3-MdTSbII+lsmEbE zH@GcPt(9+CC_l~V^ak1xxrAR`7_5~2Ng3dzf9+_-Mk)KYg&Z?pL`|=hT7el2uT;kL zzbLjXfg(2-h7?Ng)0Ze0DbWfnhri^u=D>+DAKGCkJKu@`RyJ%1;*;>aQ13g1QxQhk z52QVnMX-oU!W|vz=HPU@o%itk6X;&BruSK?HIMMD#S|4s0dr%89W3X6txPW1hp3Ov z7PD)&K2TOBryz814#< zNwFI(Aqa5IyrFU-?ReD4BI+PqP!kBP9YSjWEy_33pVx*@?eRmVNlfomGop!N7B-!B zAq0N}Dz!&}kqHq(fwQ3U*ki_De-9X%u;9P8=n$@)&x++b%9t$x6urP)DmgX>j)RF| z;wV0d^qzD5@)*QjCi4464Aw*@M{M_POwKtUW%-v?olTH=APBL#C4vbNwnA&GL`t6y z62BfnSM?u>^pb8Q+5$qD0iF|IcR|P(uOF``CSud2!F!?px~& z)*xj8T2p%M=Rk(T88`FQkyTcCgJ>l?Ba=EQ2E+s0HxMiAA|~kVTdU|Ef61&{)z%Z( zl{JN+&PBvvYyLIa0PJ6vI^x4th@q@et+zo8-!;(wN%KZRF6F7}IS+*Z0OPMCGhg7ppwl)`#I8j&S^n+}b|Nhcg#wy;|@2i9WC z)aDGp8TFMH$xsWHg8_Ak`zWh5G8*2ZUcMvJbWSZC6uY3-QtbD(+*YFIJDR%@`&hgf z0}Sk^LRj4Fb_pA>_Y?e(YPKAGv$52M3iXC62Q>bCp?SSs%H|~1L1+tZ&^_-o0qO%L~d_YotD}ijc zQAIpr7}V+Ih=vW=iRH#znaho9mCYpJK3N*;ChU~wM!KI70CE&qiCLvE|Dlcu3;AIt ze#~Wrx88}5Efs4xSHi`VzzA`iQ&b7IPNKl$o3+Q?(^!nyR<9`a43xrVEE9z{NafnU zxQ4!oDq^4;S-mA}onQR(Ywg8T3z5t~I!Ue@U{G(%G`Jh5&62Z-zR{*g$RL{GtbG)& zVtr^Z>vT#3k1-MmBcbCO`hTmD{)iIexe@-^?wV%$nhe!mP(Y~!I7n)Q)}guV=3!y< zzqqi7ht(rxldd`@c*{?;wR>s;7$+tii> zwIVuvp!#zw^E+eyYsg3-8%}cgjAL#jPd(X{Rbn=vN`30H9r#UyjS;x6%r<((2W+?ohmmNG9C>VZC<66(Q5O`5Un`I>x_?fk05jbCAbNE_FG+ z%b+3_*^0=$lKjysLJ3V}F}J*H_!6vs>(t1V$8>TFvhvQ5>Flv6pqxSLpT@hv(u>W` zpa5J`@^MKJ(nNNeinFy^M|lAYzm zdhE5C)yg-VD_zq(0KD|8(39OMF$!Jz)LhCY@fRH1H15NR1sS@{fru=4I=}y+c1Tg*2QSXEV{J4rc-x7u=ucnL2Vl34=9&m@fnSk-wK9PA+ z!VFvl>pY)x+(H>rRCFe9s(y=FoYylNoM?ZbC*2=Yn$}YRVY;+zm4{u7(fUoQgK004 zB1vm6d``uBlEow9Sp#4GZKEVQ@rJQ<;@EmJlVVKnwRU9=klJmZzq{;SVy*S!`}J?nF}*CU43- zJenUk2tyBsa%%UkW7XDEqXPziI*jmR*q`q8BWGpHQ z3(_I`(z}^AkKh58&sg7k`fL!?UJSlNl2n9QIoDAR*bn{eYS0RSLN&B!d_RRi8dczSjp=%Z!e^Nj~ zupZF8Y+p5XmEVy-C+k;629qceTFRq!pYb+;p_H=;E}(L-z<~3DtEOjxi+q|pDb_I| 
z1i6dnc()(e{rEN}3d*qP8%bY({f!@ELf^!L-zIm+&sw!yC88x->C6LpPh)$7W^-K&vt;O64^M+`kTFccZgWo*#{ApxTEInG zWw;3Y+fw^(OYgg>hI4%p@QoL9OU|8O*S* z;iNzJGxySElcM6P-N=le!*7&*;0V6&UNt=fE(gNDxl`J3mKjI94dg@T@Q(vR8|z$t zf4aNpzI;9gVFu`2sUwX+`|uF+$x=Mx5MiC$7dlRWwBS#=Y_x+a%!^x?j-9mYo)YtIzQpHRe$GxBzkIp-*`?2;=9IFuctIpwMG{K!wY!UBf((fSRkTYDFsJGuE zT)?5Y4$nb4p9z9+XETd)OIYwJE&s-g4SQAFt=H7eLLrp8qWK zBl$v&yH%pcAt(0B3aqmZg#s=Q{13DsXaMDeze#LRdT~G0whJB5hY>?n(oW5l$x03P zK+-`V555kqi-cGwF_A8~+8NQZSN}zVhEO;7Lf}B`loYO24JOQ(^RwiWwSJly7>`B6 zceRzWdWDdgDCU`6G5#{M1!P9MLk|&m+H-1;6g_V982T%z_`ghj3SsY!M|i~CK;&67 z+x=T7y|YiAb2m;CM4umVZt$Usncy6{gnCgTaNc2qU{H$+-Uq`GQXe)m6L5@!dU^)& z2RoxVR- z3>s0wbdaPuzFcyXo(umov@t25pN^|J&3|i@mJ0TI;kVhd>iU&h=YNoa5uc)e$7%sC zgN3+!3|*&BW}wFZ-`2plq2^3mx|35#PPTOba(h{}ao8#>X%UluIbdx17 z56|{}duXdXeV|g)0m@Xy zFbb7Ef4##8S!N(gp!6%f<9Ma5p`uoj3`h)W=1b&}kcPz%!Wa^d>zz-1ul>y4!EVe$ z>IR=%3T!Wi;}(~Qd5vfPwz2afcqvFcI+vvWNtqY;_WF}x7HB;mFKTzJhK1{VIoG3_ z$ilE?nQllF%xkD5+vR%p0TywTQk9!J4Ak;SfAG@b9rU=IW=9(^esE{VW*a&NgOeGy z!9(Gp(_Y$W)DW?kimoj}(hpxeOm2TGIvsoV1N5>wif$L`(9#+N>|Vc?{%CT+SY0-% zaUwtLFcs`(U=g10^MT3y3$9rWt@0&F-Ev}IqxTl~EKKsN6i=KfP;e3WCTN;dh~_8r z9?~a|lUA+Dse7mdSuuB+xZ02jZp1fSKDs3amG%AVaHiUIUT+k!L94-^v#5yGcpQNJ z|Nhye`_|j}c}WmS_61vkSU%AA&#^nb^8{?Q?e}ga(1SiLm=dieg6pA)m@vFhA!tF= zD1tYW0#(6fUa;1D_-%H=~C?E!mBAzaiLVHtVh_U}Q)44e0 zY;sbH~)ndu~}`3?gX9HV!_oFHJ*l=XMyg-L^y@CiYb?G+NmLo9S`q${xKGu9H* zMq)Vl0G(W$&0-0xHB72mEGbV(SDjXIQ~>M9rjdez$HdcsGp$6P&ZNU?`BXnCJwKzl zS82V}3<&k}$ucGx*HxVmS&^~!u04WoLxtR>uygzZH~Oh>2k@P+uP{sGE(fPcJRHKq z9qXadBTX%;B}0;ZSk6(60YmhBH0*b{6eNt2Nq^QF7Gp3D+ixg?diHo5P%#yAfb8H0 z)1Irs{LIb*-fQbUU!v7(zH;i$W*u!!8r{oIKp1+F*G*|gR77}m&ghZ}k3l8%?dHy& zr)9XxaVr{ty-<JuX`yozd*}v_eRXjeaYfekX`EJO7Q8ZVBi?H9NZ0_vJ zm7K0KV~~4q%BNj-sOjnfy2bB6pW8LL8%@2*sUkdG2(_bcec=AZ~N&EBf_0EXwIVqmAetnBJs7yPcynNyRHP-Re!A~c*u4O zH2EIo^pDg%shmFwF~_&T+#r2?n{*rq6*|wJctQK|#5I3r42F=;X&Emx$$!`LysH%1 za%KcAnSABRnG-Kgc^!yh;W^! 
zW!XZ6GT39eR0ESpD+`d&v%Coj52Xn~XuRYasC$g`fmO3tpy;@z4)yau=NU-j3%IsO z$_#ig_v~-J;WGOhXf7R#qJ||HmgigOQw@}t@kEbUH`B^3XFo9qnc{c!72Pu3F47VahZ`#^)hrFaEuawFP(g5iD0ImCs#0ygPTOKz zrH}JVW2Ugq9k&vV($Nt{32>6Qhy3TRvE4xsaQ5(X{n~Jf|5LAgV58=(;tsE{17IJI zN*6_MsSANKf1;(=8E5A8t<0s5>=g;RX%V^l08TuT60)Vk2CE@hbZqmOEaVfREi;xG z1%hS1`+Xlm1D79Ga*~i&RC)!R`6(lH#D|Qp9M&INOuueFVE^C700hASW#6QW2rhV0 ztvtgrWq45&Ini229yWyf;^GNnz=-_BI>jf+=lXGYB#;`=GQZdV&=14k zf)%{_*VDggF!1U{pPGIy(US9STH?o6*y~}U!2C6*USDt9#d+Wa+n4QJi)m*Rbp6OUREU_}--mkgpf+FVlwzKg zxhAd|OUmfqZH~vibM>Y1J(6eL{!e2eP=uuR5El7Ur0BL1vGnzC?E zHaF3YmsH&7VC~Lc0TT5BQ0%uhFz4-swY&c@ic9$*P=&uMyoNT-W`F-++IX-8_#&w8 z9IvNoQ_HGJG5AZBA%f1W>*RIyzE8kg4i?vd8z&ieC59!=Tsn;ASG72zd-G-gqZWsJ z6cOEK1_cVB-sY(8EvI`f2pP{b`VG5G7~yeRqeDtAIP)zzg3u7~!K^azM;fWnlmvGs ztHR1)yiPLL9AKZfb8!oga$bQJ7&YPng+7d=0lQM8M{pQ34sFURSSu7nzeNUhdm~7ArBBH32@q8v~;Xp$7}wGj5Ltr`V1G~ zlBPW&_H)j3^CK8=PaGQ{|?o$8t;1ZH{sRLpXE)U4u> zDq!q>D>)=5s(VIV|8r~-KjjOzrtS;AscV@Sath-85%`gh*?9JJrS~G@gE2hDE4{V$ z9wT-C9B-bBwcR3I07U3~w?9wMGOV|3jVjN>1UB=esjsuAXj@?(e=Z@=1Dz4Czg*jx zu`GmW06+4{|0SfXoGN#RXjvbJJ9D!_IrWlYW2ypO z0TftWVECpnoXflww7U@1$;Pd5ygJQ`@*~U@g*7w>PtaZq6dAZu5Y8!nPg&r-q$phN z#g8Icx`#b7m~-99R5h*#Hu`v2!*q)Go;3>}_uBcxa^((Gnvx6ZAC7bq33{tRmmcRm zsh-t~O7L_j9^4f%{nQBdsiJ0|VfcAoFVqJxTN}t#ih->4sf{+>Y@?1NF2*2wob5FD zDQVm6$6#U?97$KJNwBLwn@~BJu4Wc zl{ERuLhmW$E~sW&lP=^ytrYu)H9$uwtwpkeLjmBQJ=WOdV86+@e=ZN>e$KR%X)Lq% zp(b$Cer!koGyqg}eX)AcG5YvADqItO&}JQ)*u;L!(njL(Kf1DD%>~6Kr5s{(nlZ%7 zmOe-i=2b5W|5LVCpQq&+%-ow+$w5J2c#x+XQVAi%7_L2Y&TJ2mMx1lMPms10+)iCY z10EfW2EH$tMo=+{YKBSeryZL#9rOpM1!`y_d06r>;N~^p>_W}9p^m!zeiw=3(*7e= z${4QQaz8CEW+#stfLE1x&Lq?5U77?`gL0)&$=F8bJz<3jB+(HYUFh^6Q%9Y#gCXn; zM)ZYYlKkU`P+^4&;)qCR@9dv7l0|}`Wkjz$Bj*lw;&&-Z_a{EabeYE zGMGX@%Ak1g5048h$pk#HzB3zL%;%1f=oh7PoSvf0n(H87CDM}z>oMB-)!PIn{WzuY zDxga#BJU4_%Y{Wr+g=_>Aq*zf;x0idRl{~CXGwTvV387Wr_e-sIItMzZ@2}Bv6NKs zsftXKifcMoZB9Q#b!q0`5?Mdy5QtQ_vWSB3G!}8nm1l!H@z9?n9FHZeC_z>+4+4-qAx0iD8STdSkbzEvwbt8q&p?}Ne;gvRSg}4xlmU9! 
z$Ed6)j;wKl@ZG%MW(jr<%lAb_L=GfdQRobR7y?3km!9{%0mLu3aPi&(8CW*@xO_K3 zLw*^Ep^*k;>A<2}GQ4Et@_l_U?1#Y5ENW5O4276ey}Tpy6Wed0TYx7?v*w$ba`kG? z9$MoJm4JqXcnl-o@LiR#VtbdwI#8pH&}a)f(d@@mx?j@n4s4B^5DPK##O2GiS_=P> z2$XpW4aJl{N3p0}ar3W@6Zlg1b+)|uQD)TeOlHfhgZKOKxOpvsRAok1d#P6v@rBR~ z%L{G+$iPbYecfbE!0z|iRonA-mYGzGSzcmHM;Bd{Oq{L9)sT_&#g%0#C@rAd8lcJY zZ?AzmS_$~d%&KA;kAC0c|4H<{jZ01o99SAu&SQ*P6=4L9$_Wj`UFwT&c7@AVLqQGl zTlyIQSZ@Z%a|J~##R}cHL%V9JGO>o+T%Hi9RQ#zoq?ZeTu_>E|Q=c6FYWe|A$59aR z(11lVQ*UH2&PaS+{Y(Z_Jz2Tb{kzTz>lmb#__t*_CGmdqzeKyK5)DO!Y3mH8+-fYI8*0+$!IOcSx;7*j8{`hSv!^3R9< zXva!FTHyZAKOPh1Z9s8NI+?v6?0;V$BG1Rycqm3Qb%eGp7yJVrow`Fmh`-v>m}{#? zI)*cnSVL%*cg8sjL7S+QYOOj@+PCU&%P6K`%i&iy0`}o(HGWicHG)5rL~Z@ERjD; z$EF(XxVH&) zG7EB08va5@W9;vZ{B=2ArZ8Z(DiPGh7ZjLM&sycUMfc%JO~?*C?V74Ik^qt(K_~?; zU(iFmBwcZBs}F_a8OrmaCYo5`EVJvIzOhk)1W`a>0l0M{d#bw>a3m6o%rO22K|*-* zyE{;IMCA+{_fz)RAy|*>ubGh-TXSNtS%9g?L27`j!7YLbwpL|es;8_0U~|_2t3GkE zm|``c=?@!42PxwjQwHv#6Zr@PlV@xnpgM9wdby#FbPR(fF|hEH;C0U*S!DcTgfgAd z)>7*PdbLq`wdJHBw)KTB{z`QDbuy6K@n@E0<7g#1atzdh`IuuLbLRQKY`Xm{P+y{H*MH{i7(uQuU^-ykiZ7v~eP8^>?uC=MR%SB@4l_WuRr_uRKKh zWLKcyuUvqs2YvY;Hkymk<05^g!bAN&gheAP%g}V_7Uw$NbN7IWw?2VbfD>A(E-X9Y zkE`{U*7NvCh{P>VSINXCD8QEPR5h#=08MfX9s1fXcpB zh9R{tk%G_@lHrdL2|+MZRjk3ct^}-nLD2?JvqEA^$GSF?%NT9TpUbM(tl*3DKzAz=%vl9@t~v z{OZQf4EE#LuMMcOKWw4ph@nzeF`mIZME@UhF_J06_Z$sI z35dyE7rvmwuax^g|9-W4$XW$VGejKfXww(DpffAufyX{odP zZO+_q_sxo%IStgUXhIV{R^m}cF01rk*rHxohw(vByOcFJn7rsFzXBAABaE05NRydA zK@y2n4m%A=I|Xeb2RkN)O&KWgfq;UH?}!~oaA?j_m{P~*Le%VyIDf8B(b4r>BJTUv zFHpoMIfs@dkmqNYX{&s!GobOo)@!hf(Z&qcfqQi)1T+ff$*7@4H2?H~@RP7Cv(@&x zzl9SSAS`w6U6lLC0+`5LjEaO8p7*+_#=g*^?^$^uY*f9%B7PzE13Oy_b&B4QX3=77o5zM^SNqJ1w?rxS?J3qOPA+>M_97!|J!5M`=4i`P?kOI zzVJHh%9nz))AUyO)8b%IO|_iy`QuyXOQE&HVZ*Z3XBUtZIA=}@yVLrUiel)>el*2hy~!_oL|^%u z)C@#ht%iWA|NDU}BCw*?rS3ZeB}$rKQL-vhTKaWmL8i5_sI#v@8Mo+{-60OTzm{7r4VfS^vmspmjxrg zJ^!OSm)-MHXFxP912|_)NGRubGR$Jv1r0! 
z2j_2bpO4DPa^91S47cNepjpa#M&&$xtRL|q?~_5xYOlD6^Q%`!`xx6>1SA4zMx-pD zHLfArIUF4IvGZ8YP1nm_{Gw!6q^}Wj&>0H!Ts@e%%&VQ#{%W6_oE4~#w-j>-1P%zo zu6e+R{v7~F7DIbyN3}$ahPkgd>IBfta|q-9sIqk_vXQ6-&}Kj|&EXzh<8)b3@isyg zmvNz8ixHi^{gsOi4$^Ml)CO$)Pka@3eBDzufra}%_N@qN*Cz`RzW^y#EJ$=Q^bMS1 zO)2#fLDmd6MW;emGFEZ{dGV7jmy|j0P4+*-E~!`u-Q?2fBI;S%3dwl9Q@|olk2WH7ned(<<_Jj{h-YUTe*#!hKT!1E9lWwhB24ghQ z-K=v`(g58{d{S0*IPj_kTJ$eTHHcRIjaYg;<8>t8x6IyL4(aBpJ0Wvw4RNV!eqJGB z2O>v^ywXV=8BHd7Xb#)#*OahzAxown zgc~kZw9-C1BDB|9aExOwJ(aZivZt)3X}vkzn9h{nIt{Ts(F-T-!`KlHtbR|5`x(SfpLdExir z8Q%1(`aiax=XEhOd>tdfZyW|Bzl5XT3h0;2pCb<=r7|+$VFZ7&$bW?0TfvJ3zg3)6 ztZ!r_;7HaU>}(uYc)zNzs$10VA$zPfOb3)N3(mdam{*mLB~h9gR>v$Q_yy2}=icc3y-+in3!K~y4hn7x^|bnIP2 z+}V|;K^6}d&Trir7E(u%-3`FZEnw5Ng%Og5F(2x{@C{E?ZxuI0^QQ+*8?n9l>?6qE zH@{<}VtqmRO3)kRSFUws9I;7U!%$J0%Phs+-U0sljR`F8wUKTp90u#2$`3w;Snh_g z(9K~~L5p|%IsrLCHF3KdI($1g%J1YHc1R(1TCbaND^SOC_VcsKIe-XZg4vzha6ucT z=qQIJQ53hez%rE~RWwhHskc;}92gt*c4HNJ>l~;t;&A;+V)=FR(gMf0Lz!WhhRD(S z1_qW;oPHTMt#PRsaC6aeREP}jUcpPEW)J+eI~r|V2K(OLhR+DP3mE=ub1$)%}jFeCGAUpx_m3}=sn(1XX4K?umhociA&PfUQaq}+%ax|q9OeJTXg zn;PpHR)2{awKy=qQ5^wvEhq2p{;dhP%ub8>Fx}yl?U4>)Wo9olYH1?ylSB&WG^td5 zVkOx$M?3S+jWl6Y>VSowx+4kvgbUm2HN6WcAU)mc_0DQNW?kQYC~uv0s|pe?WWBmt zm_9P(lt_S;m3bDAm^^~MrSU(n`XDIlE^cBxdk|Rg&5;)7L;UbG>5JVDa<={P#2QTt?~^1>+vIB+asjyg z!K#!GRfPu^m(U1m%jn0f+vfb{Avb6|QvzlE5U|jJn-u5HP`SGWA<-VD6}i{yk+d*L z?G+Zep9r!42SiR`Vm^OJ#UP$M8AU;1RoEBpo*>&Jf_h?4%M}Z81V}5fGUyq{pCTEf z=`{7zbax0|>r9C*Tf8rbZrQD704_~tP``Jmv5QsU_QH?p@W_+Y0~K|po%eq{JXGx2 zAaqPy*wo~R+QPzbr>q0sINur*Zqh)ZJM#|WkC=2d7<&7K;1)ROsrlKXd;!~SiTZ{! 
z90xOZi%wbchpMay*g0Le>`i@TTSPwd_Ynz@#I)L08Koz-swLz>L&| z=KF(F%}jZxOn&$F@`jFPX&-^qecss(AY$}xzPp&lLE_r$WrH-iqi}Z45575Z{NDtK zpx3>@{ToR;g*khRPu}g1gZraCoB|or7lWj|=b}6a|5r}nnX6u58en2fOuaDmD_wQH zljmDDLIO8@+-m5*6&zde@%Uu7CF2|l7NypWum7k!9mWh&L|@40P4f|9NVpNyRM|oo zBa6-(w9YtiQKhuEBjoVUFMrjVoK!#ya)AKpXz-r7e3Z{?M!qw!P3l#ib`(ax0|WCj zc(tJGT&{uJmZ+?&B4n!FeB+N`xTO|b6W9$Ye!ZE~{xQ8UB5Gi5vih5i{s1?1c>JS_ z<=?)#E0U^1c9x_FSQp6Hf)G&FWp={Wjd+uVXAINmAl<&>ROv-*-Gc67#DAU4$^ouC z3EmiL8B^17S<-Cwuk#minZEEGf8QQ*+%3bdT?e@_@-9*X^aG?1dlATl(IamGfzqT8 z)9^;EJQ!-->;Rf%18kI^^5^Jn>;#Glq)V-OE_gWp;|D&rCAgaJAo2CzEA|X*Sah^j z2&+dH;{ee}6D`A-}uCfY#WEO%C|H*bAZFHn=65A#UT+FS-JKlK+P#>{LR= zLD3f!TX*JPbWtybw-^d1n5NgY=i+IPc_Q&fW-SC)AV__?Z5|U^x2H2vn;c&8TXPWC zKnqE;2O*0~!or7mAeE1|dvF(o;TIAp!%ou6kmGIAc3i~srCPg*$;L1TfD2@fmj0$s zoNyxEqwCepfv3wZhdx6lrC1EmUztllG)d`l|J`@NXpRcu^4>ekHXQbkiv!btK6|bX z3=<533Iw_>7`Y}E$!AVkPO@FRDCbEa5b2LS+C)PwFQx z6Udgy)}E^&75Sc5X|;eCH6aqrRP3oHLsfK(0PzWnlMPG)NBqanD%plxNsgd;P+h82 zch2C@48e0Rs;C2CsoUiCgzFY<^HwK&{{r(aE$LyIG#M!{s}i`VO2JF=2+nu<%cu;9 zs(lV3=K z2^^Im%?fgkbq%tKZoH_?BI3BDh0KZF?cl&@UKt0Y+(f zu~eKC0@`4JklFqFd16fePggDJ`Tw8UgI1IuMTKqfP&#CU=RTtVf|xxt!L;1J4lqQm zKA5cPNhDTKrs^_*cE4j~ze*&53u`0=?=U2WdDNy>jNP{&3C+EjZ)zydAr~7;`7R|MLQuqNABVMzJ+F zd1ea_bK0GtF}n-NbiH^ti0NOUtN#Mj7P8Vf`cJirN(-N~4&@LHdcEuBK91#jDl?kt zs*ZwYOJYSN@}h_PT5PKq+&3R+iJ7v{$S<=V@N8oIP6+?|bFl93G@q{opqgyuFEflU zvZlH!DAvbLqV=C6ll=f{g0Ba;Tkgt*6?erzs~=j*<)4K)CVIWor;G6K`8;5jLopZa zchpbqLgv0fH0!DqvkYv9y2y?B5$X8I00(f-oNIq`T=Q{@9>d$F{Km^7$nB%pq{Ykc zQapqYzY>A}VeqkTIEHYi$XW&OW~@}W^9__+chrro16`8AUmVbJhQ_3+<|rfi#wmNu ztM8Scax+(Vdz8J{u9buC5d;J?rw|yTSJlKbu<#cYq<*&R;T6HRf2dsZmzY{R{{)bm ze)-!8N5TkCT3A}gSYj-wcgL6X3^#Z2r`bDT)PmqR<7>VBJTJa2OR}zY)+GXuyyZmk zUKfL}x(Ctfxgg5g)o^Rn{gA02D+DX_d(P9+r4CBP;t?SPZY1je`kqJon^HSlT;{6MA+EG zCI(*bvSm^InuX+OCJBfblEm-)3A!VqE)}g!=mq8a`he7@1s12^Y;5)@E}4}bNlB>B5zSF?%@|%)U7#XpyxY0S8#yC3ohr9+S~@`JJ-=bhX&f*h|8$MsQKRm znj%b0rS)J|m38M+kK|~#>pjn9ph#s<1ynBi<2c$To8Ls`Lq4eWw-v;AV@?gl%h(xz 
zl_J+0wW4A8H$oiFsTEkZ@(Pqt9)}h_Sa5T(5BZJoVi3kd+}xk7>?7DnB2cc}MuTnP zcNK_E9c%4g+iv6C>AAM;$qsuz7+OLYUb_A189<_^1=%@ne%ONJ3hJwPU5QR0J8$M{ zr**n`+(Zq9z(ILss6k{~hD1=-i$c zFXNzCQW^i*iX%7YMA!BSVs)hekn0i_RiZbDIfs`}DM6s3GNQwTN?Jf1nMo=P|FIuH zi*0MmnrP|%ens~2AEV2aO2Z>?CwsWRJx+Kqm6rjJ46aY+5KMvBH&;Fdbgvvxbswtr z7xNgR%AA|-nd-o34`O_|=^`&0)O7FnZKz+#Ny1f&*s8{n8*%FRTjanwfdR@#GA+!D ztih&|0rmoUp~+|BB49)?Crolze*xSeDbyY@e1Ba_{`$3VZC9W3vs5inbWN5wT)S_= zUk-43bfLAXm4zeb{YPpzn5J~jEks%zu_UZ~R}9%)-Wqt<(KK-8S6`WuDe#gIK3weY z(;M`^-x2IM$*S#%+Jb`2vh+gX15X9`QF<9PHMMDEhGZC2BiL1Ryc?BRnE(BV_JW%k zInyU)X}1gnWSd?WA#-l?A1qAEVzvxWfnb~|J23eu*pX|;P*H_X+DRRMP0UmVvn{}5 zC4|b^35X9W=9Gx4H2O^p!MSr+s(q7?=Xpj2>b}?smvSg|;5Pk;rnW`S8T|_!%!Ah? zMtkEul*o%KLFl3%YaxL-aL#SaDQirtz2xM2$~06P*BDDN$dzcKC8eI6m+3qNqh9ec z_kmR6jjzT8ArV+Ob+d=lI4JhTGzPb@*rl}K)w`LVnxS{oO4v`a6)7^-H5uDe6uFg% zY2b=NKnNOG5>zOL2Eh@rGvAu2dI}xC<_unZLH-@(y>u)OtvCuYT$h$4l^5h1to(x= z<(kU##-=A;xsC2RY>PRveh-02#v|{Uils4CL_LwUBi+jFr`sWfY8E2O^aD$4v1C9N z%yD=l@}~4X3C@D-k0rflqXdniIsfMG9P4t>>kIyJq(_j&_2cc1tlI2eB>AgxURLHH z1c~VdRASJ4B7|JGE04`R*g0YhoBFtvy`x@OwxxHi`o_a#?^RBMe`EyVe)2cFtcW_^# z7NEx3``t?`dW?{Tg0Vf$#zB19>6zZ!H5m_m!|$d14fGQzjDH^M;vJ3q(j3$N$v?1- zo!q!Vl~n5?w2O1%7^FhM=W=6g!Bu5c1v1$XTiv0VdZp^&4=8?K-E;JK1J*|u8JTI) zG+8)RO>M%a=^^{WCESQ|3XD@G=1St50NTg?xTLZ*9C9=ry2hxAifHqovIn;j`Iip-o{K=1mPULO)4s6DkR?67JyHSxXcTc-Pftra^Wm2; z>^rDaDbKlG!$P^K+-d{4rx!s1j2|;|4$0)eVLF;NzB6=k2QD>>xL;%4jzn+Ukt0x% zj^T6o?CPYRZ~pp<_u=?qr0o9P8l^!aAQH|7>K#03KnC_H9wi}|)wHNZz< zf|klIva})!1vK5Hy|c_(-;c?-%CM@%up<848rpyS!2W84YG!uz2}QHO0BYTE*J+mb z^?PY@xMcD0P{iXUV1=P1yq349H^iao8^PDWohsJ2=Yavy$~oo=R6qM|FeLR54XygOx>prKiTqEujv01jTE9*A z>i+%)rh+x=B3lPw9R!=PlF+n;;)9p#y}26FU`I?cIS5<9D5FkT2|@s##8*72d<0+e zK6}cMmtTwfKa&Vi$aDIEx~Qr~I3@n6a;!OW>dPJ7U5^32BGg_oG&4@d4-|C}OlhTK zIzP#si<9%K&wVb1khpO{^yGQiZvXPjKgbd$nv`Co$) z$_h*toPa5K{f^xFka>r%tPQx`cqdP2)%`QHY@^q0(2c#h<;dM>C;TB%~Qy-U@|-dO$0dxG4yx20`=<_WIvJ*fz59- zg!5M>C-Xurdw{mET*%tJ(>d}ufYRzuk+Hh?`L1^mUf1|A8eP(Q8f>D8)3`NIrpjHJ 
zo)+cn#X!+9bPSg7LsDSYIuhq80U4IWfI}aD*%nLNHR9|rEm{N7kI|G6zyxxHG9Dfu zB-TByu)ZNUFt6fw#d=m3UWU#JQWq(RR5O1NrZ1KTOw-)dgB+6LcXW6Pn~Iv}4Ni)? zVG<|8*-nP&?_RzC`{@jMVlP8-4y};?R>6*at*EAI{d@tI$EWZDW5@sfDJu8sEJ=h-aB|X_xGl0|3ZLxfkIv^G`Z@2MPJ;hEd=M&-jqa;~ z`{q}W4{u%UXq*}!CUKu<($lM0>Lx9ia^YUm-OcH#JMb603HLxb|6gD4cdi*)QNYGq zS%?^v;a4HS+rlw?P5J+Q;aeli7+=Tx?Q>D{+t>y?pJB<*N#j|kfT_gbxHcb9*um8F zyC|3QgD&4+l!H(_c?XGIDAKI}Jjj6b&SErB!v%kRYIIT}xnmWrboa2qqDAcdWEO>m z815>vSD6I7x0e=0t!JRr_b_C?$#leORkQ?ZpLSE{mafrK=m&$}=mR{4VrBYL?{}xm z$Fr-<9FyeVgrl}KB`wPYY3~6}mzLv}M>!ex9j88<$aZ6X86QN<%QI3P`$yOfwtE1A zZS1kzH%b#`RdUbFr{LB-YnWCRfBiFXQ?x=S)CaLO&0igB_}fpiN-vd8Tc3{XF}Kje zH$s5e?f+7FH274rOF*I^A$=NM_ssxF@x*|kHG@9Bs+b?&EwG366W~3LNsv8sE?z06 zJ@WY_?KI)SYBLf&h7{W5%OzPX0mLA2G01n^GS`e#Pc|c}jM9(jipy{%|8gEPcy};` zfURIJ*0U7E{BF+F%S=6F60z_OXHjZssbILEKc|mT;12l0jnSZQOyZAF4I7uAA>ktC z4oj6r4O2OXP&yicm7bznE!QO!1oTs!{}?(4wlKH=3g2v-i_5mX%*8FcmTfNM%eKw6 zY-70=7M87Le*KC2+qUv@W7HjN?Fv1femw-A-sF+u>!`cpj`)p@)z>||?#x1Ttd-{b!n%}Y^n13HSG zpQ$56pIp}MDJCXpLY8ouM~JJ6e;xDpxMX9GgTGvtm_3PK3VOFAU5ZyJD9**cpGTVb z%#YVOPAKMb1NOCL+ILhHCaqvM*I;)kIbyr3MKse zYB0=ldc?^|Kv>*Yon~FMG~8PX$i<|#Vo)a+oy`2kFcztlQ%f&}z;l0)kn3Q72%t0q zZ%F5srX04Yz9NA*#t@ul7%fq?WtEBO3t>Aw(Xq@x0s8TYp5)_v%cDoVI0Lzqjr-@O z2fvpRAyb?~LR3m1$Ra}IO78asZ~BhtB<6l4U%g;M3ax{cS>#Mr^Iizx5Bu^7if{7V z$2~wEIs`q}L=Ru}`B8^GUxyHkcfI%9(T9_gX}FTCF{Gyl4~niIMenzx+hjT&(!@F~ z{O>_%QG;fcd}Q~A6&KWlm{nLzr?*dr{kJM^=DDSo%XQ!$fgF7Jrnl%oVtoRdv_e z`1q|1hNN*`kugh{X>te%;c2Ci6^acJe**hUTaDHXF(;cpIKpdjvI10N@}PB&Dej!3 zKC~0Wk^w(tU7YvB`=3~|! 
zW@dZXY88yD(yf{)Blo`@2X(C^G``4DG0Gq1i}8BM4^^6}w-1Bn>MMBnImC8<13rqH zJ)Vk6i9Z$Up$_cTgP*_f{+1OFv|(qsr!qB)0W)9Evj#i{53Xg{W)IT>J;lT_IH_S( zmHjo)^Hw%efU62~8GiBiL~Dk`a~c*@{Jxfugc>a(P6YQ(R&^{vz|B&rwDTYik=k1f z?HV&&65Q1;l60omFy9qs-Gi144z(;koLukj68Dr~pM{y_V$-DY#;>Yft)0d>;R>t) zvhS2lk>W-G%l_9Yj>b0J*ON}Z^e2kpSM#I|rpz+%7bRb5y!kK{rs5W?A6xr}JWajXI8Lu@}+;sN7E z32ZkQ!;1M}r}>aNvVO`fUp^gk8foDDA)mG6iSGP_{tu_h*M}s4J{y*dHiyi(TKj2wp*EIUYSO|y7S$bL6x1?2VT^)@u^}x zwZO7cXh0zx92#O^st;wRjIZJywMy{Aeg{g0aq;=Zl25?M2VmD9eNAPF#ezpsjhPsk z6VOgnrR6oguz;v}Oo@j_2AEK@?4unutO{Qfs8=_%L>a8ODM^Y|t<@Z@s~cDd00BE{ zC)cJ=3)I1kvUcHxTYPz<80xlHUMs$l(VMRd_@@=5Vf^@!aNE_(T8F z3;o60#XId2rBGNv;T^h?tjrcwg}R8+*oN24RaoLh*;fW#l<$@0zX1greP`o^Uzn#^ zWW7!tp7Y>`n0*>_g+}Ai6X3+@1#5ufmz43LU!C?!VhJQX&4ETTX(AIojw_*|oDEqg zezl-sO_U-mq2!_1GO27&HWhA=N1GQh>0yexet%N=FbEXg|DNj=mp|#WET@W`TpIuM zC>H$TsA4gpy9zbw2Q|R|5zRqKQiwG`w*cu{UZZOX9T8H^wVB%-hw_oyArdeZSYOD* zu3Zzue(rqj=pJWY5QCy}`*6^XWr0zOkPoEGd^QM{pu)T0%dDCbthfaEs0;J*-4fR} zTz6P*9|5oWho+nj7CyE6Kj_%TCgWmph;Cyu#F@UN=1X3w>6_z#0*F z?~hJTV=4P~7j{mneKnz%`&LIMENMXcV@)ppXx!u(@U<&(n0Lq)59sl^=;#TQlv~Yq|+%7>`coi7$0b@UJsIj?HpJo?pCZEkB5OI$*$w5 zhn5B9yBIDnkK02kw@}0f!VpI;5!m(1D-(!QisYdjGIH(&6t0%>KEYUPn(lwbbKskG`W-GgSsJoz<}( zYkFcSUIJ5qEcf|b#=In@MEnaAwv~X$hlG(mlkl)(vJY9extGv@p$Z{I9%?>#5_><$ z`1OJ8tXXKN>u|Z@Rx91^-zRUtK|&Bg>t*|_5pjKw$;agNo^-X(pVZ=IR?rx1MhAfE zh2odiKfg8GzT*1!-;D7U7DskgEemtpS|~wS&y#~mcM%+offxy)eRT>kuUj*zTg?r3 zXs1mNqrk$%RSX~z8^x`X5{u!Mn+cDVa$o2_$@%3rpZq_z_}*d#6I76$g;%Zb!2Q!6 zW+!&YLp9^rX+U$6uS>W8OvnhD;Rp~T^4;6gXD-nm&Mf0CihIn`h7~5SvCA_!Wo+0< z4FN`d(X)T3czcqCy{B{EHU{Ql$=wegPUrT*c48adRkQ{w69d;<}hZt`yy`fz$Y!-L& zz%OgKD;QmfQ&m`xZz*HnEf5OTpkkt!zkyv}gqpD*F5Qgg!=zE)`?;cicvCV5&kW6Y zUz|Z1P5|`8x~hImCrxg4VztcVx=FuxeOb_vV zSjs30e`MPtB~aFOF`}a#8-{1SKsX#&)X7hsvN4#WFnMm%GCSadApLAX(GTTC?)AtF zoKTuw;8#~KZYtoJ&WZ>87RsG(aSCu%u4KxWaSeR{l{6*gZ66nXTZU$<-8G5Q*Kslk z$u-*fx4x&FfUGp22c*$CXQ8Dr*KU{Y zQ^tWtCMlPgr_I&GZXM|BLmauevJ6>kvH57cXyP?CttU)7kDo#`N5&PgiVB?8q@9`G 
zOs`Mlm=bf!MJ2p-E4g!=%>^o??+pgqOaLUb%{FXVv=E*puT0H+gTH_D?<%?%Hoo9+ zXo%~-B;aW@4TUE!73rZ=3)lW)JvH`6p0E`j@)wK|K`N?)2!M#7RtasxqG)%*MHSZ{ z9x?Mpm-|`t^dOPhJl~-144_?LewY1>E$})}vFq*nwezi%#I>a$${CJyw0LYQ27IZ& zHH1&W@j~4Ws8-YfYt!*h%J4c>ndvvP=u?fK!53rS5@#1fffU|02yycLAD7s-jEe{P!Lt;y^yBJ4$m)2_;h4;q8fCpdHWQFly;P5$b_Z zKD~Ha;liRFqWe-dYr$GgqeJAw#;wJMA}|;HSAH<{-nQm^?PP?ao7_!M%kM0UI1qyMoO*L^$gU=6bJ&cf$vZVn^hCsf1&2>|tPeIk6>9=W*!(SPg*{#YxSa%P0VLy(q1IgZ_LeCE1 z1(K%NyzzA&3f;T+fb1rEAT?EJDOYrx&sfuD#6btdep0$AA6*Y*Q$EMT^8GE|AU-zN z)>b*g6n^&A2U;9_VQ7ytJpMJ)xB5fiZry|6UY6AI>!@NV8#b+^!tpGKbRePXRp&o7 zlpFF`BpYhqWtbJjT#t+SL{rj=QM3Z!0vNY^QXGqBl#UEI66@ESwIts{2SS**YNL0Q z-z7CAE?Fq5@{s9?+{n)MMV9@}GO-JGaVPh+*fiqR2_xX-MldcYErYm5v%%mo0REFH zoG7HRi0}31&ei(V+Cl*9&o2kN-^s#dxdMI zQ!*BsK21u|L&O*iypRwRF+_`hsy6qOG6N%f^}ul|yjhliX5ZCZ)t-)&YNh%H`o*P< zcCJX94BT^ic~W5F!EXuX1Qo*NZuz^QTScFt&)jw#sM_M&0f#KyGkwy;kH)&}>YG2i z*n7q!V1qQi#h2d5-`6N>g8#yvo3N$X4_M%H?#}pmm2c>*c34ueR^iYC%bk5sfQ#0r z0Y?UbvbAD|VU#KY$WK`vq0{80iDAayB~L6pfG|Rkng_E8pZ&b)^e@SZQ8Yg(>NLwK zN(sJAwh4qd;Bg;D8$h9gk(_465BJ@&yX4N-cqniX)i1;B16g1!*cfR9&csW@H@32( zlt#bjtK*i~1Yml*wDtrM*te@dm6JPvZpsE7gf$6c?|aqMM-aBMu|tqRed#kz{_yGT{cM(E?wNt8L@0>Zp%dZ$~5PR-pfm z8k_{BmHo#B>4*xL(!>7qK;j~5v40pLw|M;rNbh zCXDbqXd5aR2S3Fo=_lBEFU0_6m6?qpFLAtSo`P7D@w*z|}eWZ$x(F3l3(JKQD1ab|t<3)QuV8_%XO)wbi2*99m^v=(c z){^?q*YQaIFe3COCG_)m(2>~cP!Y-fyr!3D3oA<(2N_iT4P88S;DDW|n!xJ{2oDms z`wwiEx~vv%i+v<)Go4Hj5s+gKRq4>Mch^TEtcT@3;dM7$)Q&1>) z1Wb@$6Jt>TRx~qwye7VTHN_oHt7Ot#e?a-3SGtjPUFjyI#QVOXm2%q)hKS)o411?x zzOlzBbJ&hyLsH)p9kYi$H(m}<2QcoTwiNU`?!WTc&)||*{>q|}j)JJa3^#Q@KEeg* z5r3xV_nOmQtjIO@)?cL|18oNux?Zjb?}PA*N@4(Bej^vbhkeKw%WVc+Hv2QT0AaS3 zU~2>4hTJ#5JvERtXy~dPCFW2Yjsw3=^{ii@{P)k~hFpn_3jVKo!d8GJs#B;lx9o#i z`lbVUnZHUN7M);15rp>w)r#!VLs2tvWfuHGt&M;f66IeoHOAp5F{RKTCR9U0y z3A{4@^qZW^A*G-QG@x?tvU>MYvZCeiw*EIw)BIR#2h2V8Po^rKp9de2v`&87mQ%{R zK-VmP59FI_`;Sah2vpvr?$)~TNs-{x)e3zS3(kU6@G~`CAJFQmDTcRm0tuzHL+k&- zJS{~bu{}5+PJ2FA_wG#98<}{#It2fl2HS&KxKz!hyKe*!BKqd3p8wXT`-Q&{4v5)j 
zJIyeu0?ToetFXxhVhdN!!{+)lA=Sv}m2kLq$-mR1v4O-`fRg^a!_qo%Ks^Wj&87Ni zX67Khh!Q)%nKo;%ROFT&3`wB5{`7R|QW==1X<+e!Oy#fbi!6FzlA)qB_GpZdbrrQcy-}FHFRxJHa2H--t-gFU@!qARsNB{#@85}S+r4GI>l|_N{Nhd z6)pC7i;@e4_xu9C{WEnVh=dEit~>TNUS+)0f}xFCm@(Ip&hw)3+X7PycVd^(sEjvp z-|tAgvi(2NMDB)i2wVPLt}HW1VFP0D;xSA z2?F8HsF@2hx?ji>$v*|}X@6goX*=k%-HV^#0NUSULV&phN*jU*Esc>+O*VHk^`Mcv z9i_7|2O0jLKx7d-f%{yP6U|##k^{F8LY-64R#hM#1&FLr`y%!`%Ip_Wj$2>wzY2t6ruA1+)x0Hm7b3EY?%P~qTlNV;$F^zoa`8!df95S zZ+3W@IB|f-_=}~dlKC+4o2X^Wj~8jTx3d@3dF{DM^Vy#ZGmT&#aycVBX~7ro^{+~@ z%6d;KsJ*&;+6(@BwpmVAkqV+Wdus-qvqs53P+K)nf1${!o&Z$WW)jDgQ5dEXtzb6! zKS{?9$EXzI1S)ScO!MHov6|!%c4VtQ+b*6aEYPn`GLxOKKCSnc(XQmS=ni`D{pa_y z?YeL!-3Z-xYT#*Hg>=JHfK@6INz#D>1!prO01uYp%nuzk#{4B8D8i(HG2TIDyV z<6%WIsaPHCu?}Or;zE5SWDu_Cz#NM|rgjmN=h{jGCPSY-p>XMMyPl`cZ_LmiPw5b+ zL4!TV8iXzNZEDJ#ktx#gzj#afIq#^Q~V%XqWo%Co|FAXT59&GyWm8KTl^j>Lwz)UVVpg>%RS<2A2l`B5xA?>FdjdHRS}YbE}TedN*hz4fz!l0;iUJe0Xxl>L8CKF1A(N z80w`jo`+J5{U5-to}0n-88yxt@pjHvwdL*o@2g(oe=LZrZHjhC>&^fIbNM*fH83Y! zK~8}T-_4-^?!5Wu&c8f(W$*_t7@!rBW{K3N&Yd{o1RJlD>9L92*Y-A`=-x}{ z_Y{E)8-R|k3s#A`THqZD{G;@>1^O$EwlATTimZ2`(k~faTOeCBnr$a^Jt{Bw4af0x zr*rRTo%Zf18v6oCmGAKKDR5heAw@Wy(P%ZwK)&3J6ckS5K9SBUlk#=3m`3`R0)7}i z371aDs*WXMe$bCOK^B{_J*2f#VwbdZQmsi&2L#DAS=J16_d9QI=z=k7Py_7tJz}~& zn4`~oQ35oZg}_t=u| ze`m;#UyAxWM|Qm)Y5LZ$lPaJ9Pv~Jt|ET8CE#f`vB91>|7zthjl<*N29lT~Nmb?$( zqffM>NM4;Xefa)#EH6M$uqpAGt9aN%)9~gihQUX$eMTbR%^%ht%{~^(;_}vJsh@o; zE}7DWi4HmMB9#ORc~`Z~*L4F4nO+M>$fOCk=v){T-uK?U-E5uVVn3h^NA=QxTn5C01-UIm#y3$Bt{S&qY4GW#bbxHGxm z={}(E!+cGVj_(#yz^}b=vqhJSDU`qm&qnG%r?_-CUwS~2G1GqLL&&VKzuLRabtLC2 z&(iJRXRqOvBDiroB?eRxpSDuXlUtG8ND%@<6)K~vE#f=#*syH1M7M~HzXPZLPL&*N zT3Gk1O1a>apxDZ6{NTEsLlIto^2dHro&xa6mNqgU6br?umQHKIUZ7xKiLl3P_E{yY;OlpP zHaDel!Jn+&_LEwV6p*$KP}9lY!D_xf2b2*1fbrqMgzPGg>Ca8684mR;ZM@=Aj)ufd zuO&T)vyl@@;5pqYbUW`oCfmFpbm$mPUb$6@<>+6|_?S(w>l9-P)Za}BcuLhIQZ{?6 zN&yX`j+0mPXI1M&qWXw+ktZwQN2XdGR+C8t3f+*^-@T{z-+%sO8k)f+D8Q5G6})z?MyiKunR2M@ZCc}@QRA0jh=X|KmgHuzN~Q?{!Ttj?dE(GUTRvd 
ztjv#$;xS)EB0QjJ76A?4Okoy^3C#C4Vd^QD{DA+yR4U1x@nBFODB^pytAQy0i}U~D zKgM5f{xcP4=2ShP(LNs8yTZg;{N~vPsSDbFw&5J4>Lv27FHHIlII0WP+fOu1$%kk2 zm_?+7>H^x8M4eNc?}9aHkGK9^o>OOh!kA{GXp=If3rdTiYD$eNk|z;=Rk0 z&FPJzJk6pT61+(N@<@;h9dK*yE$O;@9{p`vL?vIZwr;ahIm&~cr2d7o7x8nzBrs%S z4Q%Tu&2aaO3F%L7Q*v0%#=n*qUL4JwJgQ$6fN1I=xv3v6m1~+e;yX-DR=2YQV4kl_d-+7?Bz8paUYygIct6t+O9FK z;m1<>Z*}wsafl)UGJ2$h2sd#jZPvta$3F%qf0?PWIVP^*2Xoz4`QswN%~HvfZbb5Y zZv@gT)uyA*t*QLid+cs+m7x@f1bhH&O1k90j9XYEA!dne1e=29AZ)FHJX*=lhcwOD zgE`>vj*n{m_up)h>L%0o5(`znW{po8w6sAqsKig(XZs-BI&X=0x?mlHLVz)rHSeKj z9>yvW;aD(CwCy46WNP+INXuL@8f%Gs{rI~W!G;39CyM5d$V_~e+ z%BM!2L*RJrlFwyfsxruWp)~}b;-x!syb)YOZV>Z%AwZ(K2h`Yv{vqj?4TyaH>F0Ot z$cRPCPZ#_#-6!L%`jXUx9eAT63~~?Z4z}7Pk>%$#3e8|e$abExs7j1+clFOHf{>bQ zSQ;-rl1tVpeDeByiTa%VU@($KhFcv}4v8QEs*Ad2-yU&=k5DAN8Z7BNFa4JfjZl(&$|Km6@%91*js%rw1Fr;>Tqel4?Tr-yBUE zJAKKghF?v>(EE0{5LgxuAw%!`NCys%fd7IR?lEIm>g;`bJeK)_R)~UHGdFvTm16b5 zqPwfNGy?59F`nOFlY6Tl{Uc7Pbvdccq#mRryDuStQXpBm8Hz<)T&_{Mnhl#$Rrez*M=|!U)CAS>*`KyIen05lay2 z%kC%JU*Ieyp2WK$8q#0u|51g!PWA=0KlU}hZhtSA_=B8K`t}=k|52}Iq3P0zTn2dX z7{BKOModEAvI*XO=q8=9I@)lG_pMvo-ol0}_s$`c3I%^)hWVx`g8Am{dgijqOWwug858V!^`2~Y;h;=pnB|t=cRGUoJ2dWOI;_IafUZlQ1tq_ zS%eic2IlzXkh=HrAX?B9sWksXgf&9sBqH&D0ODX1A(*C`of(F7Y@j)xVUf?ZY}p=G zy;rleK=eNY(4p-i1__Cbmyzpptr(qPpku?XU#3Ip&z$I9@WpHyARj0=L!KuwHhwhW z8uX-AVcXsm>mMI7;WRSyl3(5fU=UTh!R!vf7oq8*rs5@{fnIH~b@9K zJ-!gJB~gX_c;qDY)y3zsUG}qGjv9_3jSiY`h*bfumm7q9q;vI1Um;VxHb-)MyeH8Y zDQs{iYTL$)SH6Mh&Ik_}7nuICsN-PmrT{Pf8S=C$NaKZl%w|DOr!5f7Ra-8mV61EE z_uPay9zT~;W0lEghCM66d@K1lO99F;D37ei(p(J8->&jT#h2OHiEhoD=vh?1U|=s{ zi~;|r9PGYdVG<%!Ehh(?*UUqBuP!36NNf0l`Q^l^$&G z?EdPJ=z_HQSy8Ds1}oz8sF7nGKg+T$SS@)S!_nZ!-kUi2ub66L50OnmG6q^+rp4Mc zlikJ^5WH06wA!hotqy0g{Eld7*s(e`2$T?a&*q_~mn3-iThn!qBFcsj=Qo%YKH=m` zn6^D#vxJ%6&v*TE z_y8hYuanGD4*8epuxnbZFReJg$+3b7rvYO@6iwvei*=v}p5mcux{i4|2ny)H) z!xS$XUYHP2m1aPhn`F%F{#vR&@7BCu>)vDf6`ixa0vE42)-n+gsWcMEyTS*?+AU+Xfu_XeS zXIvG1dXw&kRqa;w*T~qW&cYr$d2S|Mk3HXN3i3qR;FxS@t$>zM|AAg$!t?(dda)sVVdY 
zPZVJ(Hh58!3>!=$&Y9OT`Jn0`CIT_kC_eihEsc%+Fg!;hK7}^{o3_I?s6Lh1J41>L zPk;`Cj7+Sse7|fpwQcTdksd(&GX!$sPLAV6lmX}57p$W9k@#Cz1Kyc$$J~2Nv4;U_ zrw(unN$e0kNVV_$$WDQRIn6bx=^IP~!F1TkkKw#9=FTM8$Jc9 z57vl0kanc6->9eHww*L_LS&kiQx11(E&TbFV;dO)?kM#fSY})ma}@`lyoqeq{+)wd zix1UIL1>vhzNNVFh+ruM9`=nw){oR%?-V=SoOCuK^q_xfzDGuE=&m(KE%*Pxuu7jG zBPJvMdKDOW_=1`2FP0eUj?n$_hT>m9f}zovyPoQb(|X@`SYty6&2G9_hNl}9 z!M5?ep|>oc{l@zzb?KYiAp4qkz`;XUpC&e!m#4&!RR3BWaUO2)gljW~#7b&&R!Z4D z5bq})r#JsoZ?l1bUV3Y|HvCB;bSUvR8+n5hV#Hq@m0gHqg~x7HEb1_ z*$wK#6c7XhEEwYXi{b1&>@i|XrXV!^0+ z4A@Q_j3#tpW!{1aO`u#7o{+1H0mHP*o0%Aft#i3T*`~^iEXNkMtt`KL0 zNY+ZR1YnH*itPwxD4x`@BM=!}HxRN*Y%^Sk8Qtx&+Hd#40PUi@8WC&0EmIo8UF2AJ zPd(uz_+8;O;Trrom|^O0z#GAP?u)}H%B3c%^E%hRVFY6c)h!wQ5wI3yWPJ^q>D&XyJ z=yTOw1>JrN@WeHdLd;G3XQ9JZdGa3wnE(n-UGD&ZKk`A>wD(OUoy*VRg8R&@h~T~ zfXP$i{1$)P@vAC8MFs6rC)YmM)K4E!dzi>2@%Y&mf_A`z<~ufH+;}5c!ciq;S%;9D zIFk297p4i2U>=#8gAM!HCBvU4#4=olUCXk)8Mkh$uOe*3* ziT(%LXFbrPC$%@`mg1Io5S$o3WsVW&1^lvagmt?>>3H`uVX>tDHzg>V$9JODx{RF} zDr5T?5Iw-(T~x!rB0&xIClOTo|2Dv|?#GVBx7@zUDJ@4#IUMb_Qd1#*vL42e!&K5d=EtC@+H3H-Blq(sKRJ% zcLm`)MFsJ~WuGJId08}Te*=gHh(Yp!HPnaB`$1bx~#LcBx zdC1<342r)`0i;WmMWsxYm{Pn9G=wy!rF7v)cl_?U4O`I12CQs~044=H2)lb^uK(ll zK91Y$oAT-u>oxphD#eRR;Ay5D*jT*ySvC}RUrFrzN5MS?Uq8)sB2*Beo+kSFUijr6 zaKIpy;b=qW{7PZ)?H4I``4~5sIzMNnfSoZyz8Fgmrm;j7C0SghPz#>y*aJvvxm`z^ zq{b)+3_~{OOnTQq=Wmfjy8#Bv|j&uQP2y9 z;v@P^)(AKP14rEG^?&{?K>7{EFe<1yIE+AFPGU+G5}qkX77zBNzYeT^T3=l@(6nl5 zv0U74tvYMWJldT|8Q%AM*aw>;Jzi*4@s70!*bzz+1u)F!QDGV+!ymSA2Z{rJW`n9! 
z`4^tY6?fRHFN+M_S5ApMj&VDj&iX> z@;q$vFVm8rAB?Tye4+EX?kVqv(iRorM^fN|uj$pExRW^H&AT960>(cIrLsy7+m_-j zp(0yo39qlgwn-6Nv&>r1LVI`6XT)m?)8~Y^bIgP+N|p2v{Ii{r9^-%ZJ8>y(n8B4_7_$+nk01c zRoTGhfz4oIyVbXSojeey&?AthXI=b#3i{^e-l7%#*5x@u@PPEGpf0IjtQlObAoqMv zNx%<5A82H!rNKTOrrU$M(D%7G#$NfbX#xfgTFbnY%AdoHGnqX9(K&uQBZ5qAq<;#P zz#t+Vod?TVt=ziNstgM#XdIrE!(VtStf;VlZZGasa&cSTlz>RXPLtOG!qC9KaTiAc zd6kL#N~QEkM~&|eg|9V4kpR*Q!rhzF``iQhER&Cw;#nYu9G@D(EoRlcrrax)8z?zn zEvAzT->dfgh>1tBv!t7xwJ+>T_MV4bOEspT1d5$Q&aJTeLZ3P8hYoq0&j)uCAk+Nj zezbCxm9M5v1A}a8;lBh0kq-SSGxi^%P7cG5^(;@Q?sIkJPkutRfX3uvT6KBk?Q4cW z96^F;+uczOD+tao6LJ#EV!B`~_F78y#ly|^UsxE$NF7G-;YH^HMxceh~T zP-*BG1zAa>9cI`!L1mG=b*!RY)%1Z?V~PkEF$W# zkI4fj<_9SD4nKL-271!Hst|U<4PqDJiT+GshaKYU^ymcXPe?W>ev4)QLRW$nMjTw* ze?h7O-E)IC&p^qt!2%FF>WNPNO-??Z%pq=mCy!KDv&*ZBJ}KWr5;itS1PaiMv7pAF@QTT=sV3c%bfR(cV*v= zD_*v0jRVffuLp)HEYD_$1xoOd(?_BVjHU}}qU^9lGpuiYI$%e(>J}kK74xEyjOj04 zN%Poa?@T#%CWY0Kw>h2VG(f~{yd9kRNV%J4WIP~kLu!US^m$Ak3U@r3lZiX_oo{r= zio+&j<8Z&im$=cXE0~ z{lbC6*RM2<0468~eJiF+?;w&0E=F-8lVg`ZFPO}tF&#WxPNBq`f&1kTZz^bvOOK9A zP>pN+gwIJDgHy0s|E=hjl1v!W0{Xbo6=|Q-BV8sI#n-FNm6OjgpSp}nI3TEgS4eE3 z1K5cobLGm7FCJJNFu5+}nyp5~5C7TTaW3fCc_!_+fR(L8!U~}xxei(Xa?hV_)@+E> zkn!kCvWw`vhc(6O@BNsanxzSM&imaeNu*E{tm)kY&lk*|RgymruW=0!U`HDoW)qgy zP}RYjKSDhy4tWukc3sGkgWwWucy^hC2%ix#FHj+PJvNxl*Z1 z#A(*Q_Qirj(&*2#K4|Bhm_jPzoWXYxD_IZXt$Jb5NXKf9x^e)LOw%raE$C|5A3kWW zF?lTZqK-$;Ls|et? 
zslLVcj|frVaZZ=~Eq#xPD^}T~k|A<~qFm|tjzLye`o(Nv)7aOd)jDKd)|F;sz5U|~ zG(nUP#ys8i^ML#1#}ZrxlPh8X*={D+P1YUT&F?sv zpJzB6$%Wd`>+xm%P}REXnV97`V}MnO9r1H_N^H#B1{C3kiXI4Otq&G{&BVWQ_EKe~ zw1CX`4>dTH(~W&THXZZnTuGbrjG}X{<$q;I!I~UG^5{SG)JDxmf_Z)PK z@ZMii)^`H~D%sj()>qkJ-f$)Rt>ruY*5$BXMi%>DBh~DqAzY{COJ{;n#`H4)C1Dy` zC93>LAWUKqpkv~iy%JFMNwgCmBh#^$`d}u2x zAzvb70lf_x_apYQ|JwbHHuL&uP^wwIjq*YXv#*0);(0qt*l7k9ORJ=pZ#wp@`v-0r zG)#BtwDG5J6mLdUjt8(yo5@p)BUVB}rLl-3@dM^Ow{35XM24 z<)o28$jg4F49aV2M2K&baA1_l(_Osf02jHq%pA}m>E^6&my(cyA%g4r9qJr{>W*1)w6IoHVW7EQL=1&d}wa}p{G5J4Aaee$$J8WT4+%S$8}Tb~4aa94JB zfG)Ha1oTh4Y7Fy$E-LB@_>(2x+J>mkjt;{N8qtj*Ab+;kx{rdw{u=u&NtM^`3NfEnzkF5G%VxIRwQ=KfecUY$dlZtdSW9L zpIZfo9`4;qR!wtlzf@mT5|xRd+p_13XuZ42?KnQ+);R+A<0U#pvIu4_CY))ngJchNv~~qc%L|%`8ZkH}Zw1$i z=2u^;q^@LQ;oRDE+Ml#OoIXKmvz=o0CBN^VKH={Dt#Z^1>>TS9+nO=}_wEpYQ*crVRSNCN`s$*^V#|6xAN z18*BLn#Nz{))7%5*|MqSNq>tsbmt+iEl$-DcfF4hfynH$xH`X#6%B8HrJPzkbY{87i7IMhTe{8KI3Rzh0a`QuP=JH6H= zt&H^%)a3tTqBhbLb7uE~7`S)`&)$MrC3-YCPe{k{-;)!dhOT<{;$QU_|LN>|Pm$KQ z!=|#KKaucLEnISnL61=&4_!AcHs2tt&b}!U)sERi0| z??4cKvZc4=>`Jq4>dGx}DQldpqnX!G7G(%bTI?_I*?nB)Ki7IUxA;ymTQLQF`%xWt zH_mbO3r-Pq7L|{}XFw1XLNM*hM|fX8$CxW6{6>!I}pMbR;?% zxH)Er8$YJ@GQFN}Ja(DAs4~DJuD4)p(Dm9{(av#$i;7J=JTyM6|NP*ealNRJUk=FM ztld!1I7m4&h(zI(<(`y0*NP%36cE#6-0H%=l>sF?WZ|+>V~oUe`y01~BY}v^SR6Ys zwWg8YdOD-;{4t2QsK-iQ@9xYKaiYwfkho3U+e?RAkEW`Ri0PNmeSkmK1Tm0KmbH2Z zT8Z*#ztf?rEr}jp5wv!D{m`B5aZ>2TK@d!3g&I>FpTrM`uRm+;ea;N|yHK9+s)^Yh ziuMKR!E|^c%-m3bq;KO_M4tvHg#E3$BTFmO($`QwCXHY#FfYK50-d{wu=%9TWxk4r(zqa5r$2nog@3sdC@(vp^`n zXC7hnyyEnydc5XkM1KNR((Rzs+s9MZ*pFC|-T*)5ihMh0e#6tlXEHb&Ro^BRfjywt zb}j8AiSMwrD9F}l{G6<+$-Zlv;-ey?F1~C>HSlj0^lGYjy{wf0HPreT+u%t5c%`kV zoc+;x$bdYB#sM=o$lWI%ArunuR$`$c@bZ?8w|}n2$Mo%s4EgKU?5pk0>XP8#y^}AP z;AXi^Yp3*yl7KJbR_!!w-6x-7H@G($-U-RX3QH+2ne@6=pW1=!FuwqHzMc~oaAD^ z+#KO}@!4dV{2Qp>f!Ov<|_FHYJBZQ z!zH^Kpj9uwV0&GV45$CjpDW7>I$^2t>}*f~ZuE_#d_NZ{;r^-@@wCcuonm9_Gcb=6 zLZ+lXwLXpk6jvWl-EHzUF`L@N76kQ!qM~gOroDi|p> zS*K5}Q?nC+3@>mhCW=I>`*XN1nMbaKHOzf!2SUD#{Zd)^Qu((_7_&`3oaO+-d#M+3 
z{o>FQ(~Lu=jy))ae7`>w^Fs9Y&)+*#A1!n?=O5a( zkzaYXi`To=7yVl3f9j8NBpt}&1m=Dh@Wod#*GCL7d4-zQ3iKB*X4pFQuEH*aVPs%O z0hvp^<0tcoYaLT&5y~T*l%iPO%D70V^tf`ZI}}43uo8m8c7{ZTOfSN#L`ux?FnsF3 zRU=RNw9=EADdSvG@lmCG$Wj+OQ89IZCm-sWK}5cr z+n{UBBVu_YycxxPByaN!j*4-%%$G)htt#VkHnI@F?@>Nw@HEqjHD4$qO+mzAm0}f1 z1RD*)%S6QLj(wCigsbPYS_z2BABZ1Q0&=|vrMUm$)fYj$koY&voHT(epMBY>>C!z( z%_2BU%ap z(%KFpidTmVGJ68ig7NUwQIsch7Hk<_Xf%Eb`%=9`au~^G{()+f9Kyk5 z0f?|5B({(-oWTkVOw4}acYF6{Ox1ypU1#shA(Ep~0is{!r<(^Lv19IvyQowZv=Hl4 zgTj3oCT0TT>#XLjfQdJ+(@{BvW=AT1lb?Rix^|W!x)KtrbRrp4%HUQqhz>hL;=dLQ z{Dw5nt=?APHzlBN<_`ZwdA9o>U=cMG@JVBg@Zif<7HJ^e=0i*LlZW1`2*=mGlzuAu zkIIz~NKtpbi!~!iY|A-^)1lU~1em=LCVYBN@`Po2Mf7+CswepNp`H)W`UmK~=SjvPq;5?}0ABA%?-` z8t==`W2KQgc!aHrY&p*Sf-B2Rwx8-;^1XIoaLF3}SGi8Q$lnZYbkdwul<{1$kM#^W z4F!VFcrY>`iOPNOx$mt}NQb~ZKvMgef)>KlD3^2diEV0dj1?2CskaD>imf4s!RWa^ zuA(@NYW|9K$w-CTq#yifDTM=gi#24*RiwqPd5C1;q6i$jOb4IRI2o0=@`*`ZhYWy? ze1(Z}WpOZ%K)WP7P7UpkRf9_u?oA3zXzxM6uZ_T};*LGEiaJRPg@K7Tt&~=doQP6= zlpBq0S-eM-o)K(t+~2z4uqLu97^SMj`ESjyh;V%iH7WYU=i|$@DFH^otHQn6Y4W&r zOaA^M$pwj%B8l~My0GNHz<)AA13<+E=7tdk&cm6*4_B4LJn@dEv|OVqn1|R^_R|`% z9XOM=ZNM?CpK6Q0Uj@vUWmZ3E>(U1So_C$_rf=QVmQ_6FKcr7Osw5=6UTs#uAb z@^>IK4%uOe94>Z;lruT>pr}sgPu*^Gdf8oY!zRC|+K)Z$jI(ZxO-0NSILGMIa?IUT z$bX1a9xDJY*_uP<3?0%O_b(b5TK8p~kQB(FH-)-%=eXyt{LO$>Ziq!+03+UU4r0ag z9%?{6q%I?NixvEM;2;u~b}?WGwt;m{`vY(BSd>!8b1z3M87u)svR?ohyT&5E?hZar zBjcL|TSMX9!x}!PWsZw}k}UF*_IN>+d~1&}Hw6>ko6?MR37gq<2MZW~{F@)EeSckF z&mx4ge4sTW`3qD`_f$jy&nl!GPdOux?C)Y@-@J?82Y)s_f4nRSbArNyDB`(D-0y?& zNVw{Ch!=NX=cM;c2|qQspEU>l%V&nD2Gv>77r;~$ICx;Auq* z9@a3pNH_HA-R^v^1ILuHav$YVr!OUb^50!bBYZ%jY0a}_M0AUW-t7?51R2N2-k1X4 zp-fUE)hM?L|Jvr)T2sET@-Z4IVc&|(`Yay85#tIRVV&{~#i7xw*8%vpu=ccnVD{3H4Xs1$BT zQrP}w#-)SA33+8$gK_M~feYWb**9fY2cXw9R=5X-OG zM0ZP}HKMM0ac>D;cO~oCRIML?aiDj&Xy3m1$M#1aoK3X|XS~CwfWu<}p~0pEggpfC zB&8D7M0QeUF=ABsTXh35_9xlA+AauoyH04SJ2mMPO0RwHFP>rnX)kStoyW?qkdru)#MBkl9=$_=TBa(Cq^M<`)J_eW_BpdT3M z7LI=k8S^R->HRtU;&}^&BR~9!SiGhJZ5kRA)OwxeytRG)Nt}QN!$-m{sDPe7T;f5M 
zp#IoD?b*)(Qs&Obi4(&Zyf$`n;txY>OZ9NriBdf~w zC=zv+&rQF~PR$dP4eJT(OJOq*{jGeXe1Ai{FVG25Ku@k%Np*y?Q#Xznk{zq-X@MyG;SQ*?)4XGXwCW; zyS@9;3{GLYW3oVE8>T*xM;2jssdyBMzJ2jK+yl=B%GV^kkeUNZGd2L{oJ|*l+FC_x zo1Iz3>t#hESyQJ_^`l5fknT5?3q_S8?c(1?`wQ?pv(dhXY*j>H?1mr7pe!8*JwGO_Mz}ey zYwpUAuQ%XBwj#Z11t$Zj=jCIZUU3tG2j{~5r?Aci!>Z2#3S!{<3?#y1?<=*(HpHOmg)GKU)}atg}e zOlPXXh>ypOBYUL23M4+kd{^Qch&Htj;TM7s90$wXi;q~5XjIXXd>02VXD43KZtnK4 zT&FMxV$i7ti2w%c7UKIaD{4iiJ@M@eKONP%JOaPpLjoS@yEHBCGk`u6+vH8o#@#!{ zH*D=xI~8BBO=ow-$yg+Du%`Uqw`i^T^Pi3AvT@lKZmJr_P77)`^4>biB-?tF4(l{` zNkB~~)@}xuruZ$D&`zaEWB81D9JBP%mAP%KJwzmsN)#EClK=`KH@OFVjW5^{(()1D1z{z}NsQxuxT| zOIufu{dqnQ@`%X;K4{zg)Q#lX)56yeXz_k>Khs-|*`6Kzggd&gRSms_T1u_o=`2Rk z49|lJjtR+44^S2w6;3jY~*7b$alY2`LWGMR!MF=4hODq#8$D04{NO%x3cLZW% z5^4HtByHSxzyFbL$$A$1G~|Q7f~t_|cRc|?RR*%0K5Svm@Hl0tivj12AKcH466WW3 z4Cf(nNHS|m7vSXnpi2*~hTA|Pp6!SD2k{?!Xs?j?nVYuS=9khwGJx6!Y2QdZMysIc zSPcEn9`de5)G51bK`U*H*yHi05EylZU3Y(F(}h#3AGKIkzBPto8sZ(?^?LlsU@hKm z3KsFHnNX1;bS}hC36K*GcOWD~@a4fhalQ*{@a6D1&?~zDaio~>UEaF*vkp1~*dz18 zqPf@e{~9K;1NzXQ-Su}pig&D8EAr`lWVH-^n}frr#+OzE4z3QV!=Wvpv3(pjq5Byt za@5j%p+^36u>msuZwb1fB>kjhUBVAwB&R8OZ)`EE6cSdXcpWeJ_rRbzjvNB|zfOq_ zHhET1CwuVxgssy$9Mp;3)E+8FA0InpzoC2J|JR-XCy*WVYmr-vz)VPG>8-W0)xKZ| zB#o?ChF#PhITp8RpS6I9@T%@TtUnsl@`LY)+lsx^r6CVvbDko+yAf5it7_mOb_B0z z>Y)0Fxbd0LWHsSe|L}83qXjScxDgezlnRtlCV92s24Ct=6XEOUHiDr zuVk0X?Y#RNW0$wdj=Tgg9vrXZY1sKn7jt+M-1xJrcQMz15tnSJGfCfS2 z#|^biFN8~&FpxV-=4c`@u2Nr#N@U$uE;7C+}BgX zoy^Vk!R$$JC(50uYz3uwnTkSEftPjkVm?Zj^t$SQyz;*MW@-Se6WF*6T1R73_(jln zwSPxAq+@wz*kd@j&75RF>=A&kg!RRnn&^?IF)EMOKkZHMdAxiK5aw^^BN~o~A6@?W zN9CQ~ws8M0E%U74`(^Z(Jmne-HMvb59o@6m{NwX&PC$)CGg51E3C(}sH`~rFv`2F+ zx=>Z%YBr)8iBtxwFUSV1xDe}AgEmMZMM5tYm1Kaoky1^K#^@ps?iG}LwwVgD8Z!J@ zvzDZ4ts$vs>pC%ul^x(rtic_C(Fw+&KbJy#^^*kjMQh?3RKa%$GX3Y3;FU3@FJ%k4 zhy!Lsw9K*>J~`MqFGSv{iJ-Y*p<)}hYZ0WHrT^DAk_O-@s@f%Z-`EtwGG4zc+tngq zl%`}~nR+}0{}yi6_5g+mEeV(-FUfJVBPs>kK~=q%`!iTKKJZyBup|$!onUVn?$qH8 z7xn|e*aKsf{|WO+IzsMEpK0V3N5xSs3iwE?_e(%as&iJ#yOTG@G@W*d`%7i!oeRsm 
z%XdrsbP$KGvT41Ec1Pnbhdrf!Zd90UBLbHeC}gn2PwIo|12Vn&)U?$T`+RS5n`Q)U zRK*kj>*}1FV~lcc%kVMk2hty&KT#iqABc`;%b~CQL9*b36AMtwX{bkoY$DS{2WFop z#{yU)=_o$1q7dnJX3f7-2ISn{B$Hg@>q1hX0IUKeHV*iw{nsCk=qG{6)|pLNI_{%4 zy}O^n%rl`*0Yy2i<7KT7mAleYu@X5d&OJBIN~yBUkPwaRgRRnU;IYXNfE#ktKh2V9 zEhXgmm3j&>9y)I71xZ@~TH-e)Se09Znf$Mxx}#16Frt~g;vb*ZOynZq+k1f1Ni)OlQxJIGApd2#$zlQ% zuj6C3glJiPL1iyMsoJ~jj;JPe`_}j%RkDsIX$G$D0b(Xw}l>VCkZB z()9TGd*fYpjeae+%WUvVsMtmX>vaoKh+4gs=0@eJC4}!TyEoD(T*x-twhwF~r;=

ZoOvOGF7R>Jd=qD~xr29U&{A=i zwu)?zw4V|CkGuTIV9rNE8A!XFrP&c&XkGD!@a1=F52>^q2%2alhJC}{wR^}=0dW#^p83r1Gy2EAK_3m8rQ^IsRfOMYfXS(njV5*W4;R0-r;%l%?dMXagL3~v zc=^l7$%M@c;8&2r`n$~p)rJvih0pDAf|9^wyey=~J~69YWkYTQ^j9tLM~~&7KtoVq zUdpF&-{|_4=6$%9o?=O+jt*r6ifeDhqvH&HL`Esp?5<@sGr?p6=~R2E^elH5$#K5G zQBfN{mq+?+Cshbt&BT+|_J*yg~TeEO5qYTtzxDI(ZdqvV#J+t|6>$u{t#Pz8iQ8It1j3X?8!SeIX8sn9(tGNRg0OO zVhuqjQ0lt}!N>kRwhT1~Y9n1q)w_RwjtU5m2b#@U>G6mJ4;Jh=newj3HE=S4=sxuH zE?9!dCN9L!U9KUPQF%7MWliov5Vn2DO{;1F{VA(4H(OLgRoJigEY;s-P7$$L^GFysg>6l$A%cN=8{<@JSV;!(P78WB|LiZHSN58I918%=zQpUBn1ViBOsf z1*Uj=?>)r@=0N5^^oV>d=%rji)Uad9;vi6bt7U#lfM7n!pyn4^DHw#S`?maJ77u@~ z7~#-bqm@5G{7<;NzlZ7cw`bFAQ}APt$-hP%2IA{iS^K^XxFv$^MVNC^fx}SyyJ&WV zAAszuu|c2LDm-Q19?VRJ{1u*Y-?4~IxXP@x%(SrX7JyCIYNLNjnBG!Y{TA%96YI^1)&4&?PeeTJUUX~FiNuV5sXF`uT* z;yu1jm`((3PHR4)^;QLo0l(F%;%Dx~zy3tVU8;AYgjgc(1{30KlA2sB$mK6gGsVc?sHH0Jo9W>b{3omU{0tEd<-3bOk zGE`#t)snd4*G^w2vNn6HfmT9DoUhM5V`3kPR4f;OGs)w4K%nU#ulJ}HF-vRO@oc|7 ze^V*zS06_`xa^?;i_E341Y&ZC7c zeigJzOGO$-iEKwmq(`V9_|Qh9{*vVVQ1vC@=hE^#Pbl!IDU6bjL11ftViWm>>LUq% zVL-r}aPP!o)mEVV+yk7osD#9F;Hst{cYDfTuv&_P4YA_)qfGNJ#$rAgO9G4If`HO* zy+$t9IVF7-+3#g)DEPJ!>a-Q|U8b=1Nq~_oOF$Z`kjtaOz<`=!hD;%~MQWRfmVTj2`+rk)(na40ygN^l0SKtz@RHZqv?ME2V#2w*I z@}pHuq11JP5t2b+lnC0C^B><^cKA_~{UKm_zk$b)KWHKaEhVc=eSU>+-E~z%2CQ|w zEskPodBf4U@b)lA++iAL?v#svPEq+`VrM4W02HhkA}R(xj}&(r{IWDZQeuMplHDkd z5|BM_4aZCw4_cC?`^UNzFI@li)0$jvMqdanx0htX z+p9i1VMS2%L2En4#2A0EcVsXp28U&xd`vDS;pGyjU9Cw{!jt~UHYrz?BuFy6*?(IJ zfzsJ#@`-t}z9K9YlFg}lWZm9DS(2^?OLHIe#Q=N=sFw;LhfVOSb|&YLoYqQoUS&Z~ z-1yco4!KkkVJ3qLwlkP1R2l52HIB9fJFnoy%Nyx)#|luU@vO&N#}mlF2%**C1)Gv< zShq6;%I-U#)K3!IFQo7)YeUlq3fmW;E6nIu#S|0Ywe^Lf;D*5<1G#mNdIArbzZRl34#d0I9a9?V>&Gl3U2G>AzdG#;~ zw|fYcE^6mjN^c%6>}0<9l%WA;yQZi^!bT7+O4OAs1gk=esw?PI@u9|X#9DNHk;mE! 
z@mDcS_a*4MsD8pEBroA4;n-!VT3c#;4Rp$sK+d;bP%V|TO$B6u3K#$V<0sgz+=O)~ zyF}S13kGS6rGam`%clmP`a`FI~PkR)uMn$m+lLSQ`9kAQ#q&vZCqcPqMXGS>cm z-HG+4H(S>5;My2q!vQ7(DayY9ivz6)+q-`TXO}FTk}~)^vQX{i7S1ww$)SVvG>=#S zv->AGH5PiWEzd&P#EllX+{CTk)b6m$k)2k!E)#+h%GP)CN-K5-*pIjACxqNQgrQlCf=O?e0S$hN*NesUKLa0*&)WIw)*2uWsZ;qsC`LQU za~7Sym+&$^AaMmYX@SC40pdHyA4u@ue>xt#x17iZ7483xMtz#(w)u?kIe?Iv8>!LF zN^<}VWsjLeLN6m%IJL-}>LP#dF8F=*pz#%@UY?HkTNMf*lcij@w5fQEwX_qBrAeCS zui9#r!#8^Wv7ESGpePI$br*-M#~0SdceSTqdrXBw;SsMls}0`=buuM#eZxTt6q{ESj@f=eOoQtM0FpJ=Gyb;W0BB5M ze#w;i*;t80j~nl^zi4{B6VY>O>2zo7XUF}W95hKqsoYH~C$5wuy+OFkH88hYHDPMN z3HWxl$;&)60|@2bsdB$s${CApw2OesaqTl?cR>`ohW}E-cxP>`!Pjv`jgiP}wxGq~ zP>Cg&3?`dHMLd#OLSld{kBy!ux*67_JABV!mEcy?H z+w;?sLQX((@*-?<%g7|pH5URNfFm|PE&U0N0>h-%RQzTyFS~|OQF($+XW)I{JAU^!#!;1{~h)znC!Ve3rTk;MKL*p}3MNVqFCPdAgx0?)fvRt^OTJg`silS81oXG+A`RE_PtJb(UOd>rb^o8RZz15#Vk_2fsJ|MwCl8fr zjdJ*cjOQ?p0z*m)NuWbLp9o~1AB0DLhvpFR@U!DBD`L*0ojc8bFGNDu{If_UPXYQk z^4`6^a;LDUEibWE+r`9G7?8KzC|QwsF8pFiGy~X98YyXEi%ha@Ri?UPfdJ0H<`lYB zol0(=JSQou4WMc_*Yo&_Q~Y3CrpgbNx%sEP^zr_pu#Epp+4jA$6x3)7Vw+_4f0Dk; zSJXUE6SGD96a^!pRcOxb-fkV(4-Oun3xX?!nMve$gI0|VqD*lg73HpXRC~RHnwAGU zfLT7#224b8-d;tXarl2;wJ3Uio>pvv8Z{C$E$01_pc)IkKDYzgpG=^If&%v2Y`u9X zjb2;vOlXM@q_^NVV0)F_F~Tr^a~RAd6!($ErR!k)KI{_jR}=9xR4vs`{Wtj zuDMpo^x848L?c_OtwAg^%}(mCaqW-?5Q7>a7SK4g7sUUfY>}DPrMZl7++HTFMe&j> z|79Not-Ve5YDvxZ%2#8xp0B2bxUjsK??^TsXGizc|1}>0)4Km0-KS0W_B)Dr#P{O< zs^LvclF}&SbylT)bo^Qdn91O%Hq}T52Oc)8<6w@;9(t?$N%!0q&t{LYe+m5rdDUqd zqK@-h7?WE)DX(2!2GFmsY5$rHS=cGfa8|eA$_LH;KQaeNUcvP`xLNscRh1v|Us_mkkbb}#swg4~m~Tk&x6f}H++QTr7`4GVJI z<0v<9HbuLWyax>X>bEHF>i*an{0FpPSPXgy2B zZ}ZKmvWEuDx z^PsHa{aZw8m}6qy{eWh^tM&3V!B7morX3s)YP(XBHJ}c$@pKyG?yJO;ty>k3nu$5r zdda5SXzt=3^oSF40Fl{t(my^O$Kv<4NsB$0NXp!^7Hm9z>cHt5$(DLm1*8`;jdMl4 zKM7b~oGl~^mrF)hG-!yCSR!HHSnV)8k&v0Zpgi|`<7B-+sNzx|=_Px^QXHG|EhBx+pa$DPL z3+LazEgNd$r_NPHAQmnwU>smV|VJOov;w!L?`h9jhm&r zR|$@ZqhSo50j{8J-A>qfOCntvJF&FX@1;5byW7vr-&3|$LFF|#7OvCD2S1U8WPtq!x9P&Fgp|oee|N$+SV1 
zBpIc&?%=p)_{s&dWxEQ!gF3ZvG*K`gSTJ}O0iaj%(x-K;Z&;rnDj&BB^=p5M3Hi9b z9D|lE$mEun0~hv=IjmF5hI8sLZ(}CqG(0X>2SE%pO;EU#YmE)8;Fh=T>yn4=(0Ppk z8}zrzslGiVJ+pIFc>Q(!YSAcqKyW${Au#e)sXj5yWYJ>3l=db2Rj-KQ{)Ga)ZJzrW zYYz#Vlgz#=gTdTSrZfOh^25)bUT8`~OR1<_tEzQ*OJf1i|Fma#{nUOpwG_YLqD{0Yc3$^kSX7Mt~7W4icRBy7kR zHzOrwWhW^8-;E_g9 zGTTpl5=W%23SgJe%-9}>cr`7(SBtEKDx^H9Gwv&FDR#rg%j|E&g8?HfQTTJz*;}K@ zx>a@`q7Usa8wL@vPMRwX5Ya<9fMo&eZieB9V1TztglrE6?w=TegiOfbKeTi=citzp zz#w_xsxw^`&aWQDQo&Ir&JdgVo-fT9>v#VWr*LK}_?c#lioa=l|IV-1>D~j->t6;M z)X)Vx7?B)#7c`52@nzp#oguSr42D^y8N!|@84aG5JGb*EpI=qyPUFZxtmzkesvfIH zn&D(I#j^u?^9|scL}otxx`ycSi6#y3Ns5}WQ{Jd;F70XO(8YCy6_~a*bgPtgI1 zI?X9cA=#bB^D%Z?8dR_&{Gz8{j?$lb`vDDJ)p47kn)NzP*Gry>Q{|elJ{AGqXampC z2l>F|7!Y}}8&Hw>Px*DZ?0Ft^g-L6cL@EvZR&ROFfh$D5?%(Q^1gDsh4!u8Lv!qI~NSUKi%#E)2%v;U4IZ@ zNLed+YI3+erachPhB^``8N7JO2o;o;Sfe~loTizlJq5I z9yFmV+KUst%_bOs6ev5o%ES}>T(NvsK@2YWjha*(i-vA}IfYk-xCU1^}rH8Ae!zLBBHW%zs*rglJ#3s`K_YC001ZW4mcD%z;HUKQWBz3C}<{_Q`-G zIS(uD!Oyt5$7B93mC%P&dl0Zq*U0RNHVPFn{SAVU`aHs?7IVUAQRln=ZzklzLy(Mg zs3A;Qf)pIW)1_UN8>inaK_G@j5a}+cq@>mI2&{>hVjH4kZk=aQgQnJRKVjyJiup-L zWm@!`4Yb6if=_Mye?=qF9`HOwBE(0Eb5I}(wwLvE|6wUVW7N{&0;6O|sb0$mgHnw^ z42nh64$jbL;fg2}O>91M4^-|sKt?nCPD5e_aS&a0$akO{dRe{f|HOfR!i(TML>wUr zR2IC?^E_E{V=&1Q#OG>kU-8ly$2sN^C7hs@hvwD6?63!%1XSQa;INOczQkjhyV`H= zSG+?5dLPSGEsSO$fkkef#Jhc@ccYb@@}r!l1bPp3NnT>!DWh8ett?636QJI^lc+Gw@J`5L+=NESyIOy?2MtBhF3olN+0 zUvmu<@+cSWSG`Rljx~^@7AViKoQ=1;^ZM}9Q-)O?p;PWe1;tb~Gla+X8!lj+_#mJW zxz*3})f&msCC^1TW`^d^W3ubK3%0%WS4S}21^>BXrN%KQhpVu@S;k+~UX`LBzG#ZQ zQ0aKADIGXY%UyoDCbg$+T=?qskvkFpE2o^Kck2Sy`lHt{A0*(RfS1c$j;{y#yC(iK zzt+1mXqs1uC_j|0RagFY-T~E&B{_cOlaEEc2dv)F5LuR|Vx!Sr&g(CzuO?)Zt$@-% z%%>D*=OcQJ5y_9epJQ-$avnG8b-kS|`16`QQJ{t9NR!zy_dxea&Qs(|-+qe5K;109 zz4a8TlsHCE8R*QXnaNb}P8t&X;)SaRb$oro;c$;9IXf53Q~|PtQGX%s*T+OG(z$3VJivB zs$vKgi@l~(`Au6ZzlG0H!K)I5`P>+=i>?IT{suM-Um(s(e}81CZyNEPU5*76q%RDUH7rl$d97ZuT=P&JVj)CIDa z4R}v#?g(A|bi726eZ873kQ9K5@u{8-Ne9QlxVae~^5nv|p+cTBuUnQuPQJi;Vrigr 
zb|UNZR4v0Lfx|%@-VZ}T0y_tg#eY+DH6wl_9fxd_%OQr^Y8Ic8^b`otv5y1 zIY1E*pG|i&X|S{Pki`Hkzz~*;nA(66ro=UdCvSjV1(a(2>~@Ler(N`ugx*Yjhos~G zwKjK1YRMWXm+Lt-!52%5S@r>n?@JY2OnU zrU~I;%S24lLhLFFZ0+6hjKYKTlbjSB&%J@P`35OA%iXIflzN(?3p$y$0AGp-P0}&UPU0)JH_+kZ^+MrPjTY zEJmdU&5{Ay$~dgk7*evZlHhyaC_4Y4%$ zf<%ZqxQCId;nP!8tn#4w_CX)gT+>Tf0~8k5(pM)axL|Q-Pqc3TtXu96WYLJM0br-5 z=ApMR=KCOUcrGs6Q)3(OHIg4z0rm&@UudeH03iW&UL+mc2deGb7GXN95}ZV|6hD$b z-BVMiX;!-!K>j<V2OO z#AmKffH1C=o@ttRefg}gU1L8YH7;$BW( zVAnK1pqCB<+U{b!TKCQC+f3z;N@o z*Tk<5YapHGXfSA7A@8!kd&~Fi)>WbgplIDs$x{VJ8-@-u0FER$K+aLD*14;=ee)1b8%Z+?J)Pm4h}^vA9cY|29k@d862h~9SPj#6DjsXv$jxW4-G`- zG4Xi_ZxtYckE;I<`xR_Bjw2tPF0Hd(;y}n?@dNjgwy8*n*rWsN(u=3KeIYY7@$JE%Q~P|34qN5H;Qm703};nW6~C8C=w{uP@hVxuM#9eT zQa^59kg2Tz@Zcdfq*!vtbmH(1E3i~a8ZOv@lh5=8z6?hu4Wo2pYlH}javFkhoY))?;{C)N6A&irGF?>1j{4C z&nq!bQ84xLzK=f%GK#NyaF)t9zU^3I$!$d%f!$Is5m`wBT-nxYRfr%LBb)4I#iN4E z#|ptdg_HNt&C4<$cvbpIH`)UOC^qw5w%30vyN5LG$qhqwpdNlbx%_!>TZ#2Zw_3`2pXK#h^kG% zM2B+9B#-9p@M3RXrTcf@bau|4nBSYNW+trZx3rGH*p+X^YTuG^6+Kf*hiI{vkl#pNe0Ow3n3wP~667=u z54b?@w@TdO6cHSq)33Hg!4zC!rgBFmSjh+5F#^eSW%Z!tPe4ok61=}84=mjjt#&pbW zY|eI*>xVc91;<|z+qF_6Zat}gKC`L7F#)AsUK;HDh=T|+;eMjNvw;q7g3wKJi0cmU}QT2F;0hK;a3XG z#Z$DL2F4n!uYmnlg1=;j|txyhT?c#PeF?A(jdp0CBsk< zJ}D8a>aEPLdlQ7(=P8o08J8ChK9H|Ul~J;a77OpDe4Z3rDi7KF`3+A~$NAlN_NDBI z3TX3vP^Sf)hE6ow?ECxmrs1{(=7#U4D?y~1Uoj+Az~HLa7O_yzZOg@afs?a>>h-=o z-*sr4QPC*_bF&c>!2DM+ty)RkBCR(Od{8Jlu0Kc-+oQnT*xER%Grm&~6rAdb8@KX1 zGF>^dbT1f6N!3(QLM4MHqOKm<#Dxh#`nZZpzxSeH8y6n(Z7@q&&)I>X;JoIw-GHYg z2D~-+s1tG0!PNTou=<-@x{%OVCr0S_ps&$7k)`7+7M=@mzWbxXmtXbUwIWW!+DlgA zJV!2Xg4n!|^SD3sF762c(~K_$dWmpCGHM6i6ebW&dcXCVkmW!%>!u>-+yvQoM8yXH;tG%xUft z*&QG%n9-L8t-8Xd;nwPv)ep|{>MVT`|6%f`O9${Sia1kX!nD19`KBN^8Ewxr2L+O9 z0(&(3k)&BAfAJ9`R0k^8J(8UT$}@T3`>Sv(M*y0?QZmF(WqOBCy?JGbejmBFHt^&x=>=KSo=fpo z(Fx67Izy0THK0^(Tp}*ih#prR2;S?l=et8^Q&;nKiYv$SF8RS%hR#fobUPBAs6#An`1z&bD%bc~nI#r0no9cmWI`(e?{ zxVs0EKr+`HoYf$J(=Q(U92q56U5V#krBQn`siH_QIubjjGY(#J;#JxJ<`9hSiYuMs 
zSOBMRYt3O=h&E*hLc)>uz%{1w{JIags|zRye=-a-@@87g45s-w&o!Pu_>ub%McF08 z0*4pG+~v}!ZVIecXQw0Gn9-$ftP*Q}tEH44?wNm|NgR^Q1R8&cBr;FrHm#heL|;+bL73534=<+~vXQXcQRo&L!FW zfxCK+;x$D)e?a*b=4)J?UR)b;vZm;%O9Vc`s3=DDzq*=~2O%PPgzMKQdr z$|d}jyaw}bbdyK!4f}7mFK5O3S@q4_4lxfr+9jZCKdE7H4^Z;nR9Ksx;LTNhMAk+& zQwTYNb}&?mjhhxv`82LK4Qg8kDGWkpP(IM><1e4ZNwE9Q&UhT4Khjuc+L_z7f41N7ig=JAfLT(dQus#|~@hwD-80D&m$ zPUnwzDj6~dS`+)d2?in^VX}O8e!AP)z!(j=-5k?t3ATFwy!{>zVzM9+h}}Fhr26js zq#BbV+w{cPoUUQ;BDs_5>BuaAUWBgVwb#btpELnjW#R~02caBkcLJC6b3f3^2$SM@(~p7ya((I86{ozp1P&mZKMnFVG{dODSJlF2p*9<H)~&5NeD~&>Z+g>g)aFCL4I>b{Vru3?(HHm#PswF+kYCtsQ#~{|K@A==KzNP1HQS9MA&+m;hH@siIjY7Q9Td)yYrByoL!Kj1S z;_UTxVi8KW-3qHJ*H)7Pn?xT~CZ8q=A>&peAt8Wn=yaNPCsOP>d{ZU%+ZTgVCpZXI zk_LrdaCV3i?*^=uRoxdiLSh*g2YRjz1gD#2w`Im2egHVsuH75+m|eey z`ZQFHZ@lRKegAG5jJmp1Il4c1ybtyrlI59C1X^0H6>Q%83#_3 zzQCEo!SpjhQj{(FQ4y>?3OIpM;3PyoMP)L+@|W6)6bpG z^10p4+3ZiVs8&?IT}69Z^2}<0gMH4xuD?Wi=xq~OvEL7GV2=oS!|f@z$e}v?gI~2l z4!ZJYM%p}Ole_2a=zn2{v}TFA*;Qlhb4n;g3;q7 z&ytu~@<>-@ghLn1l6cYjnY(E#`b-a|bwgH&ct6h>eI%b}ITftB4tfAZU_?R*R`&!t zzEw^2C$elfZ_7fGS9hcFu(f_q;{kAIud+NNmuL?`HpdNP^*T+`hF=H96V+S_&*`74B^Tp4A>sY_?rhzu(`NhJHQE`S=X)hN$4`vP~6#XvS$hPESC z{SdxgRg_ur#Q6Q0f{ui2NeYF}9U}>H%zwGU&qC#vE+o;R#fA&(ONl)gfekJsp8xx< zJ1tRpYo($d0Wl;^v+b{n-P+sn(K~c!JaSyG`>7x<7C5harIP4IPdi(lrf(Cd?OzY z;uSs(i!dJtXc+jTW$JE6-n{xh*0W7#{7asHVZVzdS|IXKZ&_UL z`N8(W8Z17;dmfw%-^mabdw(6VTGAihM7EP1qi=bD$Tjl=30(ACH+8V)_4meK-fO+Y zOsijKIRkxaz1*{bW=(E+X(uX<=`Zc*GmYGdVe8`;tX6M)Puc9iuFh3K?cAeJ~(fFQ&FCfdIE8}>sQ21s|fz~m*rltJFYfmB1hk2bR z!HNCz7QmH!8%U3qQOZIy3yjk-KvcO9W?NX(SE#Cy`ux|243w%)TS3j2C3Dq9n+>fU z?}j$HeuT1c(C?dhf?fDA4Wd6D^GgL>$pzSvTWIkdbSG{MmYjbf73h92V^0jE1v^}{ zR~t=emAZ0^;Xq$fJ6tRgKp$3pyWKFXj7 zcm6kDZ_@EfAf8qA!i**z)+4NtO44DXXF?BL8Z zw+4Q}DdE*vA=HdRg;fqd8M4oU%bX8iD~Xl(iRLMC9?m5=Kl*Y$yCnb7O(4;Ijk=%( zMQbhfZLLDmK? 
zf-AE_QB{#?Ky~@fzoP%ZfO>w~FBAApY~pbl!t}T)Q&yJZn%?4;vWt$M~FVeL0u#VxI-lJD6g7=0hV{FJSet2+zy0N6lWDmJmOauqj0Xme!nNm7uEs4wXdk!*w$Le{Y`-2Rc%;_q5fLvzcnmpeToi* z^19QtEAf3*?thQn`CVWv^#rRM<407k;my0R7e=!gR>`2=Fj% zez&S{B0G9-_`9$!bB0{MzIzXI$i%5mUY;CD`vdIC@$0xw^V z75EL$jXf`h_DRDj10BXge_`SNA>p{@Ku`ZcZ@;cRf+6>@x{7Y48sgi^N z+`EDn{teX>P8~kV_K#llJ3y4_<{{d6!xF*@q}xb=1`nx=2<}50 z+X;0{Z8vuUAKk5B9uM#h3eNzk0kn=(xq90X_MMZ)YS%NrE}sGCZXPz zu!97>DbtrJMaHv5;7#+sJ<8?|69|LP%l(>(u98daCFxNV3S*BgLIQf~h6Ygty8>*b z!E3-Vz$84ZGA1ZNFeWO?CD#V^)b?$=O?Ba@`!b%@1OsdcaNpd7YE>s0iDv)65DZX= zzu@1lJAaibDa5$$H33O)4qsBeUNzLYgL2bEU6<*86B{vm{h?G!D;6xEHpIknjpBio7{3vl4XV2tOz&PkT1a~7(Z*dNicL_`iv;t^JQVFQ}TeW5C zi>AzfAlH4C!1c3&d>J((8OD~eADa;Qm>SXmWf6#&7Cv0(9GE=>d=?^pF1Mz)Ix$JK z(YvGqOkgDKz(Sz4@r$d8s+D5_gO^I_<%myz!J zb$|k)gI6DnxwlGt`jX7>mweY^Kid!q7g2NlPb^A9?x1<0ZcAYjo()En6q*c?iOkcp zYWVKf3aeI#fMPHE97K$yc~iT3SgN^{(AI1e$;>8Dh}*4W<#^eL6!??n0&Ypkh&ZDA z`pTr<*%|HUwy&r{JH-DzW*uK76A!c3fV)oPgf=<|y2VTg6LYpKUhmUntEt`Lj6~CV{zD%qX@lwHUsLy;3J8-gSEXoQPkaqg+ zb6owd+W3kmdfG(5>FmJ49qoabiG=3P{DcR-5D>FuT%@i0{7h z{bNkmjlcog70gw=wn-kJ0u=o>nG1i@G@G(XZ~eVUdR7Xp<>3Qivu4|wG-uJG0t z0?iNJv?+3&{N!X&F7;p7ZqPxz<4xk0e3Y@fgh~k=l)7ti@k&qu=hU?#M$=H=x)_i) z4X1`Hyu65!U(DY@8>~0K-+UF%E~tDVqEe@!aZ;mKLU0n8DVriF<(K z&-e=fN-Sd$^#v>s$>5S0a+SBCil0mM(uU@?`2U5@{l<9#h#4l&`8QLuWwd(SZjeL= zD1lJiYPz?t!MjwotJ|djBfNnnPQ+2Z#I;de-E3q~To`i)Zs>~S6%8h`D5%U*DY8?{>!BQcL z{98;^inMaa$SGwtT8q?7Z#2+g`PirRR_BgQ8!F%_5h$}AC&*yE$%{q0vcJ1y8u89A z!;~fU*HE;L^w4b-ylu)m=KLrXmGK)*l#{m=h8!HLIzTk2x<8e@wbfyZ916leklGue-1f*`9^&+PM;8C zFX3>E(-N@*FC~;Sw)~+h$a5aY0$`Ld?|e6UmGq=Ibo0#1Xgj^7a^u;&i#%kQ2HwD0$f6Sm656VOdbAOg}Hvo!wYzi>k8hQWr7| zhD{y;%~JrL`u)#P)34bt$%wH~R4|>&`|_eUYngcsF1b87%gDOAIu-!0u z`D`>Q_jO5ZRCe`ilAFK6fEf%*D|q?YicA+AimK}k5ka!Cebe{ZP-8zAzGH_7|8=gf z3lPmPUEa(-^x$_)$RBX*wQXc?$vkz*)Evmm%wlP81Ln?M?Ur}}1m*L8GaJG!VLQGo zRrO5WMQTQTu05%$fT1h|m7P&N*D}2WC)yWRvBKgR>O<;0w@*uGM>V(mOZ=O%UEAAq*rb4&;Z5D(h*{Vw}Qxo`x?@}%)Xhbss{zzp1`rLb zXrUDUk%liq(r?&Tt#x!_YZcB6T>0fRw8YA}59Xwa&$dY)Y*KILh-ea27>QFzoL?as 
z;Fi_Vx%wl*fKuG@?|`?!L-O>up6eBM(@cdL)z-yCPB_(yk8mM78v0c;-fp9_R$@Vp`{trSm z;5^qqoQ{C=Kj1UoAc{6)J|r%OL`kqh7+>R1M3(vW-md}K9WKxQu-d3`8O3N$<|ZXG z40_x)o4d}N5#0sgf&Rzmn6Ul&g6~p3%@6VXOsN~65`$S6vs=ma5mB>19692d-O>cW z)}8b-Z5pcPtCeJ-3O{B>DTEqH%F+i+9cn#1c#X{q4Ga&aN8Ki~0gr04-kqANL9z>7 zPedRNjp|cFn*Z*6YG6?+8=v4Z6(h%Vp996wRGFtd{{MP<`f>x4pyDI zgXaC0DhN?RuH;_dxsNjft@|sJd%c4;#U@1oT&~F_m~zA(`!O36Ro=Z~6~MejJG_=6 zS0Uubobko>s~qDUS331@T~ybmY|U$dF&N{P_S2s6MMJmyj4{ib9=Dgj9{tE^wCxS& zViPCl3La4!#twcd7DDdeW)@x6T$op-3>V_b?_NASF)Na$f+0IJIxA(h6FW{O)gO28 zd*e$n>hQt76K?R$pRYNPK;ZPG<9a^&G=2@7u#2=U=5n?ghm!z)p|eVe`lHKxK77XU zAMe!?!=z}KfXx~;peV|u_6Z9 z>`m{!;YLO<*RNDQUA^hPT?^TKYZ~4 zjG-tfFhzdkxF8=#%@KHeS5rq@q!5i^ehU+&OgEu z6>>Ax>C`{PFyJZq?3h`@*zYpaN)rIiD?YgZPgeKfA6_im7^zf=4#r2O$ip6i-Sqe zJ<$b>h{=7S{!(yn;TlHAR@(M$8*0QsY5@^s?gSZQ)6_v&WEZ_Xs`;v-%+8}g$EVr* zDC*zi93ZTis>W1DEuMd+U_qx>1Ht=8c`jKxCV2<}jMCE=UvS2lbx2vdN6@UxqVb-= zCctj8>^T%@bJl@yM6~Jt3n-XyCI(=3{)3sqPnSXw9Q04+1}A zvt{cy@AntMJYU_z!Wp1?UV=91gl;Zh1}n7;VAgSn!H0OqRxnHN{o0YO6;XTNA#u(y zj`hOYTX9kV2&q0;L;c&`R56>+xPy7%<$WowxJvyL`0-qW?QEYJgo&L{c_+M{RAGMT zF8z1uPa!fy*IxJ>8oenyuNSKZ=o^{eq@AahD(_Z%rpF>1B+Qqm1V`jJZeab+o|SKa z-u%f7vhL^Aral&V^h&!#+DCgDGTyU5)k-lYA{7|$4gcnH)OGO5(dPLw-d~IR?g;fe z4;D5hvlWXm#I+N+B2`)x!V_!L;ViNa6*o?+y0Tf^Z24~tLzR@|M9UtyP%HiZWx#^J zQOHi8VYiL8;9!2DttAkR?>Jqb4Vwmh#74M_W>2pnQu*4tmt1S{X-VM3_p4qs!~ag# zStbCp_zaeKCx06iE_1Pn&EA`Q&mXj25Qg+*$)xORght@;S~OCn>g2FSCh_-5AdP;} zaATcAyiTJSp_uE>pML???{5-8QH)OE@BMnK`sK>g|LA z>gOX8Ny9cP(@0>Mb>z_BZ$mQboYdqTlR+SLzIJOO?93v(a#+AF*UYTWRW87J#6Z(m z*K>}BZ5{|w#S2nQE(DJKRbe-YpwK`s5*a->D-8|8IQpG5Zvb-d;Y%Uoc_4lcpvNzG zl4yFn8jAjyGr}f!dkVM7h6gnGp^sIW#WpnlyR~0+O<>eVj@B#P`jq~o;emnY=m*LF z&2W=5zL>6;hxS1W zB$kt+*M{HQOtJYS0rO)s?b;1R8et8CApW!IA>n8doefb>K;METt73HxiCgr^RACy~ z8Yi=qd~YP#@5Q7zWP+P{Phqn*n60ybfP1gI zML_XV%+^Y!mI7ayMJHF<(``h+&RUD_TSW|@ZZkBg-xYW@)nS?GIR1_PCsg0v_T=Ui zR(WJA_e39L1@t+lEsx_VZ0&W{G5W0|^rN)%{BH{g8Q*>VX!>$RUcKbo{yvXW50C^w!|D zfaQCXhHME6Wgeqj7Y}ql6={N7Z~d5&7R566B>^xV%fG>rV#{rf?<_w^F8$p-V)C}Y 
zR2N$D=^R2jukM}yIuIVc5q=n|wp@_hz7Fxhn12u25rgfsrLZ0n8VC4c+LU9K9A}-& zCW{TJdXbee!1a8@#N$kqcOY}Xg#-q>0GH=R>WoB{ch!PHH7#Lm-EDnZa&Bxr?l(Z0 z2qNns54PCKK2PHlY~ZjHZas!%COi+=N&G>9{9|uj1>_>Gf(4o!4`Mq~6r|2a?F*CU zj16`qk-hF+9&Zdy!RLr{ze5`Q4^^%#0j?07NHVY0IKrl6KD7Cu(0S@EG*a1CMAUU#pb3dPuc zws`l`X|n$mR%6pP6^&A^tmVEi`W4sz&_34Va$EDBdIjNWHf3cD!jq#ybn8T(g%GwuXUkAO1F}!SqrWrolJ@i~~(1 zY4@eB1IVWt`7eI_RCNsS(fX4ef;Vi&KPXDUkr8`S$?%ae_Ap_G?}<_xbB30As=+~c zL7LuXqQDTK<`OV(MKSX#C2j~=?tTzS7SU1ZyeQPTvHlqPH*yN>hY-$Q?dhbYL&Km4 z*^DnXw{;0u*>y(#WJG7<&h4Tw84NV z?oS0>`aOA^%cnZrJoWyWpA~=Y%M;aBomFH6HA8__0`&w{nKE7=MF1{_&QB*9_Q*eS zWdh*mGism6z*o@K{09-2By0~^w^~R~rOsx4nzR9hnr#Zn2GiTjGXtDCL4Hq1>4Grh z2$s;S;S}PsN8`6^!d+6@?+%`dv;g(b8T(Y^(^O=QcCo9q$0?`9&h_x2XYf+|3(daSqf@5&b3HhhYZSXwMCcbH~FTte~7z+N{U0@IT-0$2t@wCbs<7*5xgD zf~nOR)x@a->H^~tp3(??oxe%WH;|>l_M(sATsX??B(@47({0`Wg<0Z90+#PLP(F{r z_;G=qHRe%HL}4kgZkHKdBN)pd(M3k&8% zzNKr>8)rFV;lBb%h_G7Aoxwjf$Pb$Ss?EYMN9M-gSQ$`*e%_vYZrp$*qjF)J7n)k> zc+7NKdFofnVU5LK{3zsG5d&j#EYslbJFED%U z>;vS{-U9YmeUWpG@Zjl+*s=IdshE{i56BkOvcIT>H1~z|Wq&a?i(a7648xu$)lJ zAjbX9r|oqU%dO39IMmRRNSWnQ_anq)DIgRj&A3Hk*;!EkvG0_L8s=e^=TJgzyS2OV z84Zu#AGlrDoD$9+KpIV<*W`_;D2Mp>8|T!P#b`NY4;X*O22g(yM-g}KXQDy-!vdC%5`oyfn))=RN#bAc?l+ z=2A_thx>TE8-~^*0@}uv_QVLTa50>U1R>M{4E=ddO06+ff*hl-@tjQlz{Q_aP}qHf z#kP0Ou^Y_}Snm|}xB45%h+1>Ge3rK-&0A|$?wUp8+2ifvogaEZC9&v49Diomz)ZLx zYgX(0lHLyInzCM)Ed?B3y?8WWamuO4sdQdpvsNKsHzAy$uYK4@rl%&rm^*KEtVscO zj`P=Kw7)P%=Ssh>ilzW~bQH@Ee$Frnqj#T+M(u&K){(^CN9PYdnWlZ{mq;;?hNiul zHfFDJ85ilHj+wwthpaTqSb)o0C+3J)x)XEQs_SzW^Sbp}cXXAyg$_u#2=PyL%7w{% z3J_tD3(<0^gj<|>-MGM&ckx2}0}aG=uQx8kz4IcXqQcz;%ZCdw-_pJUKeEoPX^#Wg+!lelwRSeBiT=#xd!GIr(W!6flon=+ zz4~F!4EXov#H>+RAbb*F59GW`k25kusics29DEw8_kx-20a8D0{A8yHn{qqZR5GoO zyvd$d@L6V&3RYatb~zszP%NkAK(J~bl*PCik9Rnxo|yR2U~>?vY^CG)k464JKx*G7 z?@Gt|2QHAT^msn;m*z(+TXX4nNh!7-1?nMrKs>%?z%%)jG945|j%%U2mE4wGKRI97 zZasJ&JjlxchBKyjKYqL5&@L;2+_(``ynvQ<-Fv)c5|QH$QPJiG6l&yX+E8DOoP0*< z3|Gr!#sn=R>IvM|?KcA=CJr4zklhp@n4GN*QeeswyN6vmql1>*N=SX5($pU^XkWo7 
zH!2#@tk+e(jx7Z5e;RyHT$ONdWPc!AAGEGs=VHJwSmY_Emy=!txW9H?ti)^vQjd^< zUp~Fkr}!3)6t9A_6bs6$BP}V@xv*U8oD@l$htRP^4yhde!l^ z!g?{9|ImnIBK>Qm4FPuu!SQiXCV`z=zq>Z&A0|m{k#-g-9IB?Gd&1<*z2j1N>2-yMwe(OUjDE_cPpY)iDZHp`5* z^3M#z7(7ek?)^&3L$dpNstzakHLmbGtWbwae=i-;thgO_2lyBNJ(jSX;910c5t- zv;veL8lZv*6FrCseV&K2v5ys1<*THXK2;j&15KF8DI*cdiw-u{K9O`|EN1$;4M zl~OFH^hO9wuuS%Vk9e*5&$8av)z|Sy$-cd{=l2T3N=AF+q)%2hV98GmbFpcy`YZu5 zI9RS-%2P5#XW^ z+-0t3ype9F^;82;m!cO8+2nm@D(Cx+&_iO($~)t{w?eavT9m8kR1aWlX!c_X&tYeR z9M?ZpU(BRGQ2ImXf2GE8NsUpcbf184A+18PqhYthx0_NmUcQGy{)Dw~i;Ig#4a<1e~%zJPB|K|5wM zfsg&2hkq5*6lMNC;K3}RXfIAB{Y`Edy9W5?rD`yC!~c*E^Mt?o@_L^aszJz+tWPg8 zY)Ec}U4q@?B4V@8TTj0O=7`Z@tt>)MuM)BlM=+X~KDB+5+y|6|gJWv;UYvJ6-fzG8 z8cp{LyY4#U2h~dF$kgJ4kpPdlr|RE%YaMo+Kvw_EoH8+V!tZOwH+xg1x4p~sUEnyy zEktJ`p^asb`qTqQp4O~~objkhF`Xnq`>@pg0;Efyp6?VTvi@>3z+_tw?p_#2QA1nQ za5!dWRxWc42Ulr=1^JM>AHI8IJaxw}W9+2$AVaLcP1RfnT2Ctu0+It;-Cyv|!?PO6 zY_Ps7Z`O6VnrFpLLbE~z9!(b`Ab?K~nt10m2s^l{o8iH%F!E0=*^`uv;4-Ju&4ryR z@JfcG!8G$h2$AlIc>Lb-uVgH(g-5A=is;F+JU$8ytP@10fA*^3jGq44plke1MZI;H z$A3D>2-kPqWq)oIyszrMY3d3!Lwn%Vxm=DS@W0XtWB;vA7S%VYrrh%YX1Cw4pm$~x z7o)p4NPBOj-wB+z(D1&6)wgtCyKx9X*muQwcHhv^+8BOqKx~ZVCQ!S2AIGDBNuPm5 z)EEPt-|}Q3HyZVa5|bHyZ{d*{?E>)S1XQ8Qwo14?_?E$T(`V;s1jb%Czw#FAgTUMZ zws4kpffgGan#=<0Lq7ltsB5SQcgh1RJRAjM^It9;5U4e=m1_U$F0NlPaRT@U2??iX zu?UKstaSf=1FU4|lg>W$yf1qsux-DD7{H~t8@Bl#;lfya;0BCIg57g!?RQb%KE$;_ zoHY%la6m7+H??u0cjXEI%bg}6t#k?9NoQMM2U*b75aHwo_@W_cM^zY!#xzvbFQV_1 zsaPX1RC1QMRh*M&6lkyjvdQvvxsY%}rKS5l7oXx=ObRzjxl(7d&RB8?(08VQxzeAr zamD$mKUrUFri@E7oh>Hv+T24XO4L?@J|#$lhhCGz;Q+CT(6I4~htzq#M@;DwyxiLv zjGyv+98D-7yXhttX>#G*T$(&Es#PK+G6`gT@DEC$n(|tgom;meg?Kxm zr2X;h!}2ex8nY&)(w1DB6JHUap}RXHfwHHdiCF}#sExQDfyYpiW3E9v*D!gqB>e=g zUZ9GPpU1WAFl6)eQ`F?mW=@z|G&m?@jgQu}^ih>cEgpL2vW}LDFxx4%9*G(o=z;?lRKP5b^~q%eWAuAR2lXGv@vcMe zKh6y+nB%Nuloxj>li=_Vni54-LiGvBu;Fw}stdl{3Hc@c zXD|TD!$tHz>4B%#sTz@`F;5+#EUHoC#g%Jr=c0Bs&xk*^Pjqtk96* zQp(d9YqH5vWzZZ!Qm%`CY0f-(fq8Oar42}o7{Za~!Zne7(kdZXPJ{PtX=A*F`kCG9 
z>9+XK=@|^TO62f}9N1zQD7nXO*nF(IzF5h<noAsd^Fa7)i9bSHk(gc)%I1$s=&f*x-@T#>TJ zTTDWP(RUp;T8+7c?4x!$F=tVgfyo<`*~cJkw~c52A4Vx+3a9gw+wWjMO6Ls@DCVRJp5-4m zF&2GST1zgl^g=&c=8TP%qoF=?*wyU-<4iDpg)efo-@e*bCN8kI0C8fgG2_egZSe~YK zPV4)d{S$+Hs8GI&w|O-rA5sAx5t^i3zuji8-d>|dW)%C;QzQIVTofU-prpcDlM9fh zCrFC1k7*v~c7{Q+ziFIU(g=FF{e2v2X#*|!nHn5x{?qj7lwe6N&mO@~_V!J!+!68M z!(`S@C&=zXa0CX4s-bM_&Cd>e8&Vm9VY3Gi)YaT1u2YxIDt7kxXQ0}7T+E%6f<09`NPd6dA9MXX=!dIo%fg12tc$8! z7D&F}4Vw+!#g)c%kxs^+9WPkLZElR3A%QwYpaqDBVAK9Yn1&Zc$R|YD0}V;E{Xp8f z9N&#Ux1;@Xd&CYED5tDKAy3w({IF4?Rc$Em3hs|vrMek?%V!e7*p1Eyeha~+@fsW5 z31uWL28$S<-NdT9C(5((|770DrE>IuxTe3;-LEj4hYolAXup@d^F>-*p~$fmW4@QVXPbblr` z*BZGuCBm)l{~QgyO*(QsO9c~0;`Kwc;lvR{1}yTPF4d=~eX?IaOHCKQ%T zsop0nVg_0RFo-6v4*j?1!2~|$<1yc_kUB!V*y*zV?!FRdm;H628Kb^R$iDa%whv(T z1Tc=tCutaAwTOhmyvb6l@NUIm`@Vf<__`O#vjgnHRyXJu10 zqmYW}1n41!j{&u59HVV)Bnw?n+x{3;X?`On0&YXZJi2`#!+}D!2+-F5f|FGK!r>rO zqcf=(ZqDBBfCSMh&x^m&EsFy&4_@iQR^=F@v+YoNlA{%C&Jdiw?8!-q18b9oTG;pe zzrEIFm8uQ(KWGG4l#`c{>;F-74sKzvZ4`gPvTfU1wQSq2W%p)V%hqaX*)3yn8H>wy zOUv%-`ycN6x}N(v=lqUoYu`b+GS@)Y%g<}7*mWSh{y;V4*~wt$!&{iZ^Nr|~d51Ch z>O$Qp9IK$dxC*dHWLtNm`NMweta*keV_r+?{2N2fnW|KV=X*}=Rtek(i--|)DjZvB zhr9Fr%PS&$dEn0~^~h;GVK$UTjX*52J)?bi?0-<*=g1qq>Wm`-PeT8>m=3HWt~P7b zCxHg5SPstaOxZFGsD2q*#1IiA+w(o*4_7k@(R}Vml7N43<0yk8bjohy-#wYW?xI#} zG*PR?iOUxY>F4UUN3cg68S^*r(O5M)-gW9ObMS)-%feI-;V<}k=eeq$QJ`GFjjS(L zR7nYq9cSEQw0(`Ac0GZjJzVRU!vIZ62I_l}a0@U)9LyGV2;%U~P-n*<>&T=MC0d(q zFm$E_L3V)_8C%>|o1|ohb;<7-2LUQs+TO_m*0c{+{QPbvr;$?I7CCf zT&w46YTV$T;}4bbmvyXP?2fdS6O$eCK^T<0>nqewXh=#=;p$k*7y=FZ6)uZMWo(wd zGWOL-M1|qN&VFY?2#2_U?*m5THz9$xQ)b6cc*CK{{*n@khhH*5GrL`d*q1L7(#?H~ zA1_9^a+I`|{|p~o(r%?lCuHUvWrRRKGrsVzmimY_}t9SRgs3+ z(cS1U|9d~#Kk@*S8MOKQFpKU#SABFJ)8O`iMM0DezSyhtaLzRzIKTpTo>UA1mN6BT zGK5#N3n_X!$u(!dC6eZAAEF@J62-f}ZH^yB@gkd`6p9j4N8gVIa=|DWxmEaK}=>=nb7U$tbyt2YiZQg26>uBAPX^a>k zTaw+GbOq)O3NZ@|&NO~l+gHhUP|(hCUHDtHr@*?=2}2~G^ExSXTF8@K+5Cue^{yWk 
zw2tLgRr9Sg!kPK){8lbfP&!OVsm9aKm!=D;`m7`m<_9L{M-A}a7M}@btfcKuyxn=?-IjOnRam1v3LM*WCF>Hysje4z&(o!vur@UU^LT4d#DH#Fya7kh7{9KHr(EZ_2hq*0ZA8REB}*lCLg2tKh7g& zDP=&m^z)p@^L_psuBaC1vQz#NeYIWB^T^8%xf{OkdU?9^GXtv|%1d_55=)n%1`IwBoNQ}Tryput?NI$mGw3bG-rSFSC zHFUI(7Vztt*wpxrfy#WAFh&5o-;u44di7?EQWZRegi4S6&i{}b-b!ugpt+5VcT$e& zYF50bfbWR>xLP0D_PS1d2!*cQ==nK>T2fw07K>7 z3@Jf|JNFRD{w2wp3d-*uG%odnt50feBc)+UU^~N2j6l@CvUPF|BqCP%ej1iK61bde z+N;_B@y*i;WDWh=J=pEb>7>E#j3DVc;98ArSwu{~gWx=zxCc7`lUSB8{fTVOpA+F6 zA+h&Jek!)GSnvI`L{jA*S^W!q16E+-ax?{g{kC)a?SYhhO1Z#~y^5oSH3&B_u_C>+w5T zS$6eul%E87mo9*g+lX9b-!XlI-Hs$$&<}fzAbChj<)Y8NQ9qBW8({%Q%j)ta%8h5Q z!=mREgt-%C=PjHWRK&*@^z(YHH)KEuY5dp{iy+?{Ie~gT@#McSsI{H?se(AB2}NoS zs~J3;=wINBp@c^%wKy7!m8itdB?^z;hH7Yj=)s_&UIh%En{!Gx1aCYs!58v9W5Oaa zC?v%m+`p*OqJibVTA&uZ1NE$K`jo-bGI)JLJj5 z=ejO;@YQ`K39U&_Rm-wIZS3I;T;Q)X#_n?3}mtC=O=q=dlSvc!u97mAR+k9<)x8${)k8H>Bls41R< z%A@&!2ONm(H)TTBF6=rH!g5=+>FzPIPcW|wIx4Y7Up>FNO#{nL9uZ1L2!@V|GT(W{ z*MC=Al|`-lZVeY^2QyLg1`Eo-$JF9>x>1)+AmjbrfeU4&|EnN zw#3F|*S%?#z+bxxc848ECzwS7(PC=m-o*bfJV0zVkLRsV=oJG65dMdmswXFbyzrh9 zIq*(8Y7xkx5#=%7n5Hcg#T|(TbW-72Qlm`pRkW0T%s0WVI#s{>2jQRX(5ul_aHQ={@FcN4R&NcCXG3IAH02&#cQNLXW~5G*;i<&v z4D$Aeg5e&trI`TDiNbs}VFn2mCaOH8RHtX9n@pVS z(-W{Mc6&yr6Wx3!Em3h0tXXC=Eblcup1RN|3(VsW%LB;*5?{i^X06B1)`AKoGi|m} zPUBS~ps8BM9*%+zu))lMRq&{Fuxcb9vL=bDkFyry%O1%$whHBDiT}2q9emOW@)U#-_grJAHD?u_*f{Qc#^O zW3}{27x+E=LA85MAIL3fvA(tog6zxCf##zz$e#Kyt&lAD57T zPqJV2N>T0?D_FqoVbF+&Nw zU1rDoc2Nd?KPEsLx6)HRlgjF3APWiMJW1uNFU$fYjX3^-vg8AeIv#+SC0P-`U#V#n zFm|46+{wDl{v0$x;OW+Z^mLxVznk`R zgyOJGfHQR}#F1WDdv@_c>nQw9VDY(0O@Z^rPbtY&K_1s7K$AjavnPEx<&(O-fY~2z zvcwy@@lKY#guu;P$UtESMVguPiBi2yrwo6dsH#&iq-6vMry$_US-wqAAV+kAjX)RU z|2|VM7zi?|(Y4wNobceFxMmP}$$gIZf?50uz#4u6AwvKL$0K6-SGgJVpYBt#jCupU zGRA=QYvQ|puUZxGQZ_CJ#YBSDs14Vzx0@{Vj|6f|3?6i7#xt-Aa9VpO zJk9MKra9#PNtD~m9X3$H%UuIq{Gs)x7xVe0mwGj8wC0xbs+xLo|G6=XsxBiQ-H&`sfNYAHA{yb00hu$`9SL zShuPK(%flht=~X}$@q6acp)wbr#pi7@ZV}7-l{)uITYbJ$nZ)d2>n6te`KZfrYbqp 
zLpA4T5d=5ml6Asy?&oc~!7Zw|R>8oy7{^KfUbqxKnab(MLOOv~cu_3x==YaDaF;`P zGzvhmN{+>n1y!;PwE;#+%!I{5T}4YpUzNb*X`qdI0|TgPM5)3opd+4##;?c&N}RFJ zel-lw17J+_bBZmTnoGS*E+8IwTWJpY=AF>Ra48# zq=S#|AR1BrY1`tXfbUpHcxAO!N6KWRAovMwuk#=R%S>G{`Sq|H%SZ3BQ|GjS<)OoA zxQ?Bf1{fsAO@8=kpr8x4sj_rufx3%3EAw?1a*%&1+~TAf4-8iBL0hqyjAZW6Ho>+L zWO5Dny~&IRE#<<%C)|qSgKgfK?Y%erR!YI~`R^3-@Dzm>t#ZPqgukOw8;L0|0Tb7! zN(cN@&c;`z02pj$uK%OEg~z_ePX|3t6&D(S=WKOV@j$3mzCTdv!tSeMIAb?v$i|C> zaF}@P=>l{B24Nfij{)TL&Ftv0OqYxuXSlgfw!#jajVc+B)I=`ADWjDrsl z*%*XaQdFLko8REj*lII0pvM$`ozWdrS0V`cCsqS2^c~TN+I~T{wAePeI2N^MY3-f_ z@ZmKqup;S-Xc_>0NlNpmK~?K%RX6TxeN@;W4Gn`$BCYx7%Y2|j9|}Gru_u^*GmCy+ z3lR(Kunh^CJTR6^5P;ZH3tLm#M+Jkvq+uja6JuK+#p=AT_}GP~%{N!Nxm*5Y*&xMc z)BvHU7TXG(pc@u*W>WuXGxEPGoIm|i&g)YVSV$3{RYB%UzLM_+P0#H`UeTDTlX&ag zy+tglAxEaG9t1j%G~k2iML3;au35ToL}#OYJJH;1RJ-t{<#VIDOrv_|)3Faf60LoB z)(kF-f!yVq3#1rFcr`K}9mReguQdC010vjzKRisTNMM$e3#}$MHm5v_2_w>c*HfPKyAKPrA_@^t^mVZ9_bA#|+#=jUidt zZM9#qH%Ml22IsNqJ7S{{Y z;Y##Hh&-KlXwn(YOf#f{3epPLm&862BQ?aMuqHV+C!R0a?vq>$SyUlpK;2Q6UB3I9 z)}+}NP)9gmZrC%P^B4kdthCt#ArFl-1eY;~yy`)JiZh0jXny(ixqLoe+(joVKTa0A z$%y9%e@V`%_H^L;W|^_ODjcN*l}i2 zAT!u5B+!}@WeA<8B+oBf@2wWb!NA6$jccg6X{_^>JpqcTl)yb8cau=OIKH4h5BU^|XgtXo# zk4~A@TcC(Bi5{=6Tim3a=%dwt&KJdla_G2K$z$k=adjxO48E4e3pAbOo2rr`Mf|yE zY4F{MhQw3-^(*cZGfO6fC}^t3yUHW1$KBf_TeUf&gqTw{N%eiHZsnOCia{{S0E;iZ z-}WKK1Mi6gGIz7+oe=Z8mQZ>%Q|RL`~nW~deZmY_h_ksoqviurHD<{~)EeKI5f$vLa~KZEq`cRgf_q5_MFjmH!4DH*{<#&C2XZ zxd9RVm1xF}D14&UOG-ZRs3p*%u>F8#`~?o3`;YcHFMU>yhY*SZ?Kkt%mCqkoAQ@|@ zubsi_G5i5)p}WW+A99aSo+JLf{0Z&i+nJO0eIuWJ3JGBQRj-?i$TmI*mnKa$!bC2e zi9IM8ET1>z>0vLANeUX3{Pz{Up~Rl0ZME)vi6?Y&Dx;F^LAhk--D*IFIqDy`txZDDkgsx>!Bo1qDOJPlR9P% zl<`C5`9O)dXF*HRkHZcTOXh%1ecsNXOI`_D?J9iHNdW}t9q`^ro%(HycGF@qTG{U{ zJ-^fX??7rjU0)CMhoLB&zaZ=zjn|jsQ-Qsvz4Yo3yX_~7uBN(gTPKx&29c#|i@@LV z%J~q7CtXVmjtLt&?GG{h^W4Ks8&Tbp`xKoU>^Opgsoj9%cxGbb`}B_`^`R?nl=+At87?$m4piB_gsXyg4#8!=wBEnif-Hb*T=CCxM5hShtlUvGmnZ!`#c0e-jFWB^j+OJI`~M7 zm@OrN0*{}!fu;n1Etg%zE2a)Kqbf))Xs2xcbC 
zvR}!SC)Il&iK?EG4u&-j6dYe@;c9bEQz8KEm*kcTme;TM{s{5eSV0#3Q3d|`TyL^)z(m{uX7djcBpS+#{=xLC!BP-RTA zz*G>|0Zy%m{cUX+#rsxFfb-G7tj}<>-$t*5uNB{fs}K*bp; zeF?_am|Ab|+UTFmL_~c-BktGA;2GuX2!|*JAeLvQ`fO!lskvZE1H)EW_f}&!PiU?0 zjtjwYu1WA0Sje;9o6`S9RAF$QXtrahr6C^|s+pnu&sd7hM-YV<$O|QIh$YwZRQM);U4DDokNxJ@u$LH0tjqed0wfg;_zPFBvByz}9BDKDK(f}F z^kV%APNK~`t?|Ot1*>!H(&2rR$TbrpQXz_*GQ1Gjp^3w)kcmcw1bR$Afb^3<*s86) zaOm)n4_WW=8oXR>M3ut#3Ui8L4+REQP=mj^NNRtG#}sjklYLcNKJxr3b*J$CqBbChkmffvfIKZ7)wr* zX2!Y~dJ<@FtLI5U7#jnCnrEv)qd?ydy%@wf%29IMJ8)?V9!A9P1xAu_EVTgkGwgFp zGc~N}J!)lyx}7z{<=bS;8%7NUE59I*f)RQXl+|^N&&@ZG1~SPmHwH@-WF%5 zvi}hh(5tPhG(> zo~@yb5np}zIq3WtW$$Wc)aKGnuz%YM^Atu1zZ5N%I3L8@F;Res4C)^sZVT$BPWBJj z%r)jiK(5b9A1iFho^g~14Y_vTUNm!|RX2oZ2 zKu-1zz1W$tXCT7ic`=jqX$m$sh03HpS*#~EE_LZIuw+)XBPF1X2)zF4-YXk+WH@IC zliSpu)w=cQne)vJ;J2O%J#w2$sm0wZ5wLPOeOF?>gk(9tD1&CR=_O_a!bl%wCbG~b zqDiJzlP8-7H%4Cn8^eI)W9)U6!oNKO>#-Z1JW!1g%>$PWu^A__{v-;L^t4XDapIOf zVx_wP7aW)d)CJMPjCJ1HW7|wG;ZMgsRpbqX-iI^u_{upThx8zz{H?QNm|_f4uKg#> z6jFiNMw(em53fnDGx;<4+dtzpI!IGb)s?*L=cav!(MJ^pJ=#jg0&msTOxp|A*FJG0 z2MX^v(8m6y$~|=c&$#qXtnCnO#Oa4{psYMd4;u5rRGLDAr)sB+{krA-B!98I&1w7>G!p#_T)+n@g-92^eZ+|>divXYX*ojB84Zf+(t?$mnMqUog_I$5 zyoD}{?eN@6Y>EvzvoalgDS%%6-KSIBt7d7^tPH~Mu_oLXgI$s%rrFRb3>s`^ePG$n zYMAD9UZ-9|pY|~UuG_6_ZYR#RXG*J2?R>I{2ZT<^dEh->D>X*1=OG3>&RR>4^^#>K zs#OhZb!9r5fOWQbBly*S5n8f8F7d=SXPJ-`9H<2%rdTh_Nxg8JLHxW1mdCsdV!MHk ziE!Gb-A!rVx&l2X9i2zv)_1KA@JT38nm1f--6mqt0r;3{z#Vr5PD0Sa>~PeI87vQj zQLF}Dlw9*W)tYyDi6gXyfqeZ41V$bl@kAu`h#nU3H%eGI>=vg);31;5cFgDwrM&LG z`uGL|P4IwJy8jb+%1hbIax4+Nmg^;aiy_44mi;7TGq@dnG5+B==v)Puar6JtomKBX zV5c^o9pY-X`ZN0FIZ9G0(#*yjzFZJ+?Uw2s8Am2s-H~q!_FZxQ7g(##@uh6(3cj+7 zs2vv=U<+yzO!#^k=H5I7@Y__(cOMZ!`^7*o>qxu&SkDHD!on1NeQHaLf>tUycMA8{Fhkfq>iPd-Gs4E)Fr#9$E>yvyWq#wB zBfkjU-;m$|gI18uR{P zi9V-U(hE5M^}b^m3=r5o%^{IWdtU7sYBqWfFwk z@TJnAM4m5m#PU%TMgPh>zk)b|;BJWd6<|D8=8VY&RjFmy^HbD6YNgF3^hbFX4d7cT zVITOZGAjFxY0)ix%%1Ff1`5?)Cv&bvOry;O<{c$ z6hoKi5p&VVZ#!gZ9H4~M@g2oK{JZX9%@&G6cg3{=18AmVzVbxV@n*98DvV!u`voBs 
z62b=}3Li-}EC?e!Eu5Ug#?14sVJj}E^s_qOPNGrJ z^(>X^@HM9B{kuw_0TsJt9uHUjlIR86aj?g0>hU(5C>eMy9@#Y^Ds<*=%|)$AhPRwA z7c_yY)ba>yE@>&dc$moke=$r?50&A%bsNHt3jcue>Ji(*M)5)6GcDL089A5 zb5r*+a*Ya<(b}Ne{28DU%euLC)~|b(MSj;{6Lf;*Jz~l=_fmv@c9FopwPF9N1Ed@r78B~G<^;C-5^gINy+d*7l^OR;2_ z-%ff0UV)0B(0VDPWuZ;CD1lW|AcArjCu^pe$4z56AI1#UBG_y3c)CN=fXucrb{dwE z`{1!?>|{Yi$^PR*LvxUgzG==sRW`eJ1lfh6 zULNb;A45KRtrnRaJ^>zbI)COC53ZvW&zLd^5W8u7kJVUF!GGetXZb+sb2&>&0qvM( zC)_c{_V`~Tm}C`U`{m6`QC!{&LlegGm3&5Z?9=Sz$wbdu-+8ACNF4u zY59yIZeq3=p2VvEJ1)87pCQ3JUow)QGE}z?0=WpPhr?74%gQOSWqDA?YW2@NDSMRy z$Q@<8fREVrs~(=u!V<11%09*%ckHKaW<^6C*LG3|lZ7lj<=S?wj8c*YRuH6Wp-tz1 zg3C4Oz7qc;y7>1779lOEe+1ZTPE&pSagdwSIg7V$Uq*!fPDr5eK7Z$T4{~|Huf;Wn z&h&fcUw3DBK1W$BE9f7Uf9$6)=2jX7UnHW$&9X1anZF5IcDB@8jccVEd~K>F{|j3a zb2RrgY`DMf*+$`yoQxxd_)A+Bvq*g+4V(ZNSh~}>byZN3ico25ef1+7h zg?b&@I@>Q$TtaCxBr)Fu`yZ4UPM070hPAC?6T~#AuYbQZuK5q6TYr=m!7|JRCKi~z zR;v)y8BN-MtWz|)Ry3p2#v`A{bu$)E^1|NU{hwbQ#Fb!g-8S~QihAoOuVyQ80-V^F z>PMazk(nNWD?@K}k1plcGEoi*o%;s^ZjQMtb2 z#^G1?Liy$6+pt$CE6$&?m0&B-di0MnK9vo*SG(9bjXYjMlbmwQ1><&e{D6*fK$6AG{~=QpqG{zrI&YU~T>CL`fi{<|C0O^`9O(3WWiWp>2p zDx33oO+mIqjU6ENl+0vx@$0~0_#`2gVNY@|m#*Y4br);d)I2`t<~u*$&D^DXnuA!8 z^k_GeFott--lyyHKEJ;$>~Bdk$VcEc-P2x4S9(N;NV3Cg<68;k8azL&P6Gu`aMviY zTMO;QWLg1Ht}^s~4wjb<9h@u)jf8ptOKC_$1f1GxzhK)9 zkCH-;$iYX^Ku>xRZ^Y)^h4yLpUvDFq19K5P+g`2R-qP|N^v-Uw0up#|`nEtp%t71VH@>ZVZ_fMuaHR$4t>cTK;tX7 z$eg@)Cg82%c$qLKi~Uve_FbNQLgZG9-d_&^g?^;dHHxBLL_>UYddm*p*)7PmHB0T| zRIrX8z>i>X&F6RrKY@G)tw$PZz_>&mX_O~BD@re3tXy)(RvQFz4fy(5Bl8$pT|cIF zVhR3wY_FlWRLaY~9LM`gkmm$`Q~2@)P2}gI+~b(pi%nMLJ&BzAltmq{)-Etr5-R{I zRmg+n+->qxHrUwpDBgg&Tzau|6{*vn#I|WpC)ILCOL5s*T7pfmPkcZ03hxZEIE53_={_RXBG!WZO zl?-^fl(&~l^o|M&qZvK?l6CrKa>yvX@DiQAt8YG>MgT_Bx<^?x*aD~wSJ^d@KIC`j z7F4zuHrVwAnc&TF;RB)xWm4J5jYs(k?f+mp0N-@!-(C>f0~4 zyy@)aKYKM~$Jccdq?)XijTSyD=ZDfzu>jqhdZ5|8PQiN2rS`JP3^JwVC2ZL@8`JHC z`i?237!X4OKQjB2TL!o$+P`QpuC_h5yo)HBaJw;g;TBD2Jx zb2%)T6+#LQ4|QtYo~P@clwun1A@ZF&OwpF$n<}lRQsC&ZzP5oX!A)cARix{`hboQ# 
zC}9qk@i=pEM;1^aa@hFm-|shsI^Q7v1728BZ6fujC7?p+&lha~cX+OT&b-bfuMdV0 z71cQYVc8bE;BapEI+((6Cm7Ut=oMqP&BJ$jX4eTv^4H}h_dG=RMEW8Zm@9-)<2(5?kJ$AJXK(@`+uzbeXmRsFT&0}b&robZJjllLbt0NZu4a7p z1GrA^z{8uYF3rUhuTs|N-320}0`$?cP&aUvI4dMugD}&3= z!o@VhYeK0l51)YI0*pEd8T)?8!c$om#-Y&bRPOB(T#;M!GR*-i#Uo?#D+^YV3=L7`9Atxwv~>$y z3)nq&(7<70-E|~^{eyD3>`N6U)n43m(!cv5#9K8*{eB!dV0$4QrM=4LJQ#aj(RNkE zy{don$H0IpE**JRa{4?Nw8|+X{k+sgb6zgr<}Q^B>j6wPyruOiM!-> zOUT~M)u;!U8rnf;KKaAgh02-8T7QzOhZ4MiyTaAdNTdlO4Q_rIDUFylj?WwQ^NHg7UF0`ONcuqZ=mo9@TN5$4_w3ZU{m^oQ! z`2*f?Yc2I3vU>0<((<@`>fL8D5b7*($bA0g_WdeLMXx9@tH3cltzv!zd$G=6QL2fQbG zXu|sKsWqrmy7kZP&hR~3E%bYKy;F#6W#d21-t)n+#23oVv@jIhjekb;A41KZl^`Z( zJgbsz$p-{ELHZ6Y-f#~gN$~;etCs0zk+!pSi68W**yypKpS3H;AT;tE`uAPG*h9h! zq5j1m`>sw81HV#7L^7>_8<*wpV8Pd@$$;(4-Y1d%zsWP_i;$sTRwapF37{D5zuTn2 z0u0E)not{pKLr1Ts@RH2Tp(1Hw*oWqeIfb!r2fu_jb&{u1;-JfX@%g||0FcCTo zjPS|d$C{B~Ry*nzz(=RL*wqn_b3BHK6@~n2dFd6sf()Ia$vZd#JQ~kv7|n?{=u0>P5&aXYp8-#Q~_VTx4|r8R<&NhuiTc z$y}DYe$jzlVIZFr9?;JWZ3k(k@N==u9wsfeAys;3DK+e5-yVmbpnl>PKp`@>vV)oC z0Jr1HDp&ET*`3excYcZ!=IA1R0t%#_6cf_{^xzYtEY$pDx0JPVVgmgIobUA2G-eQ! 
z_hXA(eq6!nCYX^lk;R{7uV|=1`C2a|P$8TS-Lxrho{1ncRIy692#ohr+^yH+T)LC* z#D`0n%=cG}g-XNSqhKy;xiPEdfrANz35~(OOaxkbmOPa_G#t`C*>G)x!6uWaR8FUL z&{{6swBsOGvRT_~EnG}|<`IY9+U{ey&Tyr5un~s}F7)=k>V6%e(EMmZVv}AG;5ew> zJcd75Y4JI5Y(gdojPP`6ZX*;>hTVwJd_Fcl+5E%tF<(>>EvAKx?zb!nII(LZ%UHFm z#6gdI5%~#+UxM9e$6;>JL{QddtYQ=o$VU3jxNaM^8S&IuJHSmWY-Oi2R!THt*LK*EkEtFf}|@ zA9h>m*S7nIqeq`thvEJ_2NV2#bKWr`q%s0;u=AAYQ|AA=G5P2^`uk~x3*7GNH5)#0 z&_!+?>o$OK)-r)|UctEpN(6)(^k`psc^FMQ`Gf#nALt8vcoX1Pr2D0-&znMeZz3|+ zPiP?j2s!vTBbTFPL;J=jq7ke;{vgdV75Mcps72~@=5SOUK3Y*Cf9-b5>av;mdj|MI zc+wD%4Ds{e4vS}FiI;%YcKAVxgD%joqlI%aI2&lQIQBn1Nq7nxn{sxGRA>iM^Jl>& z13iiy&nDTKWWX=?Yw02I*)>QdQor_YcZ&DnoJ;hU4#B@*@KI`w04SGC2s`*!vM7G^ zKe_t&Qw$MCh64$BvL%5=Ps&u{c=!-9# zGcX)nO{54u^3Y!KfL>cdmdzK?^wf9Yc!9>FpK6sL24Bau4CU1_&#Rx!<$j}~ zL0<%NI2&1wS=r)M02+D-ri)mQE+%J4BeGr7K7IN}>tW7#|o7_J!EZHCx zMT8O7z2$LIRGpR}tMmVWGBmuPnT`n9JIfY^1=wEiw>;8?NV~j7kqa2i;bb0w9mB!- zQh#Dug<#2-C^tvZOkBE%8;d9H%qHjlLH!c+vcio2$KbfR8hyjX;@n;JW57DW93k4v z*Tv$+#2o_kiYvB%&mg?Q83KYezi@ZOg6iGm*FeP(HYE6ycV&iDB= zCoUc}-&QjU-qwqsCV{HZHy$VZc@C?JuDNwc#RKS;$$8TWvf^Y3cr2T`_s3W-IGD4S z);SCFKiew3&u6~M;ZN&$dTTYD<};o|l){7G7d{?bYYx85EebDrSU~s@jfpS zbDaK~z1Yau$Ohk3o-Hw#5Pskd22eRgVC9O>zADYLw#-6<-MSk+8-ETcgoLfHk2Erw z0lTjhB&td?vf_Z3DXP!#qM&}-6b|oUFZQ@St$T2Hy&K|{JVql^_6y6wQI;Ac5)7I}h5zs3zOSq04RJJjmeRro7gajd8 zqCXr{xsR8_=vLHt8aTCa8*#Zq+06cJU#8m#p99hXwxQuI%6{*E#}L{rxGEw{)l3yfQc+L-a643~n`X}M2n{)SRt5AoNS~)K55*Ua zTDm7fQima8(HmFcdI~+*_e=lLOMw7kufocPi-3EJpU+{KSu`%jSh+>NoIa~^z{^e~ z?*O=)@y>x0s`rqKrWIGr{hX{X;B*P%=h?|6kqAv!r3DMj9$fW5+1Fq*8Ua?u3yh?vk6l6E(oZls;{LiFiqh3BpaG_M8i>PA!t z-S^9-4S6z4rUmW8ow0-FVx*+)}i*OQd+~g!K=Nix@m052~(bdmfR(S@#A>zH%Wp8f}+Cmr_(*-~0A_;g*2)y}X=5 zz#rb3J^GlxlPkYrnD%^nd;6SzVf{T>ip~So$F$x1Z!v1}2wGX1B60Yn%_%`C&JQkP z#7^~AZMQ&8^&@N2HAU%=W(nU9C5K?Dsr(b-jA}P@#ikbsk~Bc(UvBm+gr5nQ3jV>2u!e%8P4xOH+}xrftYLX7LlJsp{Sn`>xKLk4`W zAY0iQRq;tUj{qo?=94E$4AuJwU?jJ!<84SD zuI;sakA?iWiei+RMU{Uc<-!uKjGKA_yfnzjL;3^t4wqycEVzha>aBk1V3}(1C3w|I 
zr&oKx5Rnm^MoCz)x)}7x`-{|F+Kl}gA495Y<5T?vi{UnS>DV7vQDPW9>-d|$I*~Xh z>|mklW_Fr8`~gc<(;XgcQ;BEGZ-cK1^``ksSQ@kuEeBh##_i7E{oRq<*wlb6HD@$g z8vmCY6VZ~JRe2F5M;5!4wg$T*^qCX_M0k+6j-g%q8#V`(@L{`dPmHk%_n?uFHB<48 z0|%Br#}(xGWk5HF{FdBGihwM^M#u14XZ37S5^$-P@u6qrCjx7)0+I+f>fr~xLikg2 zlkuNrgKG=xR6i_IUtZCEu>z}b{syXWpBQ~MvWIGkngq0O9%ATn%A5*Ly}q4HJb|6d zbDeY(2ojfeu=&e8d+8BD&S-(n+%2P@q>+W981T{+6KSSpRBuz#L{HeKeyL(DEQI;) z{Ey0G_Y+YVA7EJ%WF;%QlM(3c9i zynHr7whuw_vNtV0f*J!~pmR|f2GTs&WMcCUlI^^Mq?W1^TUNpg!ra3uVcH0&oV7-{ zKuTG3%>=t^Xt=cC`CIn4P+K(3s9iO8F{&TV^;8^|AZJN(Sna^fQW<(6{+;^(6YKeN zOlh6TC_`w}te6A^MB^^2zaC#$*M^E0koI7F2lk!XT;C7Os#oXK$ZkjXFTQP_=iyO5$wXugvw>kR<$|PqHW*-&gK=Ig=6}S(w25k3g)) z^n-~L`_h!l!yw-JPh=}hUZ%I~?@!HVx*{+W@Ifgi*_X^{u)gt1CTtpZWsjEdTmNjw zpnwTdwh);4@fg~3*M#@GjgLWW@_7WRKu~G|#Uzcpryz(hr2$Npz|9h-f zM<)-b8rz*dP*dK)*lfP^f&sCz7X20t{u){|$M_TI5GN3#7vyxEUmvb8X~9RCJ*9PvH?$~A zv{w;^;;8>BD53lIwB<8A=wUy8;RVeSpBjEv$9!ll`G-1JUYl_CJbxXTmLWYngwbNa z7!IoNq@gp+UMNByvioiY4%4Hr--=o<96U9H=;ZG8Q-FGW_d2qbX<)uu7tctA6!B`C zT63l)w^d(s7SHcNejurkqr@HiC)atoEiaJ>m4Q<*XIO*tNa;e%TSPvX5u6%kX?wW{ zNo+W`zq*3ItnkA|kXB>ZOrgvwYdivV0mi=+>iBS>JP(K z=aP4fK$W52!!Gmo; z)Oksrq^DYv*1Qr^oMeD*Mc{#>(e9hH6wd-I7&LZF1^ksi?u&(Wau!nj!pJy=$0S}} z`fWYaU1srt2$1c_6J(;owu*$3y_5)XrFD=ALu?>?rdfoI5^ zUMF~aINo|ripyaBHp~=J&15lAj=Jm(%8mxRLTn_)mTVnmNE`2Ho4y|sa`2)@ zpT@xMqSF(Mc~nGNFI2*hG_wzcj6=e`B*W= zyx$Ks{v{sAXrYP^sHbkF}NI>(1FyfzGZAY}c~&%Ubqo%eMLU z{tf5LbDrnEuWQ)-9?(B`q2e~2FIXPVl~DO}9}&_M@ha{JC}k#S%z?L402_iEjP7?^ zh8|&&ZsfkG30n+F7|ix^C@O|$_TMLG;Fh!1eX*VXs+@xpFgl%*0Us_JyeZio@o}U% z=GUVoNGIJ@Q(744#ZmUQ$%82KevZR<=$Tu8`@{|VcjS%<&{7YF2zod1@~`Uzge+Jq zSy|9n5ymy$T&(zN)Tm5=32%)UdBvN6B8C&OVCj|rrjz#IArdu-HnE&Ce{n&AyY3JS zb&~A9bzGRpMtqyjrRi=(Q@&)6jTq!FXYc*1m*({tHvN1|+8?hUT$n%ZbSv(BsihX_ zd-RP6FXG_?x|s{_Z`xE%iXQb_Eo*KTRMOez>HJO=$ewRnTu1{Uc$AQO^w9&>lKAWi z47H~sPkE^GtQY*1P{Y;I+X5gLQnIV2HPk#e8!CSSA#wBwsU+U3O2a5gPP1 zeWg0CyQvW^N9w7KeWx;^0x~u+=hz{ 
zsAHC=XQO~&bJ8PS#cKvB^&8*CGEdy+>Op4h6wK0iMLM-b|79@t`1Ao^Tl2%zuEEXQ^ADCelH6A@#svsSJk)tE6g%VrmpUJ}M2!4x7;xwyIIFdHupZziG0~hTEqi^j z&~9T7Dk&9@!*uL}1lQkfb68sp^1XN%6(;gNH*osQJ4@GAxlL4aEc_Q{4or|@#*44y zaH@`b*l=dnH_Ov=$f=2Q0UQiOB+OeUfOD6Py}RwmEh$oSuy;Y_!^}PYs83+TGL+2k z(Mk*GV`-cV zDGB{SVLxOomLlpHaVpt=leOmLejDOZSa$|Vo_4X&29JmW=+4jik(wp1__dY3Jo|fW zFxZqR{H>q^aQt#OTzMxD3Y&9ZReUOTvNl01@z^SzlD}ol`wnPQxvH~$9u{VlV4{xh zug#+*QVYWcpuRw4%F4)SzyT*|jhY&QPYS4_t~jWI>O=J|?LAiw{}q2%rcHQ=ivY(~ zmH|jwEQSaMvZC2l#c}`Obx7d%7b@!@-#$6yXM+HUMrkTaVLPkcl0A&M{fz4%(hH60`(JJV4M|G(D`1P-* z9YA1{J^ll`HoxDoAdR!8R+y4d{}f>u)!Ee0V`kC*dq3ST`h;f!(D3PXJ*I$^jZBOUV>G9g|_&wD-f!vY$1_1 zXK2Kp;SYuQ^QECB`UB=EyNT-0N2>??CoslgZvA!dZ)|vdpD~ly?WRJnQ`^K0LXijY z(%lWz5`cp`NLE)ABO;KnKMUsaggW=?4~A+}M9&aP(o&-}0u+Ka-%AzIt`CB7aPssm z*AIv-Yv4x|)LmT&Dkx(wflCM5@avtx64LNd=ZX&5-s-I6q0H=dSk50-Ghst)5UEw* zpHiKd^wCKrF^#Yc;Gz-_5+=ued z2~-ctC)O;Xl0S5cqD1dM$yi}85rJ?Rdkuyw67P^nW&>>Q1_cBz+!hurh4xCx05w;x z6)>43RYzH7ngDg&P*@{H%^pUma65V{?sB80Gd@c``L5Ln>3%3I!t;vw-tg>n6pK*Y zrf+u-^PS|;2?vV)8Dy$qz|~{@(%rezuK6Y=BhyJ1%a6~=%FL)__Pi5n1=5h}Zd-it z%$^+iCCh@2I3>sjFFZAf3t*}?Lv@!20SiqQ&f|I%x4n6OG50Vz`ktuXluU08!?)Ws zxpApoVBd#54ti>0q8p}|o@E^`WKsC%@w~iGGdmVqu19`?B(rbrgK1k>iK^Cu&SyBr z+sDUdG(TY7$3#D?Et>NIlHaD~8NVc}WS#@}(KH5^m{C3j>;0(RMBTd#JdiF0?CX!R z&G|~n2OQ2C$@&o^m!)i43{p+*B=frHO?|asow;RNV!rtMYTi}|J-kW#S`m6_c^lo1>^lVY6x`{biMn4ZAZL0PhBb#x+)qgYIjYc$POB z`qt5e>iE{)?)jj*S3bCxLRf%bNlacU-M%R4`6~}N5<$Ou4UE@pbF@PJ!?`clvRMHn z-B`cNb-yCz7!iZ=Zh#nH`xmooqhci!=R6sto@H=@uwyA@`Mdesn9E2$oxXe4hc6xr zHoC%UM-kV2`HLV;r-0p}b}^ks%hX!pr>z7yLdp;oHzX3w#D$_xR z#Fhl{%3e;gwNCwPA3tY&4o4BCfmMLRPZZyL$5lDP=sAS4IM*e#Sz3l8%`e+Ja1B2P z9}W7rL?M-^FUI`^xZYZ{!s@Jq+_0)DeL5sv`I;<>h557sN#@9sJFCYYCATB~>61K>u+UN*1Dq4OlW(G4rv;2V2 z90_XqJwKE=C|-2~(-dEDMtq>#baz!y=bU^*Y)BK6erK1Y3u>b^4Z{wL7U;X#Tv^j$ zm=dUAJ)D|^vJQC$i#9sxqnBc+VfK?_OX4z1gL!Y|Fo?a+phVfOrf}ci1#PntyL^b-XBw z>A5+*G;K!82@Ws)?&hZgx6=vgFPlT>Dj?Uy;%&cQA(;%kl8osj_3EX>sb?<*ip>HS z_eBgYv!=>O6L<`!uiQlP)Hqw_hg>+n?~}!BeBbl)6VJGd#gSwslFoWT3ay{5KN;=? 
zjByGwZ7w7Z9B6=}`L70$#A9kdm#2gnQ0Ij(GW}3Cc?ddtjD6-0t@wZk{Td6;NnQf7 ziG}!hPWnY#BpL0B4ryS5+KcK?>t7)Lb?yZ*gp?79EBSCE0bR~ZMZd%KHwEb$dv&W~ z$T-lkdXldtzCNoVvI}nacvfvpS;YH?qOyBlR+Rsh+XQ?l)K7tQjyKoY{y$!6TOaNc zaIUYTAHhAVyHUJef_nO9X+s89dOmr#xxJeYnAV?u|Dg}FE-dE%Xl9oB40`Mi1;oU= z+_m5hjsur|pHZb>s^09SU8;INx|`%A1H8~M4s+vkips_)!qYS>ol)L-{S3>Zj(V`^1#HKOV$wefn`GZmIALsdP{fZUfKPTs@OggTh4$M#F%@oF) z=O_9>p_ZH~kK$sKF;0gdvV(zjXH78^4sID~k@oQi&3g$`4o>N!J|cv*`eQ%vI?pTl zN1UNWAB2)Rc7fae2XhA=$5SmtgTFSXjKvDjoLj&Btt%5;@k9}7{(|_`T_02JR)p?P zfAkp=O;iSmKHC35VLMW)X=ljOwr}ChoQ}PTB^w;E?+Md{#+?k3rQepgq~9X7n+(d* za*h`+g~Vps=Ft;T5tu62xEFzr?uKB7St`+ucOdKz)%Um|_)q-O?uFM-*R9kwg_t0> z?^~x_Pe;* zR+yIJiz$~$oB+&+mA9&Lcw0~bj<;O$le^f9BT}u#`df_Tt6qsr#lSt?A(~V*q$NLt z0F~8F4?obmh<}q%knfBaV~8*u!q9!yZDVP|#KewFo~P}&0q+bG?+^r!y6)MKMD z9bhJjG8kKssm|Ox$1=^wFn}9zviV0(?1$TSjbIPA+4ufqHPoZ6 zk-BeEg(aW-m0=*ae*?}i-<%3$DB_OTbPXl#?crfs<{zuadIGQ~TCvq4ePEA&Gv<`8 zOQxDyD%TCh;cPPiziSwR`*K)zi>)ZL9W-PV9eIaVSGt|5X(-dSmhe@rzY)I{lWE`^ zgrTNI0Ko8qMqZ=+-ky?)al$EBe}=G_Q;cidIK|tFZi1Ns(|Op-_U>JrEK-Av`ZavN zxXCDFl4?A;@&7}iG1Gj{M^@=p6E;Ia_#Jpx75>fkOxQZPonyYzRy4T8V>>Vexjq<( ztd)F@^rOV{&dblAAjkFL5_iXtNqIc}o19|=KlsF`hNM@^d>irfqJ_l&&5~l!xsv?5 z&lf?h&s2N{&T-l)*jZSyi-dO;dUpqxwC7Z3hjd9Ep-S0A!R7Khb5FY~h% zQ>-M@zan8Vw)3^_iCcB7#r{Bm8TSC^%Bc+Buai`m%=qOVnzDP?;N-+F7{AIfqJIa} zirEvvaf`cH5iJDw;1Zu%?;QQrm!41ml#^GKX*2}P%S)3g;!{lc6_5q>0xghe#=_eh zh;U<=QrT;(ll=HLuSRGlVyuyb>T|HEA#OnX^bg zTL@?DSKegZ5170)S}umivcw>-DPEKQ%kmjBQh!XjFb+ zlY-`Wn~9J&*0o-Ad2govMufgT7#q^)Phrpoky0|I)gYp|DGA?O=aGce%@J{?6Z-dh ztSm!`N~0H>m@NDd7J$)*i*|f$kCEghd#pHXm;W`Q+Q-i6{ylzh+E?}a7^uP8{Bzem zShSxsjUw{6NLsNbc%wPPxw6WM*W?IvfENL-oC;Lw;aT^g3}ZLj?wKJs%g1(S8vBa~ zRr4bXfPJMJFD6kvb~cO>o^Rlk``erLL&b6)H8o7v zU^0K+DCsh1_!ez|nLi)`Vo7m>(){XROwCwwGoOj(Xz5cp+S<$J`-O`&zK~;q%m3;z zx!bG7U?yJIS zwRTpS-|XfG)hhagW*>#$7UIVTRDZ@U-j7WDj43PSE2OxXF;W%*?)&q&x%#U~dT?+1 zTZq|jdIIDFGga&zMw_&*of4y<^iS*`bcf{N&PD;h>Kp)v9OT?&$1!f!`MwuAjE|3=(X~1yHu{he zb|!F*;IKiux3pKEXr!5L^lOarT$DgaVAqW~Nok=8*#{fYCoM!$J1Sw0?m5~^hf}q1 
z%Oy4G(Xw#w>naUBe1~hx=bgB%by{pyY!{Qefk4?ug9WFDeKN6 ze^^GLvPjyvY9y#;`;!^)8?GRxvLT$drsP!e^7S=9L3)(pJeK1u-2iYDkItbaEk`AO zgNz=Edt=|JG0dKK5S+Cw21wgI1fb-zO~;Ub+PCeED=Gg!YN_gtXHErYnucEwB0sFl zIe^@E{GpG8WHFrM;;8Z#NlYO_LYYqUtmcyk7nIOq4_HR6bq-OF$21A)ZW>8--tO z4Qq0)8ajP0cUQ|b?G?gBrX@vF*IT>r0!^ShSzIFacCuT4ZxkQWBumf6lUK=ut)lpG z{ee3hWRshb%%o+9C>j29F-1P=Irz?f@LP_nIu^Pc( zLkGvBfrQd8H%Mta=;OpPOHm*jXL4n=qY~&q^g*2T>B_V2<3Ig&;@_Y6mrt9x^tQl% z%t@w>DPn@bY#X%d){}4-9u)q5aQZry@)D^u1;=2$BFk-mCvO&y6ZR{u@~4E#1Khv% zA-CC#bxA)GZkgWsI(T_#gLa1 zdqJLd29yPc+F1#OL74lOIIX1!HVhUB9OKlk8fD|%7&gyg0cD%4Yf?pFe1EtNr#ffu zfar&=06|2XbH}dDfoeM|FkQiFm<3sX>L`0LT>UF?PFcG*zTyiT-~1)s0Mp?Pu(fnq zodV&nCUaO=Dtnp&uq-WA=TUPJj{!lZ79FwR=hTxXUf`C$zqlKLt+NKfx<;!gp`+P4 zEmocHc&tBIeT!jm4l3N=2NV$ou5+$uuCa5NyWWVCJP}GZSx4u|h-t=SF(hbZS$WK(3+8$+Bt{ z2jr1CuBV^cgzIXxrFZ@=prMW)X&Z4?@@zaPv7OTwf|ziQU2c^qekMJc9Tg5n_1(`( zN+%(4e4bBP{;ZjOV4uj{q`X}d`(4(Z1W|GTlV`Dg&6alEx_brZDVmJ{4~3Wu0*#iXK_b=sGT+(|rH>ngI6xycj>7Jv zkij76$Q>BTvFdX~6vBP=7TEFnq32$nsF#g^{6#y(YLoy3kS>NbiMw(3XW+Veh!#3Y zf1Q1ia1_>s$*6TNnS z2g==!VBxVLAX!6dxK+|sBk#zY_>NH2K6!O3{1gPSSEG;Rf`bYGJNd67xe}S&!f#QQ zxeLwVberG&6Eg)3|E-C_X^toXHpW|+sX}d9EDfXX`kxGkyed-dxy87fQFgw6DLxYd zvX=6<+Rn=KpAH3w?O(Mc&yRFxB7a`FS|3WK!l$PK;;bDu!;*)pl48qoQqf4?h7*Rm zKhLSXd7MtdWFO6ds+7+ow00Y0?#HarCczzhHvGIUivgbwSXT4j+P1xbZGnQx=ioUm z=<`+XHz@q&n;!(YWohu3D@n2z!r$}(^9sI@iKGvnDOwJt7;BpZ3yE)Wt`S1^BHnxy zjG7qO+pODPBb|=2fam2+L}JT4T8mOx8khKtqjlSJ{r%nVr1GjYrHSpkbLLfnP=8;x zs;a>9iF2x2@)=>k!3P2yl$U!>s77t;)Fp3w22&f{6lCw^lu3L3*>@VR_HqG%T+;kR z2-|`=^haoT5JnymI6>yjB7NS;3v2FTzFJ_m(f#z@qqf}e;33zXhlDT2tP`kh*H6xLLArZw2?oRouT1eOl#011Ev6*e zeScab(2%`qS1+G^s3;32`oKkm7?oY$O6y!#p9YyXOUTpboMok!Z!8jg>eD zux&iy81=Px#YtZlI~PKLp4tezn8=nsmZnBtYx8j%RNy0=wIOsqko}{C%&uTkkI~`A z4F5%{qbaAjYu)_-#u0rJ_)!>06VW7;#=0JfgNG z`oODy!wDEIRLl$xyG989=n_0B-{mrNat9sJWiWrlJZXS(q_R^ zgt=F>OMOYT`F&Y;n0;Iz-z1%>V3*8kr!o3yFV1sREv?!XTze9U0fK49({tJw?Pi?> z90KXllm2B3bFwaMxr>{;g}-}mL2`We2i!@>E=8xi?;rRH44kmLBT`UeSa_5YA;mxL zgS7acR2S+nM=ovCq(87=qgAx*W_%f8%?WkwcMo 
zD~!+@Pr?gy3Iw>E0OkeHwZ)c$rPz%<-Yl_$AJVX_tGH_Dav-~F@06bsfM6r<|Blzu z>TOxB=4T_p9VpU$7#EuP?I&FbGVCiU7zxT#r<0H?F1pmBE7d6cywUV}g*zhy8RTDD zY#c`q=xP#%9IJ#RcW2nJ2J`a|eQJY{oE&~K#<%qkO28@rBl-uAN{xlI37PPmVy$24 zQ>m>wL2jO|Z=&_HHkojMIoF>ez>kk?MaELbM$IvOVpp{LT$zt{^w-63&Y}+BCMdb` zUA7342;CHPRoC;Dk*Qy0zB;3UqJyFBC9eQO)-PzE-HX4EnT6Hwz&Zzv8ZhziX0$gO zJda2r{Wb?4p1 zd$o{KdWBeqO=vcZo}dM{%{<=y9l{CJL|-b~+B#X_c-cQbZ+qA7lg;XupDxBi=|TXB zn4dxfR<1*rv6TMQ%JyzQsS+i#laH%ZeNGuvUZ=o{U`9s}wgPn31{OB0$mJJV)P>NZ zJQ?$Ihs%wx2-G05*u&(9Zd11PU||2QlQ3`V6w>RWHesYgbQRo_*9)Ldp!8tLt5?gF z`y#*3F}a1l`&TI{&Kb?QcI^qTm**xXn^d~_=B@26wYGS5>(YargKb{DR<7MAi?5I*VsC|E*Qy`G!j*^ zm&@Vhmh!Deju3v#?aT>pvx5bB+0rm@`CZV~!@}lZQg#4I$&t%EHkRn{j*iKq4 z(a!Tf!Oq@~($H1s2AwKP9~@h!^ryS?Zbm|woDyE0s{of6_oQWPU~s+bV|V9G2DP78FDzd4hFyEM!2 zq<`WXhXIXaHpM)o<{J!Lb&mVxe6wI zCBkiSh^m5t?N40H`fYn$2+zY(UH|6Vj!_j?*ztb_R@yjjd!CYjU*uU)k*M?Jc)&5B zoz%xODoc?8 zya2MHm7eteS*3{k+vvi_+>YBWR{Y-(KP;Lt{IzM-jsSrLn@U;CSP(Dl zWA>g~8{x@H&PvqZ65tB`wUSU&M=~b1;gC}{wybl*3${&W6gN8MI2|bi1Gu3pTZN-0 zLnya8lp!_1?n>b`90<1@b@{sn>`Y~S2Ri~s=)QYqUgUcO+u>wrOpUrx9foG8w!K*% zgaq$=J>{eknuTOziXK1r*p0t8I_wz8mlDX%kkzE$$j*Oi0GMFba-w|0bo~kHegCf? z2wwkd=7Cdhd~8!nie6^r2H(P6UYz!1jJ#yJ5hnI#=~0cUtLxEcQs~FzkKF1Cz(&Fl z?NL;eUD~BVrS!HTB%YK|5QzyVe%Y01xO$iekQkAoAXC*bhGyTzSMykUSu&BE!->i} z4dp4Yq}`YY3(&$+5O(wRmk_oV<6%E|EVVS|)rB29{~PJ1Y%xIvx1yqd1wrsdk2x|? zX^HFKEE0O6akS62PaM;{htA4?KP%A4o06Fx?L+ApPm(=Ge5xn2$(F(66f)a(RqG5; zx8xv13U&%sDjX)(2=o0YchROsHr>A%soSqkP29QZ-3pztrpz~mSk90<%vc? 
z_|I~!RfivRt_tFRNCWlS2SsXaOqOC_ZF&5jk`Oz$@4}rV7km+YJ{+?lXad}@s3E2g z0ZDTAzELi>Zn4haH}U8E3G21}Q$DxWeFXmS5lAUxugpf`vFsaiiLOuVa4 zxyE>h1o0SWsP5$0oXTxqcb7SV*H0021mc9h%Dl7*-4J1}UVJZ?J=@(;>a+fkg=W|R z?Jc1%thgcmq?Y>bVZ2(IzKci$$HDhu|9ZUhTm7s-TVFCdHPrH<(yuD-Do8yQvR`p4 zkVgVoUv53WQreb*VXNs>u5jH2*4ZPL5sx6c87?W`|J;tvTsW|(9ZN(34;A!ndCPKP zjCQas*&_c>e57RG_QZHUJuG5*U70rEjoUk>PG@H}=TO%AX&KRO<@?^zb+uSC`E`lX z%gG1ebo=o?+adBkO;rr664N5@w4dL4@DX&*HD6J3qn9axBu^p{q8U@`$**hn!nl3R z1h$AtRvAT}Z+S?)e|(_;ufuoxWTEFJvSes>&m>6!M>^J{Qi@`Nju$N}nRkBvz~^^& z`&f3dNbVB(i{aiP)zpmnKY*CFw>%%0{HqyMkJXUuesVeY&E=(KxA8UP3v?tH0rE(^=MjT#tQK&HjoOZ`ygt;P6SLgAF>6 zium;prnAAySo#;oF-DffeoGFSBMvTeh4TG&=@||92Ls!zkpTVQyl*?ClT`bimW;e} zv{F}-0rtG?O$-^?dszh>!bJnK_9rx6W=_6Ax3na1M?8X2hp>5?gVzYrU zy!K3IMH`(t&`3_HH_;UcIJ%i8p-({XTQ=fIM-benLBE)w)k~`5!vy>b&aX}Kvf+}3 zoA3Q^QiehYC2fzWH3}9A(Qla6GZ0mtyl1IJOPls%xwU}xR_xuLYqp^16}P`6w(M^6 zgwj?+6N_S>^87&5n|;BM5`bzd@e=y%2uPq!bAx=AOH%^k38F) zWLTkAe?-Y8UEBSyGP@F7UO3wn1*tY1vtS|9;}**(VPFB;NV^R0sd3`a!g70yDBmEE zjer1$4)6y5Eg*?*{jCNWBrtFG26`+STjw*FyHDhv^Q)ozzzn*__Z|up1cyaG3|weu z{QQ(0^^lAtmD;f8QjZ>K@J3D?Xr^L!oI~M_AcSI9TaJU0BKmI{z9lU5tTb02@b9UC zI>Y7g!ms}t8zSRQBokbS= zx^(^MO{*e+xj>zHO%)i?AXurE^1dMQ}K6XV6!aMJ9(fhuO(63~#$w{<8bm z#mpfoS%sNoYQ9Kn3D^&Kg6XKu!b}eFN4~FJDKCc(Ot{6Tce`YH{w{`X6|FXv6H8EQ@ z_#!=&0WN}jqubg|V)-gY6tb+OOKdniV?1`j$`=&uTCvM%0QuvXt_DV`-hS4$=>XDj z?LPK*N9gis)T=OAt7+;xKj_v*{NWSB^_Zw$kUXb#AF3WdpLx}`1$2ptnIm5Wm|d7c zl!JS8ZIrIDig=69%uT3!?lt9Q+Mk}NQs-S{cPK0mveyq<)T7?$qpb3=_f3*yH zQEpn!!r+GmV)YQaXC~5RIxce#n+;0GtaDornGSLo>wh%g;VfMMcY28@;a|;dqg(!f zitPiO|Gla6@-3u^P+cJC>LLTvbJJ%bDCJ4C-W?JrkLS(;7;be~Lh^e9!&B<97jytU zG1TBl(P=&*wh*qJgpxW2Gd&#@S=rM57S$d4a}-G82%ly=b^sh=seQ>)!avRT2NnnrHau0x?XsF9YQnj4{8L4H0mczTYEP`F+Z;qEnF@J zep;8PgF!~QMe)c`24(`W#)r!qI~ys9O+_@iIh+jqm2LtR@Y8Oww0-ywK>V@yJ*`PF zS8#xd`aJ?8K~X`#_~qSD!I`Bk$EKMI{+_j1inC+}E!mAeRFdkC zvB3i%DHuj>S2zuIFxo_8fm1|s=kwuaVw^iBtOeLK8tH**KD1IRim!h5>w|nzQ^XA@ zq0fHXWGdneUv5r%sBwTh4V>K9(M)Mn^M^L3wzk%Iv_{4u4EdkIj#Sg``%_@Mzd!#Z 
z7T3RlsWW4j0)}+IWl1F>U1A{A7%9Q!UI|eAThC;AtAHE%b-zAN!yxE`Puke$DzXzs zty#PC+z5&cKATA{RQNWIe;>=+#i5^Hthvjq6|k2~)kN1F)`Y?*!g zFAXTaO+Mphmy=alII+g!UjSA~itwD%WDA@xD+nyEf6%4ltcFJ|hbCnPYiC3FO9478 z$D))Wbv@be&LnTRURn2~{r$43c>trZKu(tz7ElX4)Mn!T`N!PWGPf*2#iov{MoO)a zP9u>wIm6^Vo!ynMAAz=L2uVyEb4cL2VZQHPw<6s7^rc;SuD5J!A4C_k;mA$v{gy%X zDSbaknu++zTa{yX-Vb7OzDx&Mga3F{x%Qh{v$DTejGy52T}se!b8qkc+l3jBspF(X z2E5u9gdc+6d;RrxwYM_6bni4!&m@0)g>4*#R?aOczzMk4ma@vE4NC4iM0`k1U!o%y zm@}g-PrgBii>#o578nP8q5jKFfn0qH)y8z3GiUG2RYfpDZK;~Owxp$F2KZ3I4 zK`X$UE}Y6C+I1@_oz#o zuvHQ4d^2I6vt!wm;Yq+MCcNwe})idU-mx#C4lXi)sVW5$-Y*hcN{Gj9h{o*@4Vz7kg=YY z0eJn)KvuWLlaRJZZi?GH-Q4s?67dy|SS^(bX`m7#S-rS1_|~spxJMDlfwna&T=yC@ z-C&A7OkgCTA$r>#cRbAt&{HK3r^Nz%G%DdIBiu=M?^|Tp@HoD-=iFhlo*Qn0e+I_r zms$oXQ+8^LxPP)z`+XVq?r}YaYBQg7ZB-?}68}_&1)XDl=OpADJ0A^KSMyAPu;BF$ zdY!@NCUj*Wf9;#nI%`T{qo9vA+e% zdiToU+XE)KY3ni;pKSa|`@6*gcX?r#a{*gLWcPSNW zr~yX4AAjy{x%cyx8d2{5%yotT*dA5rpv#%u&eDr#egpD#$-X-O;Co~USZW|CuSUt< z^{&l~3_RDY@I;pjx(3w6TrO_AIN4-4UnSwLYPko?_mn@vo_@3NvM!2M4+bS7DIG|N z24%|hd0u9|e{8WJNb$^ha^F}8dt>| z3s%n85$&o1Z=zEk<&iSmy}$5Edf3ItMbIu(6ep_)#~Gy+AyLS{2jcX2&0hguM<6=$ z7D)mMr7{bhuO~w`Wl?JKKWN&4EIMqfxEU|&V*@5Pv>SM9y*hiO`*;pF$ zZ>d;sg7Ey%->JAjDsnT6iP>+vZ}y+G31%I^hubG3ZSdm3qg)eLoA-MNMN z{huN(*qi_Pu_0zvyh_uiDo18R2Ka#48nnL-?43b%wTtTvm)H-}m)6$FZdDj}tXIIv-0u zDTjn?5`(hOzlyUPp5P*vAmOncTgNX;`zs>_iu*os1fCx9zWZh32>kj#VipO^&`kY- zo@xWAzjwm+Y}ZpV7pcJQM?iu@UCD4Om! 
zbV}mg>A~*dL1#dUDBUdM;bsz=qni0?yGN=e0&L zL_?say{xmY{0qC=QZmx-&7Wq(;3-c{+sgatP^5(d#jodh{>sw%n)+Ev_`TU zxZwT$Ir{N@;0whr6}pMe+s!xBaSWd32G*-&m0w}?_ZMBJ6G%;Z;E7Q3F?LhM;}mlmWM8yD>&sNw z5_nIy*zxODD-GCP<@OItzL_G}1!U|4#(u8g?`o<;0XQ5+|4R;FoqNX#`>%XJyc9UE(?YFBhXB~iwNG2M0-NAj?HES%HP zpnD9f(KRz>Br=mx>8%5-3UOIzi9%TmMOdIwkePX<5*q(#*^#Il%V2fs>K_8N6z zmoGRkUL?^g#9J^?96DO7uY+&@p-+%v|F!Ap=+FrG(-cyg!jJlXd(Cg9aSVvz9bLjMUrAQI_nk1y-RwMTPMvf`~UytKlgLN;G^+5FoJ63LrNump?emr>t zCYx;g8fzgx_10}ApN3_tSruKFnUivDcC3Ok=3yqloQ$mUXMwEMYtV|6YH$q6AeV$q ziln9I&)}SxAL#sp@gy zFy8>*;Wa!BPdUFkMzsKq@?>T_6eJp-v45L73cO~{eAhs))!OX0hVXn0!r_11{zZBi z<{KQcP>hX^;_R=*qI2Nh?mq!Th?-J9(LlrpL$0623s*|zW63{MZVn)>S0lj}TbWye zMnyg=CVx$O{uV?6@}N5^lmcG!wuX;f(dr=X7j_^=ENd%~m<<7M4CJ|;+e7MNNWxj0 zzMsYZS}mw{cosL@LTz?*-}f#m(J*F=N26c`d{>h8WXKZPWq>=_%7suCjhCM_-()`# zC*h~Piy?_oge-dSpV;a^8=!^p8OwM-dF3EU5&sA}NS-VAQ?(poxhJCk-CpnaMBq71 zCQG$U{NIpgwpfeVCuJ0>eEhq%ED`%3>2jNGN1*c#I{0UMtf(RfO7x)b3w|5A>n^Kj zY+HbNL$vX7BA68GU>h@_b5}aYi7$LfwGN63vMjRpiQCHycgS920`jaE4}5eG1zal> zJStB}%xTxCHBv}1o5X6XgUa9nO!g()N?h2*Na~UO-n^&o{VR^ycelYKIW2!IZC{iC z2Ewb$0b_KLW_gvRf^e5usoqU@x;G( zdV+@g%244sWd|+5qbKQ@R>Vvoy(OfWABMCBlD5I8t`ObWu6?|1K7UuRlXhqxlpr`j z7yI`<>XP?&M^Emb(0(g?hKfd#!MSd5Rsf1BdHetor)#J4yV1z;gzpxm>ITmli5?ao znH|884wQw_MID9>kty-_6PQ?i>YO1ll>dt4@7)fPpq z>yPbJJ%vwIkG#@MbfBU`({QVp^5Lhy^ZdfraI>hPa}QZOcJ3tkbLqW^AMnXxUvE@$ zF2-zVj4BIwPJA?xxh6g)2@8|nq7V_92LW*;HA)z7s%N<^q9=oYXY#qIO(7eaPtFNw z2OfKALA_@0=b`_~!WKWh*&wI?F}6pPdz(Yw-hZ)2@;K`Ec^C7kG4VRA|K;5e?Q*V@ zc^z}Q3u#B?qEv0pAlhMS1^uMY$2i1Rzw?_C3eG;N%<=w14r*<4H6p6w@lxZa0<5z6 z$A2irBgJoJX%L2P3R$GkWBJ{nNEpk0oiGyrkD+sJ3xkWp@JY+It;N-{Z7eUFTX^%b zZQHhOtmRtPvaRLbeT4JJ!F7Jm{oGi9(8N;2yKFD+&NFoP=;vC-!*#25qREh?a&LC( zm)bYbd@_^KcqP!EBGt#>S-?&T`E&R)&c^^7#hO0tgFR?qg0Ba+E`OAtACDB;^V8Kd ze=Vq&j^hEYCu5=&L2Cg7!>6rx4*$uCi^`q*S~vqcZ3B~dW=t0<`e7qu1iuqJjD6Tk z37l(!bK>*G|GZBUt>c0MCyIan-$^W|z2+47rCQYJ!N~dhI32FoQO};FM6#PZHpe%a zBH&V10@WWdJND*!f3Y_IYyGyYuE(MJlDl^$qBw~elKO!#H4hCma!dXpIxrbDm^^Lm 
z!9s-D4YT8g=*5)i@vQajS3m&Y-)EQPq7rY}5^C{uFhZXdnTz|>^VT%Ip%Ffpd69wD zu4`d&QiL2QFQ1pjB<9)y)!}PbaNVY90tUl_Wb+{7EqO8S0)2I=R7K4AcBi;{~2QI8eo{ z_8-4eUdC!TRgAZBg|5Ru)L(v05idTztu2;@^PATYi4*jR)MAIxY;dKaZ|gaDX#C7M z<-g^}{t%{m^CrO#4GV;P!?yJALe=|3h2N@hE_Je|qm!@1*iO}fZDrtQH24PbI*+AH z;A|-~F_Pmu2HhuM5tnLHp|~~2Q<(Fi8t5OV1)3J+L&NL34RVG5E;`Vb+$)robufum z5kXO=2S)s+K`+0)xUr8c$XBZA-RS<>kzF(;-4 zRG8?yZzqjMiynx-dQMm5zz?;^H1!?DcynYb^W!iW%J43#ri>2oSS=o>|SPIQd0!D;&=~*BnSJY zyIc1;H5S*Gxu2yaN`&+i6|V?+tsMd95`jg0Jt9BCE6U#0o$wPfynGT4-qC$1-H)4+i&i)py%9{99wX*bv}1mxc{$z-x_kFr_h!XXjif?bkN)Y4Yu zcN)%M=EP0Q?&WNM+D&k97;lZ|_thUha3JE-V^O@Jk?im>GKeA4x5Qf+2iK8nD7cP( z!5DWR80nm8Y7d6E2;2Pai-;hH!DR_IFIzfMocpR^j-F!7aX;ZvZ|Bk|Ke5U6(@k`aCj>Wq0)&}Op~6ebn4#u8kby}*C;eDfysKw_X>Gya1lc7r$ zScaQ36m2h2Ad^^BQzSwd=FAyfs(^6d6J)crYaGWgV{4zVJs0s2yi176PxL{Ll40S< z7z((ryvpZ98{l@oFs!N9BpK-g^eyMML!5=4;`QI0A8wcpf;DgHLpjj3bR-_msnBwO z4^{3OX$4<@9tzqUbf!}%DU?w`W%Z6CuDnVEf`2Mt7@SNFX%Jbt#+lnMkz;AKVI_&K zsmKx(q-m+?4LSye0bi|@4~@&V z!Zbist^>DFp3t>AA=FM9Sst8KJ5;QKt%u{W=cJD$FaaimqBjDSJq9Jl?Kx{YZb~9B z&Lu5n@V+;)l&eHYN`fAAQW(moWt)_pLp@A?O5)P7YWE>IPTJZOF80JdWWYj33ZW$` z!6&BvTWTFGWf=)huo|f}#Cw1}0yf9k3Dm1_v?T|3rK7ZGAf6i~48jc(eOp2rV{(gQ zZgSmV1Es}%vG%zh>#WtGZm$)->LnL-%91ao{M(tXorJZy!0n)7XwnfCRq5n+;gr;F zp_D-Q#6A@j`ew;u=!}Ifa19J^ZhQ7JXm9da7gXT{e@#dWj9@Jw}6*99F}7UU+r`-_ANpUQD4d}x#BtqJ}RtE)@>9!2(YO6XKn=DT)p7{cWdiM}mR`l^=3JJG zx<=(1f-|-1w&738Lj78uHI9m0DS*bn%=B$n1smTh48LjJV`~2vMs;=JFj-S>kWVhPgDxUN?mVA%!auVFP1I_B5kPbfuKYa??iITI{T)u+H;t0R zdB`f>z{bZqwBTjt3&6K@rmfA1$bm8`U>-WPDbBAOx!)zZxs=@(MG;6fIv;!vKUq|UrCOM`g#R)_3O0$;g(^*&Sn^wIOPK1=n0Mx7k znsPZp_Aq^nN~qt`Zuk#x8V0KMFvhI4GW80{z+%@kOw=mF>MNybHth;)yD70NC0n(! 
zW-USE$CcTAP(pB#Q+q#7Jdcz{SGZGgOeBocZ(dfp#^;~8u z=m~Q@_vybd(&@U)Y8NRH*+#8}doA>y}Sh0qFEOl11HpUozkp*z+5Wt(pG$ zxhO2~sunb_TIR6!#rv(xg}F6rmY)yZP|{ZmxXK%V(_qd^0bkj2#G%c5`72!{>_Pv{ z*+;9kWslFFwexDgJRW+i;34+xWwPB7Sq}l$OaKmTb>g==sWt6G>C0jeiv87{7=>!R z-6}G1(0Ex8A>zv)Mz2_H>=LC_d;dEC+u6OFUAxLmKI(KR|ATuRb`O@|gbboj(?Ye6 zQvU^bxK=NVEt`@3xlW<5&Dq78RB(7dH+jYPx#S!ZdsYE(kr2b2blex)!f43Y-z#4? zMh>U^f@hpO&?NNAF&4oxiFzVp&hnebG>+--cTRdjO7#26$ zYi;Cj834n7uiO=Gyyn}tZ^p0(qr6enSwFwP#!wN^>(y>~y;f5*hDqq2CoKEsAURCn5O6}X-$Vv`^bsMqKoO}2g}kZib4TK` zvY}7*{w$Al18utKdNf9^w`%_eWBA){klcQ88R6WT2ag)`nXNEtpy@zVRpSZG+j0py z1qxlq)4fp7_Wo6+szr{+a2;I)c&1TzS9;OOzfq89(`)_?v`=6wZByIo*}+~n=xm_? z6X6|o1WW$}Bh!{S7u#QKSzlft35^NpnH7VqZ>$AD%toe(%!eI{T@5Cz#H)vH=`9xW zI#iLA^pAVW|KYU&Jn7GvG0>Q{w=fuzz}n)yRa~!mt3>*mySuMxKf#Tu<~&G={?W=Pk*sXIgI0GF@rT3@*+9b2(f9=F$opq)i8^vG5GRC?aJ9r6{W9p zDPUPmS+Jxz4h%;M`9!Izi@80T2MRewe(PB2C?d!60ZU`@*6%|X0+M8hd^$2}%Rj!b z^xwP?xJ&S)2d2mhf(C>uR&l@WJmJEkVRTBzT-jmws)lDd;~&*Swa(}ofHHg&VkFT& z3jVZ7tB7g_edHBmWxF}Tb`g%o@jy5S1Z`RmNJvB`cBbF_sH zg-u8SZ25ymKR)CVyLV6M6VFLG$IyNkAmA@e*YL~C`ubJ}vPm9QxUk zUr0NW`;MJ$)sb(~?dE+y{Kh^~b$6MCM%@m`LEiLSUMPlUH<57an#S+1J5h@{E~6g3 zAke_vTdjlhFS9mn(6|0#^eo!_Q&$LRh>Glv`oCE*6DE34Vi>_Rm#YM3>1nCLO%yS7 zQW-pi3_kp|2bzf_o1tK8I)V5HXN;J2X)ByI@Up+~l%L4GX!+aD<_+ye0NOPt)30ZsaIy7gcdEe( z2u5X>KgXwIl@#!SwtWG*aN=nV7zX=~}|CLb{WB8q5Rn*Y=0tHL6_!d_2CuMiy{z$I!`I z#&wQWLHMJLiH%GIyF_1#{3)Y0P`$sfy#g%E=(ppn2$3PI{Va{%oU67P8Zb!?XjJE5-CY>2LFPAREMj7=YdT>i2&$L%0`SkT^Qc% z%2}db5Not&VZB)2iuzL60T7<+k|qy??@ID+Ols$3>Gx6cPMoM~_k1V1aBe5-fLjXj z@H5J9%zkQo(|OX{7;2*E8%@1Mf0DcqHWMaM0Of@=mFUsDI{93)vjY)--u;tZjoYvp zv2FW6auOv5yi93D#7(z8Xw2!7`N=UbO-({|6ro^8%OXrrIN^+f+KAp?qYwU=nhjrb zi-kW1qXl(zL*_F(RpWhbG`!>jSBSlz^bKcTk1#!xUDzp|CP$gqQsr!O5%Nr!rOxu@N`N;Y{sGI>}{w z!4%M58(?3$y0&6ObXN4^)1bXG2?T~NR8M8*Gx}G52qU1(s|B^d)1fYJM6xgy=#wmV zJiBIY8#7$Y;X?`=sRqj43+vLZ(!xVaLP%p7QHE~$NTb=X=&vR^#_)QJFn}#ew1oa7 z8_FDB%Ub+d_+b!)3~a|i@whTs?XBwPGJsukCsa`T20QqO{w8X}w-gptg7|~fp|Ue+ z(Kdx=6ew!cdz4<@r9Cq@-t37F8J(HHwiR?iLz#)?i^asW?JPZrTO*K~b 
zHGX7?;;l*9R*35jtM*+;V3qHnhl0gA?1K+>t;zPZtQZy-$$nkxm{;$UTz1~X^mQX}r>yyY5d8A5>Z1dnDR@+!j)K% zKHi&@tD85dzKSZSBjCqcIY77%r|1l*(&1GrzmE2DFFDC&@Ctg5A za!MC(M2k%A>Nn~*aDUPf>lYI+uxpYxg)82C=mGR^`hv_pwTV>XT)OLBxFU%JJ&Pc{ zxa$t?My=F7T7wx#b-%aGFjDazZuV7T_(PFwE{tOW_B>b3F^_(eX%Jy$crxIyo23*$ z=4#-XM`N*EcNN;5bM;aUfv2S!56lIEL%3L>d>-9{r0t9uH3LMdbe}>_iOZNESO1D< z0h=yV zpME4T5y!lLiZg|Mqo;XFxP|3@RqSvEvWHI+aDzPqU(1Ytdm_V|UHElk<p^_=N?wt0IWk_I5MqMERD zK!=QADx@RG7LzH?)Ytu22m53&#}?$#a>J_gV!aP@z|N^Xr7`z2C%wwt#lY0d_%gY{3FL&fc#t zaxXJiP?v>`h^U2UrbZb(H9$_m)Qaf@`_FE}vo7CrnCvBQGlK>usr$mc>ymZw5?HJ) zdy8;b33uaYyM;I0j?-_aIvSZ^z)(s4R6gVO4Ad>02}qYG?Q@-l5--2nbVSzIsE8+A zrnX+?)a8xn0l3bqc2P*U7}xhyXq34SW`hL0#{VpS?&n=;DV(>%0hhv$nAe?Hj5u35 z=T`(##E#pgh$u|2y}bN|u(O1+;Cr7T67expr2H$Qcw%t;JQK&5c;)Drk9Dp?SV^fi z(1N5%3!rp+BM4=R83p5q=wDnqJ6IV0>j--09!I_hEz7+FtACgI-ZVp4wAk-9_mEsO z(JEu?^W1k*qm&zf*@4b8`OIT@jXxRtcVf#lx>16xlsvzh>)1%+9X3%A|D7v$>Ub&- z5^gG2f?TLNXOOcrhCe|f=_#DEz!({ zn!Vyiitz}N5i_dMU{qG;niK!k5)>dU3;@cLYaa^nRuyju)-}ZVYq^?X@)((i@_)?k zBHO(`t$?M>1L%hRFR@b+5K;^>V*MW#!&N8UTsw}wF4k?$d4Rk!+(>7PS-um+Qisv0 zp6hzuFR8<;Z$ z<%SIT@tJPkWLZ%x>ybV7HoIODkH+e)K8??{fre7V4fL>yS9AgLz@V5k6|}lWlnVg? 
zxnTd^1-nnopo%ZA?ev~Lb_gN@c!^DfFfBH;(8;zAEJdWiC7p!;`!?vwJl;x86T~a; z6Y8F^u`g=CG4!uxJ!QAQ3f3@CABNv)9oA1nP4r}5?`B!2#U@@5Z~Jc+v7?RIxXc8& zs?l2gj4L{6mbT2=|IWagi)sHzPOH?s6nO1te*OjY-hP&#tdY2)WYzlq9p5zA+TVlY zU~%}L*WC9T25%vFRmY6kP%vbfAP@Zmb(l7Cxb-RYbaxTc7S|#`Y{>!OX8D;#u~(r; za-dJqhH`$q6f&S+>;4h^HF)OHl>P=#v3(>Gd{TU$y4)3QpC|E0!P2lk5AzSbu>bDM zo<{?u$lMmQtX@x3#cOe^uyj_G4@Jq4dfTKDFCn--_PzosOv5A`pJYA}xR-A8b+h~b z^wFXJs&qwkTTLlPdeVZjJ+*=S&-$`;1NJp6Ex2Xnno3ahE+;IP0XZ-Kz0855dWn1& zhXxj+skor6d&ZeW1QOJ(HR zuRM@$W(^bj1ql|dqf4*0zF@fg{4og?<7p);9$6}1=_#9xX;G1xS{RN_B(TckCs^5E zCed`=dGTjB#z0A2Nb24$$5+9|7Gu)<-~rrzfdfraS<_SvcYgii+nkLwS=*2Y2Fp9#<9)K19@O6@7qH^{MF_Kj{1__u-D1jqJa^lKs@1IB5kY z;JKJT$B4eVUJXS%kJD+VeIL*=*D^dc8NZhCjPG|0;wliHP;zQ4w~`Vuq&h%{G}3&dq0O<3Q8LQ*8XD?x2RZT z)qqCgxm;U`3#(>P>GoLOW*p0_{3+W* z#%ys*oZZJGPF`bC;VVFSAVXB00IpP|+S{r`FfP*EI5k2peN>R`C-N>-oe|)Cdr6Pw zaG|ax*$)^;y+vQr=6@mJn~^TK7bG+mTm+00$k~ya(K`r^ySBGX`evj(Ki2yarDwsVFfbj&I&N!J|st&Sj{8BK<$GqSjA*7# zl|_b_5s%|We?UqL%#u>FHDar$1q%T>SQMUkL1?21p^T9JF)I=73yNu@vl4|`*tt!X zZk}!|ni6?bkrsYNm}b*pm|fpB0E&V*Z*Z$ullb{g#9A`sG#A2O zU@`>LZ!3u6?&a>T{-I}tA#WNs8 ziqa-_PU#!DYk@uo=+OPdz700=P+g2OK$D6)MnuWI`c8FzOmU*pXM__FYlrqQSx&Q+ zsqfp0YMh|((4QpIhCF+hpjt{t{!0fmf5f^8{%v-pR4Zq#t*QCF%oqTW#L zf=d$&vF*8Vz4tARnwozwh;44#GEg0eiYFKjlhabLsT~IB1rZb)+-yvdQJ*!dk`p`!Q>2sC$AI-4anGeI#?C~< z)3#9xSfn#k)jb*I$4mG;C8+)Ij1uX(lrrZQ^~6RJ_s!7Je{Qxw2z`3#PFNE*bGAR2#j_@e zTsp;COg~r77j`@Fb~Ooz-BsHeO!f~zKHmb)1t+C@LzNv-{d21MdDWo-w29Kmcl_k@*CbV8 z*K0bGE9S7WfG!~=YCn2?fEtSfF~d$U2~^V2Y+o23e%5$$N{{x++dgp$bZeGm?!AzL z35*odfr6^pwt@j6l9!LY1 zf^})EZfAnCL*G2;UAsXuL^t)d2o-!BFr&*KfvygKQi88g$yHy|8`A+@10}bjRUwIM z-{-J$hB7P#+2>sUeq?hV)hMJ%(;WafYQZ;)BJc43^v!*Cxns3SxV#)$sZn6kH>AQ^14I494`9kuI-HPk57KlP;x z8wRJ+PYG$}D=%0n76JOkZ#>3tv%ia2pc;3Oc;uW0yp?1DNyF!v``qMCV*o#Q#cEf! 
zn2&1V2610UMWvK4c#XWJqPd$uds+r@1IF%7FCQbPV|!~fj{`iyP68wsJCHwSw)BYx zLROoP0hItQf|%B;nX?NP2kBV^0sV*nqVXJZmENB1QoSb@f$1cEzhiXqfEqTt^Jy4# zm{O;n-qVlp?(@(-fmo+-@Vz+dJMkB(iVsaP58gQcP8~kF;*WKI#M=o93}iH5!4_YB zVy6`m(FTqf#}$b7Bq8KPA(>)f&|JZmZ$GjS**N$nFZbHY{&UAF)eznI zvJZRb_=-CO$ewm4Fx>whuyJJ?6P(yzna1x5jNJ%>J9DKt8l$=aeWZzHN)rT>CNR=y z5H{g-Jm}mo3lPnH2g8i{P|gluN&Be@iB31!y(22}xH(URP*HV;JY$1rE_b(Z^3D!e zh-XYRbktzltkH~Y-k%YQhGnumaDv&r^_nbhUwE)2alGG`chjxiKuv#w%82=Sc7W4u zP{4>DvCuoS3l^Lt)<()kej43aq>1@{T@5cO#S}bG&dgAr`-*egatY2qA&X46peK5J zvyr5*gY*v1VS_>aq(&gkya5sI3`FWVsrIqYull0GoU?F-XRkQ}`0c~+u4lWxp-e+j-sT)BB|rWt za$jwURi8za(^Z%O$jsr{<)@>6Ky|7WMVyL-tvQOy7y0nh{rcW{@wWL1;E++D`2NV! zVbb2CyQWDDGv5`S=n4r>bmT~z`Em0N%wH#kVP{9ua&E{qmeNaqB||idJz@7o!n=-^ zJCAk;XkHQ|{+gP+GfdHqf#Po58OBPK{VX(H#?@YGo+{d4iomhr2!XY*q98=-8L9#D z?rc$6A3xswan(5G^v$LS8po&R5e_M)bk6W>vV>Vd%bxoW;w+G- zjk9&vm+28z;Hc}pv2bePUAKa&R2j2E#CXe{?FX<{y$CTir@_{3)^~}w_>=h4ux0{_ z$GJmWno;?0XP~U^e924_!Xh4%Q3<85{$+}5y)25_9#FtSjq8<;feE`n%F&k$*v=OV zDu-Iz2x7tSxyPPL8wkG-!M1{KrwAc%`#yG)!9ePz=sL(T8mg9sY} zzsF64B)KA51fJx&kYmXS)47DXD23N5Ki<-}SA2#BvA(6e+(^nR7&be492(hT6+yzd zUUCuDGEI(S0mei?EVpmXe+->xxyoI5-(W5C$Gg2c4bRG*Zb*xYCxIJ4dU$0a)=uW1 zqfk8hndEA*7o||eh8^bf;3n>@t3?8e@U`S8i9OeD4K46{_+A7G#m6b1pc2%&>x8>j z`W`?P8I(RXE4qC7691&Vn@GSHr9;{FZ)VcnYGaBvzrukf=w-dI1u zULCQ$kEMt2FQG1}1_i*Nz-3FXV%#Y@+_E6DA|vIsm}hU<9vJtjZj`nb*8xx{k6U8b z2D^@x>N=Q(J~q*#>={k)64531dI=y|Q-UWr8T&R+zd!%;wIA8mdUFZ zcfWJ%C?F!mmx-!y?~bxp`W60iUNTY2@HP;OsS74^;^;<2&Z|ZV0PpjG;GTCuU{FYgGZ=Q!2Oqv zPI0J|6c-Ku>MX{GW;3ta9Pwxhbluu2U9Db#=zbpA@2d+b9(uDO`c^x!BZ=m>K&-84 z)qDdxnAa8T9uU9m@Wv)tzk8gtSLyNA$(Cj5=JQASZsoUA5DjCUgnKk9tBa# z)xR*IOYD$u@f0 z$EViAZY}QF*>~{1ax;=&$;!-uQ{tE%X~efIYdva4e(;C{Pb=rQ(FIB^e_`{N#?A(9 z8ryt=!O<~|DRz@T+#w(+?Z@HuF#|jF#3?#ta*4T9o{}eh&I;8Q@?LErPVUhkD0zmP(~0ExOYg#nc;KBHp_j_-OPc6yVPM>K4cxxqy;$fL}-5=pWU0Ngp-16K<;+RXEi^e5t1pF*Lj|Y*uCo`~}iJ z!eotBVau6ICnw1#B-K7AMHe%6`4#e848A{rh!>ym3id%n(T=B~`^~lx!sfkks3x(^ zj;@JLH)}e8LeHVghhaq!h%Z|glLv|L<3q*K7+UF_Ho47NO;A68qStaw5=WsAgxe!z 
z|25U=6Tv8w_uXwgs)*pf;IGHvDr8%Mil zZVQX)D9O5L)a=>z(eROS#X!Q59tY^xwq#-TsLYh`Hd(kQ3av3ID4!IX^}GqD>F_r+hG>kM7ycx-9r{t~0T7ZY)g?-Dw$1etGA-WJ?m4s>*L zOHA>!h62N{42Cj|i0`OD*OwBc%xHia5-sAoF;d8-mh+m!swdKE_CrK@T{LRfA;d(z zRVARN@`{)$k(!ZdtHt|%7k z)Y{vcDwZ~F%EOgWVF6Mlbv@g>hq6K%=~Pe+q%FHk-}j|!wb-VjptIqB$pXxw(r{&j z`i>N)a2Z~9n`6s~gC>_4T4ua}uf3usSpdCUbTKz02<}{PH}#79y2&cKb3z8`V&o|N zLnGrq{7>ETa9XfT!TVjU|KaOI_W^ip+MsZ|%Uc-8JjNb@14sFWKiL(_5+I^z6I2(= z4WGgsy&4t!YZ@yD5OqBbz~)+5#7}Md{QS&wZ5=jrv|xy3TIucOU3~2SW~G?~fZXxu zL8>_e3a_{RoXS?ZDf5{iYH?306<+g4GN08YP$Z%#V;`T6%jx+($$BsFPfwp?qa@5+7eFb=* zn9KDL{0CZ)AheH3s51lx(Mf-U7Q+?F3enal(mtc&@j^Sookz*jx_=M##u;QBWNnfQD#N6Y)!$!&7j2# z^YAw^hZ}DN7)pSI-kE*-yV9gv?<6)>6_0wl0eQ?JnLV#Cbr>emtOmefk8x8d5AnqP zh5a*;fBlE7$XyR$hho^aKeM_GKL>jL`*2+4S!V!CMXm=4pHuCR63P7cV-xOVo^-4_ zDFc=o*vLMGV z)R|i;<*PHsU>;!wYsPC`dC~6Qgdde+L4dPOeQV7ni~a_2$GEY3v4{c;UX)%o^~xI* zikBA`KhZPX`}}oeb1Y8jnR&V<9a{v-^@$z9 z-gilnYGl7{{omoxm~{lm%VrmNwD*^25qkhbL!!96qrE%6XnSOkM-w?nuTJbfxe|5V z*C!YY%LZXR`Gr!>Qa4jS;F|-(NdaS&a%Q}NA%FEZMM;0GUSM>lJ|TX1^7iBPZ_fDh zmF4R}rY=93tCbtWK*&SNEkNb8P+hvqG_VfDkJ=Lt?4vx@v}hYAzV)G<`qG+x3<#KT zG$Y+Zrh}MZ|2(~u>%;vp!HVzQMz6UTY=zr{1ylfT!a|$+H~n#4y1)Tu8#_WFh_(6iFmL_M_d<%<9OY}^kku{gwqj#+@d1AANibj~N{xTqfC&M2$U~Db z1w$0f^EtJU;L8UqXJz1QID8E8^+~e-z%{Onw3eCPFvSKOZzYpIDNY4n`NT2YG@l#p z#T>dekSrLa4Stu|uJ;94+5S|*rz7FteVyQ(_l*$GY=6a6tEw#8wjS2X&v}7gs45J0 z+?LM8`Q9e=>#*(9=uH;PSSoEv-s7C4>#1O>dU)p2=5Y>5ML5c=p4xaVstj)gAKKOa z!b7BB-588=1h1DOEA(;lvS7mI;|8?dy8qp&FkAMuobeveA_VfH*|YTe(jY>`L3%6a z-<9RE75xc0*_~O~z$s2Q7X;fmXTlPbAh5om>8E)Z)rG)T9Tmk$o~48a=~E#YfJbsO zTE;llCU`Kf&!YDY>ixVitcv5FgwV@M7|YfN788Rma#1NVcEQ38BY0I?kt}muYls5xvEBCB<@fyYXi1Xt?&@IXSU616(YSZsJ zZtvB5QoR@*czS4$P)m0vZ8I1^O<;_eb2@B?CX&?JJFK4WKtgvA&8AsuZZ9mt+sFZQ zx$&s{ZD8RKxQZ#Iu*FAitzmTY$!v;@$e~G-{`twz4_p5`TB|gJa?woqwTsZapWm!{ zL1o2$e`=jFD&?X_GF}1?J8xNym#RZ%_EbAXj3g!AtkPq;`Mz-S#9#%L%z$W>s&sP|k zeFHJ0qz^ZG%4Ht5+>h70OPUK`OxKgHWGU|)3_9jw`@vfWtqrSzR03@W(l%oNvC9iv zJzDqEa#`cz*z`Q|DwsLO{Cc|pVW_;RK(?h46pq|lLPz~+)2*h5m_H3&2yWb(a_ 
zqa!ynsG!$9n!Tvmlf~_}6Rw2%LXIF<23Aj?$)3GkRO8mJq>SDPeG6NcXYiN)a*EUr zU=8&^fUkRh$+z^{q9^8)q6HpN$~vQ_dWwZ+8_x%#7tP32zykhf8r^&)yPs}lSUJpF z-5P(Gpr25c8@C4?rnD@OKrZWdLt8uzI+>4fjGtKU9%n`UM<#2Xd$f~vWiHI(0up0 z&9~d=%w=xO&#U&|dss*heJJ|_j`;Pmragvm#_3P*QMna0PdaT?h}cu`!v5uk8I@51 zGZC9Tuh(NGJr;5>c?VKEx=~fhyRj>`N935r6jCUVbP(HCt-eF8fB1c*F+Wt<5&-2K z;gphWciNXp^l=stAS_GkUqpV`#|AIky@~G(tV16>An#S+pU8P7lE9E0$sVs5Q0Hu!u=pRQ;lX9Y9kmzTNhB0atUS zat5eg{&JDfu#4I}Uc`F29Afl=RL+MsS6a4hU{!n^n$qy!RD~b z94ooI?REo-W$n@9>W$2u!nQm~$DMG5bui@CbK}FRA8-(q`#Z!Te^21?&!^FC$Ym%V zxuUBb0}$h06sKM^EJU*uo1l8Ce+#`ezr19d9^Gk`(y!;g2e$o1M}_tG&U3w;3|i)! z{2#d;H&)z;23X+e?FRJ%z`(iOOg(W+CTIM^@p`nd(qKFVwggQN0~CBY;gxq67*fo@ zRt@)nQ*QI&s>jGCLfoF}5ho+!fgijRK)>DqlT{PYri4~s$=z|BA^$$YmX(-)DC$(& z4EgFCDuR6m*uvp=lj+~*Lr0G2R~a#ZJ^49MIXESsAxXai(mC9%yIi6twRK;?w9Yu`WXU@d5o;<=KCSNU_zH1Q zYf#J1!YVU5GC?jH9D&6=iUkqZq@FMYO{=IlT(f_m?F{cYmyo<0#wNodT z>uQD&Ug25$E^p(;8DY?=Y>R)oLhu0Wjtcy-uiO~8l>2%CScwwQ?yC1zO@8(kf8N9Y zbd!Ql3c8^V>yqf|r;|*VT5&bRWwp)vy^|%GCVrHknP@@FEuUm<;KUm?;-c6^v)(66 z_Gv2v%FY3%JB}YD&=w$WG62Wk;K&<>p+tyDscWbJ^xNAcBVbrjt>SsP%hl=OO*BkYsEM|IUB50mK*QbvQMJU zM&w4L9|I9d72)%YN8P6wmt`J#jCCCBFyQf`(4$yV*=P9`O2jLM_%R2W2;$lb61|)0>y~FA~FAC+LneMe3Ht1EP-- zH{BVCz)UK$dieAD6qF5a2~R#^s(y8N^WWeC;3G<)WQWl|@r|6s;pX=RLjCG5nC;HG za#i@jpTvB^KxE1MFgYWXch|08zc4D)@-b62(_l2(*JgY5KX$`y;I8rwbRZ2)Hwi3- zJ9)X3&SZ66&9JFoj6%E#ByppFR{nC8a%ZDE`NkT=LVc=!Mpb%PfvacO4J&Qdy`%up zrr+j5j6a%RFn?&=duGULsqS3R=ZWO|HOVFH8cPWXw^i)1x={Yfp@pe5EjaQoK>G7_DrfzMnKgvLQ(b2wkZoeA! 
zD6m#bTDxfdQ~V!Ei?WkYyh8pcw;1rFVrUSzgYugrr_0BvBZ|rCiM;;f0L@3mIHHj)46die zND#lULB_l{+ceF~soP}VFt=dt)T`6|qeeeQn*h&)g00C$SM zcd31m&k%6G-ceKYj8iz&2zU#+^ZT+)AhdIp0G+B-Nd5|tD2J_1muHm3CpJ_1tsXY% z5qwAL`SHSq01U1%NhIyt!Ll!D^#To*MC5Lk4!S+^7czUqy&jT{0Fr~QX0(mKppT{L z)q0n(`6WOd8dlr4-+VJ%Y8c@=0FjZSq#wl=$1qs`5u%OU>5ijL)OHV}mmC0pu`02XG9YYh1*c zz=#(f!(?2+K(RC~0HCcxdRO3r-T~Kf;a={BWj;Y00oa~(_OTC ztRLRT(xmq`qfoyAfmBlT<3+A4a>h~30AFkoY4##gKHXb#x@~*)zpp4C1B!Zu;wx-( zL3D{S05UAwl|I1^d}+l6u~$$sK_%YYEEz#1et#XuRcGkc0N_IiiXLmavsrE|F!J^D zqy&(d>zexlb&Kf>Z|OAg0M1{34xy6W@o`DMfo*b`#<0Q{|97fkzb&+gZ_fjC+M(k;g$NYHeM>G05MNM^sMOsqWXwc`J~F@g~Zt3 zi=g5oT^J6A%w49b0QS-jXd=uY_O?&q$wcF>OeW;TuVYCp$TUU@77c>f076Z+pidP> zz9Jr7PE5XOZ^={<0yBSPYwXI(HI1lS0NH_@5(Cb?`B#uTWOT z!guY?08>AFM%CX+gSXxYWR$h39oi})a4h=S3|xXW_686k05x6;uV&rh#8YI~Y-Vts z`Ap~`5I6IiFudO`W1fN80FFHW3LvO?Gth`O7B`Vi+N1bKBOG7|`U)wYc7$-_021mY zX;c5=w6x;TGp-CO8gVsAy`vi<6I)PT3$RL>0NGqhEkMYu_imFSI=&8fMdCArC>K*6oQGc#RFpUmh6M29IybKC-J0O$ez^=r2QM3nBa z8sm2V4D;u6@tyj(zR5&AOwu2^0Q2Z7+r&Q45I8wMBy@aX*kA1v0E^>b=zrV=#te`pwRZ{8 z$1LFKz14oi8VaVujT>}M0K~Sm6uQl{73Vqgw!3eyg`9aq%7;-P-t`qYM4pOn07%4J zo@x^8o4xy=%G1g}EZcX$_mZ`HUJCXaUSp+t05p09&fom+@&d7!`r)rU_xo1zpGYj& z!({&Quaqo?0IL8$=ze+ZDXY&n>Y5=|i(CAZ`j(7Q!mk05;wf*#T6}Qtp+g3cZ<02?b9EUf9QJsHN8Fkba! 
z7eWrI0{3dF6v>7k_mr7|+y9xG2mmNJ3pi7i064+mWj_(d#%TH) zSSir~)eJIn!%jV(z9j4uk1|Jq01919W1Oa4308;QJ$xz9@FtC1Jtm@Q)<)`L3+nJn z05iMz%h#H$i?n`s2*g7yfX2zqoSJGdW!k^S*kH6C09Y=MYa>iKx2Q5K)5Za*<%`-t^R1S*03uBNrW4}>gn{lP%#(lRpu8{u5nnaaP%0F!Gl$gC03Q$pA)8IY zcKsR9uI8+kI#o6;dO<}r`_G?wJw;3~0J%(0%Ym!k=iHwJ>YA@P^({~gK(2eF=S@e9 zamU0=0Bqh+ZpRf!aMSs`do3%TJGJVrpuBv}MmGTb$Z{>}0F|i9A1>V@fof{>L6JxD zxBjCngqL2^Vy3CaJpDm30KKp#$j(;!Q})}fl+6;Mm}qp9rGBO`fguBYsq0eS0Pr;G%@D+39Hw09v#v+eieYYY_4q4wE|feDI{^)HhlV zv&q-ZcYtAk0E4!G1Zt~9n`HQHz8w~<0Kd9M?}(W!<|v$|qOTk809iuk!q#)FOH_;q zO<^J3Q+W91IRWa}C1VcwsCV=%03+;B%#HMc-;)#SDBF>%GKHu#Co z0Dz3c47`g6)=VklU1(23s01|OWiA+yQ;46e~w0H7=nimaEEZrJlF*?Si7fQEiK_M#i-I~^f(5G#}*08S(8E3`qX zY#P89MlSqu?~9)`ZHu?vg?N^qD(^<*0Dygc|3g&=m4xEbQKEjB4o_;7!A?5ur#-`? zh@H`%06b^MyM;AFJ~8T2QFGWF&OXGkv0?v0Ix_0_US)%7iCuGq&%1~ z`mfLzu~rFa0G|{o z>eyPsf=eWuJJTJ!+}vJ)W1<1aegQkN2vmg|07uB6V#*gOC3hz_gP8c4X0t6FNEsMd zjNg#SdB6+y0B{Wq5L@+MOA{U1b>A&s&695wfOI4*j0_buNP?Ur0E1#d{9JTW^P4yi zXa$D+*fKrhgU%Cz80C$*CXlZa7 zYnmUEI62z~Q|4ZWm3Zfh>L{TeIcRiX042*@YjK3M&2`*Zhl4$jzUjWfhk(W7qPf_w z*s*)Q06@`xN)=i5Z0CfJ^~YoWU80FIhJ8w;G-IBbOkw+808&!#ZW4CIgn)L}=rPn? zxn2nZ8@NGKgOK^>%?slf0Kk%M95DEFZ(unz)=q6`P!GSw)|Cjl3f*9?u3|8p06b1l zEig)S3ps@H6Lmjd;$|Dkx$u8SjIeoEhwHow03V!NTV8|CY!P(4{K3>FtM@%9H$Y0L ze*+%g+0L^)05CKa{`pE>4oue?NHsvEC;}iwujOC%)h;0{=;(gR09>881XKXN<~$yO z#y7BH{rinuoxIJr@*yLal`@Nm|Z6>Xx;O14(Sf~WErBbGQ6r8 z0J!JlC!UqyT3{ZO?JdcpneHroy~4d~UlO~1jAVtB04LPITO%B&gF(&;{!FuvuJ8+! 
zK~_D2A3|o^(cGzF0I{%vh0X6KnBf?-nRYf)ebTcBWtC36U=+oWDdp(&0Fn7okpb5# z1?Hirjo+@Y-W+aNyAP5iiIb&ZuDB0b0AWX<{OH^5ZMP5}FQpNM$x1LtLn*w+002=8 zmCO9B05I#sNmn^OfYx~(*%bRL1{w$(Pv|9;F%q~};ySF{087(j9Ye@cF%5Az@FQlU z1=bGfa`!@+iQ34LDeel+07r}1%)YlT;O@|Do)+XrLeYLke+}JE0GaeI01`N`C|$6O_E!{e8zP`wY^E=?4UFzWL?nIx z04h~S(q6Ga`N_p0O&a3my`gTieV=%9hoWCdvh5COl%y|GwJN* zQoLG60Lol)!V_!(4Pg_8t{#!G=Y}CaN$6W-Ebu){q7Nh}0CyIIHs$fJyBS^rOBnxK z1sq4a?=~qYDnLD64uf3c0N5>Y0s$?!I+n;w1cc(-&cd!Y7MBDPcmgEsdlfXt0AOUj z1MeP81_C@$^}(g9($(v|2Kj^=qw%eW#{I3z0NIf+h<53M<4(02`SqSd0Ku zCL{7o@PHJ7iFS}3Ok^`r7$dmhzDZ#|0R4GVKdmm7Am`dUfn1&~ts2u;!1$T*!_O^- zIAQ1b0PV{QnbZk7xwRIwcG++4Yoeiu^+H0i3_B5TPcg#yFjyiX0sXRS2>y->2O0Mm6` zQv_dogK?Tr86&iq*2e=EKILa~AgP18)=}Z{0CpvNA3ltc^Ga&tr8w#Kw17APj%`0BkdBeD3y#(kq!KF^2RVvvP6jFZ_`o0J=kTbbq5TE4K7^30r(y6bgK+r#rB-^^^K8N101;W~Ac=ilbSLPZRh)sC{>>D<{HDp1?d842MXK<| z0KDsu`eWsSR&=)FC(8vGcsFz>Z>rGACP4Jp?(=gtYEmh&+T=+uzW_|? z5Kx8$j)n7wrKDyO0A$HEnM^+K&N1ubc3kmsvaU~PZJe?3_4~>EutLhO0FWmHL`RXc z>SB1`ThSywrKdmJc6+HVB{Hx02O@UG0Q08?d=-VKKQHhrMQ3Dm^^EYx*q>n{)xg&xXO literal 0 HcmV?d00001 diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index d727d2c159..70c21afed4 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -383,9 +383,8 @@ pub async fn handle_rpc( == ForkName::Fulu && (method == ENGINE_GET_PAYLOAD_V1 || method == ENGINE_GET_PAYLOAD_V2 - || method == ENGINE_GET_PAYLOAD_V3) - // TODO(fulu): Uncomment this once v5 method is ready for Fulu - // || method == ENGINE_GET_PAYLOAD_V4) + || method == ENGINE_GET_PAYLOAD_V3 + || method == ENGINE_GET_PAYLOAD_V4) { return Err(( format!("{} called after Fulu fork!", method), @@ -451,22 +450,6 @@ pub async fn handle_rpc( }) .unwrap() } - // TODO(fulu): remove this once we switch to v5 method - JsonExecutionPayload::V5(execution_payload) => { - serde_json::to_value(JsonGetPayloadResponseV5 { - 
execution_payload, - block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), - blobs_bundle: maybe_blobs - .ok_or(( - "No blobs returned despite V5 Payload".to_string(), - GENERIC_ERROR_CODE, - ))? - .into(), - should_override_builder: false, - execution_requests: Default::default(), - }) - .unwrap() - } _ => unreachable!(), }), ENGINE_GET_PAYLOAD_V5 => Ok(match JsonExecutionPayload::from(response) { diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index fba34121a7..87ea8642be 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -546,7 +546,7 @@ impl MockBuilder { .map_err(|_| "incorrect payload variant".to_string())? .into(), blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) + .map(|b| b.commitments.clone()) .unwrap_or_default(), value: self.get_bid_value(value), pubkey: self.builder_sk.public_key().compress(), @@ -558,7 +558,7 @@ impl MockBuilder { .map_err(|_| "incorrect payload variant".to_string())? .into(), blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) + .map(|b| b.commitments.clone()) .unwrap_or_default(), value: self.get_bid_value(value), pubkey: self.builder_sk.public_key().compress(), @@ -570,7 +570,7 @@ impl MockBuilder { .map_err(|_| "incorrect payload variant".to_string())? 
.into(), blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) + .map(|b| b.commitments.clone()) .unwrap_or_default(), value: self.get_bid_value(value), pubkey: self.builder_sk.public_key().compress(), diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 17441a15fb..245aa71a15 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -58,6 +58,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { get_payload_v5: true, get_client_version_v1: true, get_blobs_v1: true, + get_blobs_v2: true, }; pub static DEFAULT_CLIENT_VERSION: LazyLock = diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index a5cd94536d..ab70521686 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -364,7 +364,7 @@ fn spawn_build_data_sidecar_task( } else { // Post PeerDAS: construct data columns. 
let gossip_verified_data_columns = - build_gossip_verified_data_columns(&chain, &block, blobs)?; + build_gossip_verified_data_columns(&chain, &block, blobs, kzg_proofs)?; Ok((vec![], gossip_verified_data_columns)) } }, @@ -383,10 +383,11 @@ fn build_gossip_verified_data_columns( chain: &BeaconChain, block: &SignedBeaconBlock>, blobs: BlobsList, + kzg_cell_proofs: KzgProofs, ) -> Result>>, Rejection> { let slot = block.slot(); let data_column_sidecars = - build_blob_data_column_sidecars(chain, block, blobs).map_err(|e| { + build_blob_data_column_sidecars(chain, block, blobs, kzg_cell_proofs).map_err(|e| { error!( error = ?e, %slot, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 733f2ca1db..d61ea58377 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -825,7 +825,8 @@ impl NetworkBeaconProcessor { | GossipDataColumnError::InvalidKzgProof { .. } | GossipDataColumnError::UnexpectedDataColumn | GossipDataColumnError::InvalidColumnIndex(_) - | GossipDataColumnError::InconsistentCommitmentsOrProofLength + | GossipDataColumnError::InconsistentCommitmentsLength { .. } + | GossipDataColumnError::InconsistentProofsLength { .. } | GossipDataColumnError::NotFinalizedDescendant { .. 
} => { debug!( error = ?err, diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index cdcbe1bb8d..9a8edbfa4c 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -844,7 +844,6 @@ impl NetworkBeaconProcessor { publish_blobs: bool, ) { let custody_columns = self.network_globals.sampling_columns.clone(); - let is_supernode = self.network_globals.is_supernode(); let self_cloned = self.clone(); let publish_fn = move |blobs_or_data_column| { if publish_blobs { @@ -852,10 +851,7 @@ impl NetworkBeaconProcessor { BlobsOrDataColumns::Blobs(blobs) => { self_cloned.publish_blobs_gradually(blobs, block_root); } - BlobsOrDataColumns::DataColumns(mut columns) => { - if !is_supernode { - columns.retain(|col| custody_columns.contains(&col.index)); - } + BlobsOrDataColumns::DataColumns(columns) => { self_cloned.publish_data_columns_gradually(columns, block_root); } }; @@ -866,6 +862,7 @@ impl NetworkBeaconProcessor { self.chain.clone(), block_root, block.clone(), + custody_columns, publish_fn, ) .instrument(tracing::info_span!( diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index 90a914dfae..03ab6a74f8 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -1,7 +1,7 @@ use crate::beacon_block_body::{KzgCommitments, BLOB_KZG_COMMITMENTS_INDEX}; use crate::test_utils::TestRandom; use crate::BeaconStateError; -use crate::{BeaconBlockHeader, Epoch, EthSpec, Hash256, KzgProofs, SignedBeaconBlockHeader, Slot}; +use crate::{BeaconBlockHeader, Epoch, EthSpec, Hash256, SignedBeaconBlockHeader, Slot}; use bls::Signature; use derivative::Derivative; use kzg::Error as KzgError; @@ -56,7 +56,7 @@ pub struct DataColumnSidecar { pub column: DataColumn, /// All the KZG commitments and proofs associated with the block, used for verifying 
sample cells. pub kzg_commitments: KzgCommitments, - pub kzg_proofs: KzgProofs, + pub kzg_proofs: VariableList, pub signed_block_header: SignedBeaconBlockHeader, /// An inclusion proof, proving the inclusion of `blob_kzg_commitments` in `BeaconBlockBody`. pub kzg_commitments_inclusion_proof: FixedVector, diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 0bc074072f..6f1b3e6ce6 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -4,8 +4,8 @@ use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz_types::typenum::{ bit::B0, UInt, U0, U1, U10, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, - U134217728, U16, U16777216, U17, U2, U2048, U256, U262144, U32, U4, U4096, U512, U625, U64, - U65536, U8, U8192, + U134217728, U16, U16777216, U17, U2, U2048, U256, U262144, U32, U33554432, U4, U4096, U512, + U625, U64, U65536, U8, U8192, }; use std::fmt::{self, Debug}; use std::str::FromStr; @@ -146,6 +146,11 @@ pub trait EthSpec: /// Must be set to `BytesPerFieldElement * FieldElementsPerCell`. 
type BytesPerCell: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /// The maximum number of cell commitments per block + /// + /// FieldElementsPerExtBlob * MaxBlobCommitmentsPerBlock + type MaxCellsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* * New in Electra */ @@ -421,6 +426,7 @@ impl EthSpec for MainnetEthSpec { type FieldElementsPerExtBlob = U8192; type BytesPerBlob = U131072; type BytesPerCell = U2048; + type MaxCellsPerBlock = U33554432; type KzgCommitmentInclusionProofDepth = U17; type KzgCommitmentsInclusionProofDepth = U4; // inclusion of the whole list of commitments type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count @@ -474,6 +480,7 @@ impl EthSpec for MinimalEthSpec { type MaxWithdrawalRequestsPerPayload = U2; type FieldElementsPerCell = U64; type FieldElementsPerExtBlob = U8192; + type MaxCellsPerBlock = U33554432; type BytesPerCell = U2048; type KzgCommitmentsInclusionProofDepth = U4; @@ -566,6 +573,7 @@ impl EthSpec for GnosisEthSpec { type MaxPendingDepositsPerEpoch = U16; type FieldElementsPerCell = U64; type FieldElementsPerExtBlob = U8192; + type MaxCellsPerBlock = U33554432; type BytesPerCell = U2048; type KzgCommitmentsInclusionProofDepth = U4; diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 73a50b4ef3..1d39c89cab 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -272,7 +272,14 @@ pub type Address = fixed_bytes::Address; pub type ForkVersion = [u8; 4]; pub type BLSFieldElement = Uint256; pub type Blob = FixedVector::BytesPerBlob>; -pub type KzgProofs = VariableList::MaxBlobCommitmentsPerBlock>; +// Note on List limit: +// - Deneb to Electra: `MaxBlobCommitmentsPerBlock` +// - Fulu: `MaxCellsPerBlock` +// We choose to use a single type (with the larger value from Fulu as `N`) instead of having to +// introduce a new type for Fulu. 
This is to avoid messy conversions and having to add extra types +// with no gains - as `N` does not impact serialisation at all, and only affects merkleization, +// which we don't current do on `KzgProofs` anyway. +pub type KzgProofs = VariableList::MaxCellsPerBlock>; pub type VersionedHash = Hash256; pub type Hash64 = alloy_primitives::B64; diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index 2a5c6e47f5..5d752cc0a5 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -220,7 +220,7 @@ impl Kzg { .map_err(Into::into) } - /// Computes the cells and associated proofs for a given `blob` at index `index`. + /// Computes the cells and associated proofs for a given `blob`. pub fn compute_cells_and_proofs( &self, blob: KzgBlobRef<'_>, @@ -235,11 +235,14 @@ impl Kzg { Ok((cells, c_kzg_proof)) } + /// Computes the cells for a given `blob`. + pub fn compute_cells(&self, blob: KzgBlobRef<'_>) -> Result<[Cell; CELLS_PER_EXT_BLOB], Error> { + self.context() + .compute_cells(blob) + .map_err(Error::PeerDASKZG) + } + /// Verifies a batch of cell-proof-commitment triplets. - /// - /// Here, `coordinates` correspond to the (row, col) coordinate of the cell in the extended - /// blob "matrix". In the 1D extension, row corresponds to the blob index, and col corresponds - /// to the data column index. 
pub fn verify_cell_proof_batch( &self, cells: &[CellRef<'_>], diff --git a/scripts/local_testnet/network_params_das.yaml b/scripts/local_testnet/network_params_das.yaml index 80b4bc95c6..d47dfa6b5a 100644 --- a/scripts/local_testnet/network_params_das.yaml +++ b/scripts/local_testnet/network_params_das.yaml @@ -1,6 +1,7 @@ participants: - cl_type: lighthouse cl_image: lighthouse:local + el_image: ethpandaops/geth:engine-getblobs-v2-3676b56 cl_extra_params: - --subscribe-all-data-column-subnets - --subscribe-all-subnets @@ -10,6 +11,7 @@ participants: count: 2 - cl_type: lighthouse cl_image: lighthouse:local + el_image: ethpandaops/geth:engine-getblobs-v2-3676b56 cl_extra_params: # Note: useful for testing range sync (only produce block if node is in sync to prevent forking) - --sync-tolerance-epochs=0 @@ -19,6 +21,10 @@ network_params: electra_fork_epoch: 1 fulu_fork_epoch: 2 seconds_per_slot: 6 + max_blobs_per_block_electra: 64 + target_blobs_per_block_electra: 48 + max_blobs_per_block_fulu: 64 + target_blobs_per_block_fulu: 48 snooper_enabled: false global_log_level: debug additional_services: @@ -26,4 +32,8 @@ additional_services: - spamoor_blob - prometheus_grafana dora_params: - image: ethpandaops/dora:fulu-support \ No newline at end of file + image: ethpandaops/dora:fulu-support +spamoor_blob_params: + # Throughput of spamoor + # Defaults to 3 + throughput: 32 \ No newline at end of file From 863c19863ff376f9baa8a7b44119082f8db4cea9 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Tue, 8 Apr 2025 01:10:17 -0700 Subject: [PATCH 20/35] Mainnet Electra fork epoch (#7275) #6918 See https://github.com/eth-clients/mainnet/pull/4 --- .../built_in_network_configs/mainnet/config.yaml | 14 +++++++++++++- consensus/types/src/chain_spec.rs | 2 +- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 
74fe727867..3ff99b7171 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -49,7 +49,7 @@ DENEB_FORK_VERSION: 0x04000000 DENEB_FORK_EPOCH: 269568 # March 13, 2024, 01:55:35pm UTC # Electra ELECTRA_FORK_VERSION: 0x05000000 -ELECTRA_FORK_EPOCH: 18446744073709551615 +ELECTRA_FORK_EPOCH: 364032 # May 7, 2025, 10:05:11am UTC # Fulu FULU_FORK_VERSION: 0x06000000 FULU_FORK_EPOCH: 18446744073709551615 @@ -142,6 +142,18 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # `uint64(6)` MAX_BLOBS_PER_BLOCK: 6 +# Electra +# 2**7 * 10**9 (= 128,000,000,000) +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 +# `9` +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# `uint64(9)` +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 + # DAS NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 230805e86c..006c40a205 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -879,7 +879,7 @@ impl ChainSpec { * Electra hard fork params */ electra_fork_version: [0x05, 00, 00, 00], - electra_fork_epoch: None, + electra_fork_epoch: Some(Epoch::new(364032)), unset_deposit_requests_start_index: u64::MAX, full_exit_request_amount: 0, min_activation_balance: option_wrapper(|| { From d24a4ffe30defee35c4285ff16865e434a437caa Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Tue, 8 Apr 2025 19:00:55 -0700 Subject: [PATCH 21/35] Fix builder API electra json response (#7285) #7277 Implement `ForkVersionDeserialize` for `ExecutionPayloadAndBlobs` so we get fork hinting when deserializing --- Cargo.lock | 2 + common/eth2/Cargo.toml | 2 + common/eth2/src/types.rs | 137 ++++++++++++++++++++++++++++++++++++++- 3 files changed, 138 
insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 746cac9c14..d1ceb2dbaf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2615,6 +2615,7 @@ dependencies = [ "mediatype", "pretty_reqwest_error", "proto_array", + "rand 0.8.5", "reqwest", "reqwest-eventsource", "sensitive_url", @@ -2623,6 +2624,7 @@ dependencies = [ "slashing_protection", "ssz_types", "store", + "test_random_derive", "tokio", "types", "zeroize", diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index a1bc9d025b..a39a58ac14 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -18,6 +18,7 @@ lighthouse_network = { workspace = true } mediatype = "0.19.13" pretty_reqwest_error = { workspace = true } proto_array = { workspace = true } +rand = { workspace = true } reqwest = { workspace = true } reqwest-eventsource = "0.5.0" sensitive_url = { workspace = true } @@ -26,6 +27,7 @@ serde_json = { workspace = true } slashing_protection = { workspace = true } ssz_types = { workspace = true } store = { workspace = true } +test_random_derive = { path = "../../common/test_random_derive" } types = { workspace = true } zeroize = { workspace = true } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index dd4f5437ae..66b4b7ea54 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -16,7 +16,9 @@ use std::fmt::{self, Display}; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; +use test_random_derive::TestRandom; use types::beacon_block_body::KzgCommitments; +use types::test_utils::TestRandom; pub use types::*; #[cfg(feature = "lighthouse")] @@ -2017,11 +2019,11 @@ impl ForkVersionDeserialize for FullPayloadContents { fork_name: ForkName, ) -> Result { if fork_name.deneb_enabled() { - serde_json::from_value(value) + ExecutionPayloadAndBlobs::deserialize_by_fork::<'de, D>(value, fork_name) .map(Self::PayloadAndBlobs) .map_err(serde::de::Error::custom) } else if fork_name.bellatrix_enabled() { - serde_json::from_value(value) + 
ExecutionPayload::deserialize_by_fork::<'de, D>(value, fork_name) .map(Self::Payload) .map_err(serde::de::Error::custom) } else { @@ -2039,6 +2041,28 @@ pub struct ExecutionPayloadAndBlobs { pub blobs_bundle: BlobsBundle, } +impl ForkVersionDeserialize for ExecutionPayloadAndBlobs { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( + value: Value, + fork_name: ForkName, + ) -> Result { + #[derive(Deserialize)] + #[serde(bound = "E: EthSpec")] + struct Helper { + execution_payload: serde_json::Value, + blobs_bundle: BlobsBundle, + } + let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; + Ok(Self { + execution_payload: ExecutionPayload::deserialize_by_fork::<'de, D>( + helper.execution_payload, + fork_name, + )?, + blobs_bundle: helper.blobs_bundle, + }) + } +} + impl ForkVersionDecode for ExecutionPayloadAndBlobs { fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result { let mut builder = ssz::SszDecoderBuilder::new(bytes); @@ -2069,7 +2093,7 @@ pub enum ContentType { Ssz, } -#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Encode, Decode)] +#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] #[serde(bound = "E: EthSpec")] pub struct BlobsBundle { pub commitments: KzgCommitments, @@ -2080,6 +2104,10 @@ pub struct BlobsBundle { #[cfg(test)] mod test { + use std::fmt::Debug; + + use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use super::*; #[test] @@ -2093,4 +2121,107 @@ mod test { let y: ValidatorId = serde_json::from_str(pubkey_str).unwrap(); assert_eq!(serde_json::to_string(&y).unwrap(), pubkey_str); } + + #[test] + fn test_execution_payload_execution_payload_deserialize_by_fork() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + + let payloads = [ + ExecutionPayload::Bellatrix( + ExecutionPayloadBellatrix::::random_for_test(rng), + ), + ExecutionPayload::Capella(ExecutionPayloadCapella::::random_for_test( + rng, + )), + 
ExecutionPayload::Deneb(ExecutionPayloadDeneb::::random_for_test( + rng, + )), + ExecutionPayload::Electra(ExecutionPayloadElectra::::random_for_test( + rng, + )), + ExecutionPayload::Fulu(ExecutionPayloadFulu::::random_for_test(rng)), + ]; + let merged_forks = &ForkName::list_all()[2..]; + assert_eq!( + payloads.len(), + merged_forks.len(), + "we should test every known fork; add new fork variant to payloads above" + ); + + for (payload, &fork_name) in payloads.into_iter().zip(merged_forks) { + assert_eq!(payload.fork_name(), fork_name); + let payload_str = serde_json::to_string(&payload).unwrap(); + let mut de = serde_json::Deserializer::from_str(&payload_str); + generic_deserialize_by_fork(&mut de, payload, fork_name); + } + } + + #[test] + fn test_execution_payload_and_blobs_deserialize_by_fork() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + + let payloads = [ + { + let execution_payload = + ExecutionPayload::Deneb( + ExecutionPayloadDeneb::::random_for_test(rng), + ); + let blobs_bundle = BlobsBundle::random_for_test(rng); + ExecutionPayloadAndBlobs { + execution_payload, + blobs_bundle, + } + }, + { + let execution_payload = + ExecutionPayload::Electra( + ExecutionPayloadElectra::::random_for_test(rng), + ); + let blobs_bundle = BlobsBundle::random_for_test(rng); + ExecutionPayloadAndBlobs { + execution_payload, + blobs_bundle, + } + }, + { + let execution_payload = + ExecutionPayload::Fulu( + ExecutionPayloadFulu::::random_for_test(rng), + ); + let blobs_bundle = BlobsBundle::random_for_test(rng); + ExecutionPayloadAndBlobs { + execution_payload, + blobs_bundle, + } + }, + ]; + let blob_forks = &ForkName::list_all()[4..]; + + assert_eq!( + payloads.len(), + blob_forks.len(), + "we should test every known fork; add new fork variant to payloads above" + ); + + for (payload, &fork_name) in payloads.into_iter().zip(blob_forks) { + assert_eq!(payload.execution_payload.fork_name(), fork_name); + let payload_str = serde_json::to_string(&payload).unwrap(); 
+ let mut de = serde_json::Deserializer::from_str(&payload_str); + generic_deserialize_by_fork(&mut de, payload, fork_name); + } + } + + fn generic_deserialize_by_fork< + 'de, + D: Deserializer<'de>, + O: ForkVersionDeserialize + PartialEq + Debug, + >( + deserializer: D, + original: O, + fork_name: ForkName, + ) { + let val = Value::deserialize(deserializer).unwrap(); + let roundtrip = O::deserialize_by_fork::<'de, D>(val, fork_name).unwrap(); + assert_eq!(original, roundtrip); + } } From 076f3f09843c52347bb04d5b3dc4364d1018c2e3 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 8 Apr 2025 19:50:45 -0700 Subject: [PATCH 22/35] Clarify network limits (#7175) Resolves #6811 Rename `GOSSIP_MAX_SIZE` to `MAX_PAYLOAD_SIZE` and remove `MAX_CHUNK_SIZE` in accordance with the spec. The spec also "clarifies" the message size limits at different levels. The rpc limits are equivalent to what we had before imo. The gossip limits have additional checks. I have gotten rid of the `is_bellatrix_enabled` checks that used a lower limit (1mb) pre-merge. Since all networks we run start from the merge, I don't think this will break any setups. 
--- beacon_node/lighthouse_network/src/config.rs | 19 +---- beacon_node/lighthouse_network/src/lib.rs | 1 - .../lighthouse_network/src/rpc/codec.rs | 34 ++++----- beacon_node/lighthouse_network/src/rpc/mod.rs | 8 +- .../lighthouse_network/src/rpc/protocol.rs | 11 +-- .../lighthouse_network/src/service/mod.rs | 8 +- .../lighthouse_network/src/types/pubsub.rs | 28 ++++--- .../lighthouse_network/tests/rpc_tests.rs | 18 ++--- beacon_node/src/config.rs | 5 +- .../chiado/config.yaml | 4 +- .../gnosis/config.yaml | 3 +- .../holesky/config.yaml | 4 +- .../hoodi/config.yaml | 4 +- .../mainnet/config.yaml | 4 +- .../sepolia/config.yaml | 4 +- consensus/types/src/chain_spec.rs | 76 +++++++++++++------ .../environment/tests/testnet_dir/config.yaml | 3 +- 17 files changed, 114 insertions(+), 120 deletions(-) diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 5a6628439e..89d260569a 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -14,7 +14,7 @@ use std::num::NonZeroU16; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; -use types::{ForkContext, ForkName}; +use types::ForkContext; pub const DEFAULT_IPV4_ADDRESS: Ipv4Addr = Ipv4Addr::UNSPECIFIED; pub const DEFAULT_TCP_PORT: u16 = 9000u16; @@ -22,18 +22,9 @@ pub const DEFAULT_DISC_PORT: u16 = 9000u16; pub const DEFAULT_QUIC_PORT: u16 = 9001u16; pub const DEFAULT_IDONTWANT_MESSAGE_SIZE_THRESHOLD: usize = 1000usize; -/// The maximum size of gossip messages. 
-pub fn gossip_max_size(is_merge_enabled: bool, gossip_max_size: usize) -> usize { - if is_merge_enabled { - gossip_max_size - } else { - gossip_max_size / 10 - } -} - pub struct GossipsubConfigParams { pub message_domain_valid_snappy: [u8; 4], - pub gossip_max_size: usize, + pub gossipsub_max_transmit_size: usize, } #[derive(Clone, Debug, Serialize, Deserialize)] @@ -480,7 +471,6 @@ pub fn gossipsub_config( } } let message_domain_valid_snappy = gossipsub_config_params.message_domain_valid_snappy; - let is_bellatrix_enabled = fork_context.fork_exists(ForkName::Bellatrix); let gossip_message_id = move |message: &gossipsub::Message| { gossipsub::MessageId::from( &Sha256::digest( @@ -499,10 +489,7 @@ pub fn gossipsub_config( let duplicate_cache_time = Duration::from_secs(slots_per_epoch * seconds_per_slot * 2); gossipsub::ConfigBuilder::default() - .max_transmit_size(gossip_max_size( - is_bellatrix_enabled, - gossipsub_config_params.gossip_max_size, - )) + .max_transmit_size(gossipsub_config_params.gossipsub_max_transmit_size) .heartbeat_interval(load.heartbeat_interval) .mesh_n(load.mesh_n) .mesh_n_low(load.mesh_n_low) diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 2f8fd82c51..98c61bd068 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -12,7 +12,6 @@ pub mod peer_manager; pub mod rpc; pub mod types; -pub use config::gossip_max_size; use libp2p::swarm::DialError; pub use listen_addr::*; diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 838f1b8a16..b3239fa6cb 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -1002,10 +1002,7 @@ mod tests { } /// Bellatrix block with length < max_rpc_size. 
- fn bellatrix_block_small( - fork_context: &ForkContext, - spec: &ChainSpec, - ) -> SignedBeaconBlock { + fn bellatrix_block_small(spec: &ChainSpec) -> SignedBeaconBlock { let mut block: BeaconBlockBellatrix<_, FullPayload> = BeaconBlockBellatrix::empty(&Spec::default_spec()); let tx = VariableList::from(vec![0; 1024]); @@ -1014,17 +1011,14 @@ mod tests { block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Bellatrix(block); - assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context, spec.max_chunk_size as usize)); + assert!(block.ssz_bytes_len() <= spec.max_payload_size as usize); SignedBeaconBlock::from_block(block, Signature::empty()) } /// Bellatrix block with length > MAX_RPC_SIZE. /// The max limit for a Bellatrix block is in the order of ~16GiB which wouldn't fit in memory. /// Hence, we generate a Bellatrix block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. - fn bellatrix_block_large( - fork_context: &ForkContext, - spec: &ChainSpec, - ) -> SignedBeaconBlock { + fn bellatrix_block_large(spec: &ChainSpec) -> SignedBeaconBlock { let mut block: BeaconBlockBellatrix<_, FullPayload> = BeaconBlockBellatrix::empty(&Spec::default_spec()); let tx = VariableList::from(vec![0; 1024]); @@ -1033,7 +1027,7 @@ mod tests { block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Bellatrix(block); - assert!(block.ssz_bytes_len() > max_rpc_size(fork_context, spec.max_chunk_size as usize)); + assert!(block.ssz_bytes_len() > spec.max_payload_size as usize); SignedBeaconBlock::from_block(block, Signature::empty()) } @@ -1138,7 +1132,7 @@ mod tests { ) -> Result { let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(fork_name)); - let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); + let max_packet_size = spec.max_payload_size as usize; let mut buf = BytesMut::new(); let mut 
snappy_inbound_codec = @@ -1185,7 +1179,7 @@ mod tests { ) -> Result>, RPCError> { let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(fork_name)); - let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); + let max_packet_size = spec.max_payload_size as usize; let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new(snappy_protocol_id, max_packet_size, fork_context); // decode message just as snappy message @@ -1206,7 +1200,7 @@ mod tests { /// Verifies that requests we send are encoded in a way that we would correctly decode too. fn encode_then_decode_request(req: RequestType, fork_name: ForkName, spec: &ChainSpec) { let fork_context = Arc::new(fork_context(fork_name)); - let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); + let max_packet_size = spec.max_payload_size as usize; let protocol = ProtocolId::new(req.versioned_protocol(), Encoding::SSZSnappy); // Encode a request we send let mut buf = BytesMut::new(); @@ -1583,10 +1577,8 @@ mod tests { )))) ); - let bellatrix_block_small = - bellatrix_block_small(&fork_context(ForkName::Bellatrix), &chain_spec); - let bellatrix_block_large = - bellatrix_block_large(&fork_context(ForkName::Bellatrix), &chain_spec); + let bellatrix_block_small = bellatrix_block_small(&chain_spec); + let bellatrix_block_large = bellatrix_block_large(&chain_spec); assert_eq!( encode_then_decode_response( @@ -2086,7 +2078,7 @@ mod tests { // Insert length-prefix uvi_codec - .encode(chain_spec.max_chunk_size as usize + 1, &mut dst) + .encode(chain_spec.max_payload_size as usize + 1, &mut dst) .unwrap(); // Insert snappy stream identifier @@ -2124,7 +2116,7 @@ mod tests { let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( snappy_protocol_id, - max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize), + chain_spec.max_payload_size as usize, fork_context, ); @@ -2160,7 +2152,7 @@ mod tests { let mut 
snappy_outbound_codec = SSZSnappyOutboundCodec::::new( snappy_protocol_id, - max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize), + chain_spec.max_payload_size as usize, fork_context, ); @@ -2189,7 +2181,7 @@ mod tests { let chain_spec = Spec::default_spec(); - let max_rpc_size = max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize); + let max_rpc_size = chain_spec.max_payload_size as usize; let limit = protocol_id.rpc_response_limits::(&fork_context); let mut max = encode_len(limit.max + 1); let mut codec = SSZSnappyOutboundCodec::::new( diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 03f1395b8b..0e7686175a 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -33,7 +33,7 @@ pub use methods::{ BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest, ResponseTermination, RpcErrorResponse, StatusMessage, }; -pub use protocol::{max_rpc_size, Protocol, RPCError}; +pub use protocol::{Protocol, RPCError}; use self::config::{InboundRateLimiterConfig, OutboundRateLimiterConfig}; use self::protocol::RPCProtocol; @@ -143,7 +143,7 @@ pub struct RPCMessage { type BehaviourAction = ToSwarm, RPCSend>; pub struct NetworkParams { - pub max_chunk_size: usize, + pub max_payload_size: usize, pub ttfb_timeout: Duration, pub resp_timeout: Duration, } @@ -284,7 +284,7 @@ where let protocol = SubstreamProtocol::new( RPCProtocol { fork_context: self.fork_context.clone(), - max_rpc_size: max_rpc_size(&self.fork_context, self.network_params.max_chunk_size), + max_rpc_size: self.fork_context.spec.max_payload_size as usize, enable_light_client_server: self.enable_light_client_server, phantom: PhantomData, ttfb_timeout: self.network_params.ttfb_timeout, @@ -315,7 +315,7 @@ where let protocol = SubstreamProtocol::new( RPCProtocol { fork_context: self.fork_context.clone(), - max_rpc_size: max_rpc_size(&self.fork_context, 
self.network_params.max_chunk_size), + max_rpc_size: self.fork_context.spec.max_payload_size as usize, enable_light_client_server: self.enable_light_client_server, phantom: PhantomData, ttfb_timeout: self.network_params.ttfb_timeout, diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index eac7d67490..8fc1e9a5f4 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -57,7 +57,7 @@ pub static SIGNED_BEACON_BLOCK_ALTAIR_MAX: LazyLock = LazyLock::new(|| { /// The `BeaconBlockBellatrix` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. /// We calculate the value from its fields instead of constructing the block and checking the length. /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network -/// with `max_chunk_size`. +/// with `max_payload_size`. pub static SIGNED_BEACON_BLOCK_BELLATRIX_MAX: LazyLock = LazyLock::new(|| // Size of a full altair block *SIGNED_BEACON_BLOCK_ALTAIR_MAX @@ -122,15 +122,6 @@ const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; /// established before the stream is terminated. const REQUEST_TIMEOUT: u64 = 15; -/// Returns the maximum bytes that can be sent across the RPC. -pub fn max_rpc_size(fork_context: &ForkContext, max_chunk_size: usize) -> usize { - if fork_context.current_fork().bellatrix_enabled() { - max_chunk_size - } else { - max_chunk_size / 10 - } -} - /// Returns the rpc limits for beacon_block_by_range and beacon_block_by_root responses. 
/// /// Note: This function should take care to return the min/max limits accounting for all diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 06d806ce0b..7fc7de3edd 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -224,7 +224,7 @@ impl Network { let gossipsub_config_params = GossipsubConfigParams { message_domain_valid_snappy: ctx.chain_spec.message_domain_valid_snappy, - gossip_max_size: ctx.chain_spec.gossip_max_size as usize, + gossipsub_max_transmit_size: ctx.chain_spec.max_message_size(), }; let gs_config = gossipsub_config( config.network_load, @@ -310,7 +310,9 @@ impl Network { ) }); - let snappy_transform = SnappyTransform::new(gs_config.max_transmit_size()); + let spec = &ctx.chain_spec; + let snappy_transform = + SnappyTransform::new(spec.max_payload_size as usize, spec.max_compressed_len()); let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( MessageAuthenticity::Anonymous, gs_config.clone(), @@ -349,7 +351,7 @@ impl Network { }; let network_params = NetworkParams { - max_chunk_size: ctx.chain_spec.max_chunk_size as usize, + max_payload_size: ctx.chain_spec.max_payload_size as usize, ttfb_timeout: ctx.chain_spec.ttfb_timeout(), resp_timeout: ctx.chain_spec.resp_timeout(), }; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index c199d2312b..880b387250 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -52,13 +52,16 @@ pub enum PubsubMessage { // Implements the `DataTransform` trait of gossipsub to employ snappy compression pub struct SnappyTransform { /// Sets the maximum size we allow gossipsub messages to decompress to. 
- max_size_per_message: usize, + max_uncompressed_len: usize, + /// Sets the maximum size we allow for compressed gossipsub message data. + max_compressed_len: usize, } impl SnappyTransform { - pub fn new(max_size_per_message: usize) -> Self { + pub fn new(max_uncompressed_len: usize, max_compressed_len: usize) -> Self { SnappyTransform { - max_size_per_message, + max_uncompressed_len, + max_compressed_len, } } } @@ -69,12 +72,19 @@ impl gossipsub::DataTransform for SnappyTransform { &self, raw_message: gossipsub::RawMessage, ) -> Result { - // check the length of the raw bytes - let len = decompress_len(&raw_message.data)?; - if len > self.max_size_per_message { + // first check the size of the compressed payload + if raw_message.data.len() > self.max_compressed_len { return Err(Error::new( ErrorKind::InvalidData, - "ssz_snappy decoded data > GOSSIP_MAX_SIZE", + "ssz_snappy encoded data > max_compressed_len", + )); + } + // check the length of the uncompressed bytes + let len = decompress_len(&raw_message.data)?; + if len > self.max_uncompressed_len { + return Err(Error::new( + ErrorKind::InvalidData, + "ssz_snappy decoded data > MAX_PAYLOAD_SIZE", )); } @@ -98,10 +108,10 @@ impl gossipsub::DataTransform for SnappyTransform { ) -> Result, std::io::Error> { // Currently we are not employing topic-based compression. Everything is expected to be // snappy compressed. 
- if data.len() > self.max_size_per_message { + if data.len() > self.max_uncompressed_len { return Err(Error::new( ErrorKind::InvalidData, - "ssz_snappy Encoded data > GOSSIP_MAX_SIZE", + "ssz_snappy Encoded data > MAX_PAYLOAD_SIZE", )); } let mut encoder = Encoder::new(); diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 80364753d7..0d83c4f74e 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -5,7 +5,7 @@ mod common; use common::Protocol; use lighthouse_network::rpc::{methods::*, RequestType}; use lighthouse_network::service::api_types::AppRequestId; -use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Response}; +use lighthouse_network::{NetworkEvent, ReportSource, Response}; use slog::{debug, warn, Level}; use ssz::Encode; use ssz_types::VariableList; @@ -15,14 +15,14 @@ use tokio::runtime::Runtime; use tokio::time::sleep; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BlobSidecar, ChainSpec, - EmptyBlock, Epoch, EthSpec, FixedBytesExtended, ForkContext, ForkName, Hash256, MinimalEthSpec, + EmptyBlock, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MinimalEthSpec, RuntimeVariableList, Signature, SignedBeaconBlock, Slot, }; type E = MinimalEthSpec; /// Bellatrix block with length < max_rpc_size. 
-fn bellatrix_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock { +fn bellatrix_block_small(spec: &ChainSpec) -> BeaconBlock { let mut block = BeaconBlockBellatrix::::empty(spec); let tx = VariableList::from(vec![0; 1024]); let txs = VariableList::from(std::iter::repeat_n(tx, 5000).collect::>()); @@ -30,14 +30,14 @@ fn bellatrix_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> Beacon block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Bellatrix(block); - assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context, spec.max_chunk_size as usize)); + assert!(block.ssz_bytes_len() <= spec.max_payload_size as usize); block } /// Bellatrix block with length > MAX_RPC_SIZE. /// The max limit for a bellatrix block is in the order of ~16GiB which wouldn't fit in memory. /// Hence, we generate a bellatrix block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. -fn bellatrix_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock { +fn bellatrix_block_large(spec: &ChainSpec) -> BeaconBlock { let mut block = BeaconBlockBellatrix::::empty(spec); let tx = VariableList::from(vec![0; 1024]); let txs = VariableList::from(std::iter::repeat_n(tx, 100000).collect::>()); @@ -45,7 +45,7 @@ fn bellatrix_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> Beacon block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Bellatrix(block); - assert!(block.ssz_bytes_len() > max_rpc_size(fork_context, spec.max_chunk_size as usize)); + assert!(block.ssz_bytes_len() > spec.max_payload_size as usize); block } @@ -193,7 +193,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_altair = Response::BlocksByRange(Some(Arc::new(signed_full_block))); - let full_block = bellatrix_block_small(&common::fork_context(ForkName::Bellatrix), &spec); + let 
full_block = bellatrix_block_small(&spec); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_bellatrix_small = Response::BlocksByRange(Some(Arc::new(signed_full_block))); @@ -455,7 +455,7 @@ fn test_tcp_blocks_by_range_over_limit() { })); // BlocksByRange Response - let full_block = bellatrix_block_large(&common::fork_context(ForkName::Bellatrix), &spec); + let full_block = bellatrix_block_large(&spec); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_bellatrix_large = Response::BlocksByRange(Some(Arc::new(signed_full_block))); @@ -834,7 +834,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_altair = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); - let full_block = bellatrix_block_small(&common::fork_context(ForkName::Bellatrix), &spec); + let full_block = bellatrix_block_small(&spec); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_bellatrix_small = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index cd92ee8fad..686843b000 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -675,10 +675,7 @@ pub fn get_config( }; } - client_config.chain.max_network_size = lighthouse_network::gossip_max_size( - spec.bellatrix_fork_epoch.is_some(), - spec.gossip_max_size as usize, - ); + client_config.chain.max_network_size = spec.max_payload_size as usize; if cli_args.get_flag("slasher") { let slasher_dir = if let Some(slasher_dir) = cli_args.get_one::("slasher-dir") { diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index 1455ec5f63..dbfe2707d7 100644 --- 
a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -100,15 +100,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0xb97036A26259B7147018913bD58a774cf91acf25 # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # 33024, ~31 days MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 9ff5a16198..359d57b0a5 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -97,9 +97,8 @@ DEPOSIT_CONTRACT_ADDRESS: 0x0B98057eA310F4d31F2a452B414647007d1645d9 # Network # --------------------------------------------------------------- SUBNETS_PER_NODE: 4 -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -MAX_CHUNK_SIZE: 10485760 TTFB_TIMEOUT: 5 RESP_TIMEOUT: 10 MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index e5f38b8c9b..58010991bf 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -88,15 +88,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x4242424242424242424242424242424242424242 # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 
1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s diff --git a/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml b/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml index 19d7797424..5cca1cd037 100644 --- a/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml @@ -93,15 +93,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 3ff99b7171..375441e504 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -103,15 +103,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, 
~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index af78332205..e9e8a3ab14 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -89,15 +89,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 006c40a205..12602026e5 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -210,10 +210,9 @@ pub struct ChainSpec { pub boot_nodes: Vec, pub network_id: u8, pub target_aggregators_per_committee: u64, - pub gossip_max_size: u64, + pub max_payload_size: u64, max_request_blocks: u64, pub min_epochs_for_block_requests: u64, - pub max_chunk_size: u64, pub ttfb_timeout: u64, pub resp_timeout: u64, pub attestation_propagation_slot_range: u64, @@ -712,6 +711,35 @@ impl ChainSpec { } } + /// Worst-case compressed length for a given payload of size n when using snappy. 
+ /// + /// https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47 + /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#max_compressed_len + fn max_compressed_len_snappy(n: usize) -> Option { + 32_usize.checked_add(n)?.checked_add(n / 6) + } + + /// Max compressed length of a message that we receive over gossip. + pub fn max_compressed_len(&self) -> usize { + Self::max_compressed_len_snappy(self.max_payload_size as usize) + .expect("should not overflow") + } + + /// Max allowed size of a raw, compressed message received over the network. + /// + /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#max_compressed_len + pub fn max_message_size(&self) -> usize { + std::cmp::max( + // 1024 to account for framing + encoding overhead + Self::max_compressed_len_snappy(self.max_payload_size as usize) + .expect("should not overflow") + .safe_add(1024) + .expect("should not overflow"), + //1MB + 1024 * 1024, + ) + } + /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. 
pub fn mainnet() -> Self { Self { @@ -926,9 +954,8 @@ impl ChainSpec { subnets_per_node: 2, maximum_gossip_clock_disparity_millis: default_maximum_gossip_clock_disparity_millis(), target_aggregators_per_committee: 16, - gossip_max_size: default_gossip_max_size(), + max_payload_size: default_max_payload_size(), min_epochs_for_block_requests: default_min_epochs_for_block_requests(), - max_chunk_size: default_max_chunk_size(), ttfb_timeout: default_ttfb_timeout(), resp_timeout: default_resp_timeout(), message_domain_invalid_snappy: default_message_domain_invalid_snappy(), @@ -1256,9 +1283,8 @@ impl ChainSpec { subnets_per_node: 4, // Make this larger than usual to avoid network damage maximum_gossip_clock_disparity_millis: default_maximum_gossip_clock_disparity_millis(), target_aggregators_per_committee: 16, - gossip_max_size: default_gossip_max_size(), + max_payload_size: default_max_payload_size(), min_epochs_for_block_requests: 33024, - max_chunk_size: default_max_chunk_size(), ttfb_timeout: default_ttfb_timeout(), resp_timeout: default_resp_timeout(), message_domain_invalid_snappy: default_message_domain_invalid_snappy(), @@ -1430,18 +1456,15 @@ pub struct Config { #[serde(with = "serde_utils::quoted_u64")] gas_limit_adjustment_factor: u64, - #[serde(default = "default_gossip_max_size")] + #[serde(default = "default_max_payload_size")] #[serde(with = "serde_utils::quoted_u64")] - gossip_max_size: u64, + max_payload_size: u64, #[serde(default = "default_max_request_blocks")] #[serde(with = "serde_utils::quoted_u64")] max_request_blocks: u64, #[serde(default = "default_min_epochs_for_block_requests")] #[serde(with = "serde_utils::quoted_u64")] min_epochs_for_block_requests: u64, - #[serde(default = "default_max_chunk_size")] - #[serde(with = "serde_utils::quoted_u64")] - max_chunk_size: u64, #[serde(default = "default_ttfb_timeout")] #[serde(with = "serde_utils::quoted_u64")] ttfb_timeout: u64, @@ -1576,7 +1599,7 @@ const fn default_gas_limit_adjustment_factor() -> 
u64 { 1024 } -const fn default_gossip_max_size() -> u64 { +const fn default_max_payload_size() -> u64 { 10485760 } @@ -1584,10 +1607,6 @@ const fn default_min_epochs_for_block_requests() -> u64 { 33024 } -const fn default_max_chunk_size() -> u64 { - 10485760 -} - const fn default_ttfb_timeout() -> u64 { 5 } @@ -1853,10 +1872,9 @@ impl Config { gas_limit_adjustment_factor: spec.gas_limit_adjustment_factor, - gossip_max_size: spec.gossip_max_size, + max_payload_size: spec.max_payload_size, max_request_blocks: spec.max_request_blocks, min_epochs_for_block_requests: spec.min_epochs_for_block_requests, - max_chunk_size: spec.max_chunk_size, ttfb_timeout: spec.ttfb_timeout, resp_timeout: spec.resp_timeout, attestation_propagation_slot_range: spec.attestation_propagation_slot_range, @@ -1934,9 +1952,8 @@ impl Config { deposit_network_id, deposit_contract_address, gas_limit_adjustment_factor, - gossip_max_size, + max_payload_size, min_epochs_for_block_requests, - max_chunk_size, ttfb_timeout, resp_timeout, message_domain_invalid_snappy, @@ -2005,9 +2022,8 @@ impl Config { terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, - gossip_max_size, + max_payload_size, min_epochs_for_block_requests, - max_chunk_size, ttfb_timeout, resp_timeout, message_domain_invalid_snappy, @@ -2307,9 +2323,8 @@ mod yaml_tests { check_default!(terminal_block_hash); check_default!(terminal_block_hash_activation_epoch); check_default!(bellatrix_fork_version); - check_default!(gossip_max_size); + check_default!(max_payload_size); check_default!(min_epochs_for_block_requests); - check_default!(max_chunk_size); check_default!(ttfb_timeout); check_default!(resp_timeout); check_default!(message_domain_invalid_snappy); @@ -2335,4 +2350,17 @@ mod yaml_tests { [0, 0, 0, 1] ); } + + #[test] + fn test_max_network_limits_overflow() { + let mut spec = MainnetEthSpec::default_spec(); + // Should not overflow + let _ = spec.max_message_size(); + let _ = 
spec.max_compressed_len(); + + spec.max_payload_size *= 10; + // Should not overflow even with a 10x increase in max + let _ = spec.max_message_size(); + let _ = spec.max_compressed_len(); + } } diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index 34e42a61f6..3f72e2ea6c 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -87,9 +87,8 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa # Network # --------------------------------------------------------------- SUBNETS_PER_NODE: 2 -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -MAX_CHUNK_SIZE: 10485760 TTFB_TIMEOUT: 5 RESP_TIMEOUT: 10 MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 From ec643843e02614a845a9a30a0f8cd94626f1f4b4 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 9 Apr 2025 14:14:50 +1000 Subject: [PATCH 23/35] Remove/document remaining Electra TODOs (#6982) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not essential to merge this now, but I'm going through TODOs for Electra to make sure we haven't missed anything. Targeting this at the release branch anyway so that auditors/readers don't get alarmed 😅 --- beacon_node/execution_layer/src/lib.rs | 1 + consensus/state_processing/src/genesis.rs | 3 +-- .../src/per_block_processing/verify_attestation.rs | 2 +- consensus/types/src/validator.rs | 1 - 4 files changed, 3 insertions(+), 4 deletions(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 820ec8d6b6..b09205646e 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -209,6 +209,7 @@ pub enum BlockProposalContents> { /// `None` for blinded `PayloadAndBlobs`. 
blobs_and_proofs: Option<(BlobsList, KzgProofs)>, // TODO(electra): this should probably be a separate variant/superstruct + // See: https://github.com/sigp/lighthouse/issues/6981 requests: Option>, }, } diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 10723ecc51..8e62427ef1 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -123,8 +123,7 @@ pub fn initialize_beacon_state_from_eth1( // Remove intermediate Deneb fork from `state.fork`. state.fork_mut().previous_version = spec.electra_fork_version; - // TODO(electra): think about this more and determine the best way to - // do this. The spec tests will expect that the sync committees are + // The spec tests will expect that the sync committees are // calculated using the electra value for MAX_EFFECTIVE_BALANCE when // calling `initialize_beacon_state_from_eth1()`. But the sync committees // are actually calcuated back in `upgrade_to_altair()`. We need to diff --git a/consensus/state_processing/src/per_block_processing/verify_attestation.rs b/consensus/state_processing/src/per_block_processing/verify_attestation.rs index 0b399bea6c..6b4a394c73 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attestation.rs @@ -63,7 +63,7 @@ pub fn verify_attestation_for_state<'ctxt, E: EthSpec>( ) -> Result> { let data = attestation.data(); - // TODO(electra) choosing a validation based on the attestation's fork + // NOTE: choosing a validation based on the attestation's fork // rather than the state's fork makes this simple, but technically the spec // defines this verification based on the state's fork. 
match attestation { diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 5aed90d2c1..027958b178 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -249,7 +249,6 @@ impl Validator { } } - /// TODO(electra): refactor these functions and make it simpler.. this is a mess /// Returns `true` if the validator is partially withdrawable. fn is_partially_withdrawable_validator_capella(&self, balance: u64, spec: &ChainSpec) -> bool { self.has_eth1_withdrawal_credential(spec) From d96b73152e0e4bcedb8e747016ba8070029db50d Mon Sep 17 00:00:00 2001 From: SunnysidedJ Date: Wed, 9 Apr 2025 16:35:15 +0100 Subject: [PATCH 24/35] Fix for #6296: Deterministic RNG in peer DAS publish block tests (#7192) #6296: Deterministic RNG in peer DAS publish block tests Made test functions to call publish-block APIs with true for the deterministic RNG boolean parameter while production code with false. This will deterministically shuffle columns for unit tests under broadcast_validation_tests.rs. 
--- Cargo.lock | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 3 +++ beacon_node/beacon_chain/src/builder.rs | 16 ++++++++++++++++ beacon_node/beacon_chain/src/test_utils.rs | 6 +++--- beacon_node/beacon_chain/tests/store_tests.rs | 2 ++ beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/builder.rs | 7 ++++++- beacon_node/http_api/src/publish_blocks.rs | 2 +- .../network/src/subnet_service/tests/mod.rs | 3 +++ 9 files changed, 36 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 86019c913d..b11b585173 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1417,6 +1417,7 @@ dependencies = [ "monitoring_api", "network", "operation_pool", + "rand 0.8.5", "sensitive_url", "serde", "serde_json", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d6475de243..4d8e94f86d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -92,6 +92,7 @@ use operation_pool::{ }; use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use proto_array::{DoNotReOrg, ProposerHeadError}; +use rand::RngCore; use safe_arith::SafeArith; use slasher::Slasher; use slot_clock::SlotClock; @@ -491,6 +492,8 @@ pub struct BeaconChain { pub data_availability_checker: Arc>, /// The KZG trusted setup used by this chain. pub kzg: Arc, + /// RNG instance used by the chain. Currently used for shuffling column sidecars in block publishing. 
+ pub rng: Arc>>, } pub enum BeaconBlockResponseWrapper { diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 975be33f0b..812dcbeda7 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -30,6 +30,7 @@ use logging::crit; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{Mutex, RwLock}; use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; +use rand::RngCore; use rayon::prelude::*; use slasher::Slasher; use slot_clock::{SlotClock, TestingSlotClock}; @@ -105,6 +106,7 @@ pub struct BeaconChainBuilder { task_executor: Option, validator_monitor_config: Option, import_all_data_columns: bool, + rng: Option>, } impl @@ -145,6 +147,7 @@ where task_executor: None, validator_monitor_config: None, import_all_data_columns: false, + rng: None, } } @@ -691,6 +694,14 @@ where self } + /// Sets the `rng` field. + /// + /// Currently used for shuffling column sidecars in block publishing. + pub fn rng(mut self, rng: Box) -> Self { + self.rng = Some(rng); + self + } + /// Consumes `self`, returning a `BeaconChain` if all required parameters have been supplied. /// /// An error will be returned at runtime if all required parameters have not been configured. 
@@ -716,6 +727,7 @@ where .genesis_state_root .ok_or("Cannot build without a genesis state root")?; let validator_monitor_config = self.validator_monitor_config.unwrap_or_default(); + let rng = self.rng.ok_or("Cannot build without an RNG")?; let beacon_proposer_cache: Arc> = <_>::default(); let mut validator_monitor = @@ -979,6 +991,7 @@ where .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), kzg: self.kzg.clone(), + rng: Arc::new(Mutex::new(rng)), }; let head = beacon_chain.head_snapshot(); @@ -1184,6 +1197,8 @@ mod test { use genesis::{ generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH, }; + use rand::rngs::StdRng; + use rand::SeedableRng; use ssz::Encode; use std::time::Duration; use store::config::StoreConfig; @@ -1230,6 +1245,7 @@ mod test { .testing_slot_clock(Duration::from_secs(1)) .expect("should configure testing slot clock") .shutdown_sender(shutdown_tx) + .rng(Box::new(StdRng::seed_from_u64(42))) .build() .expect("should build"); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index bcab512a4b..e007d46fc3 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -38,8 +38,7 @@ use kzg::{Kzg, TrustedSetup}; use logging::create_test_tracing_subscriber; use merkle_proof::MerkleTree; use operation_pool::ReceivedPreCapella; -use parking_lot::Mutex; -use parking_lot::RwLockWriteGuard; +use parking_lot::{Mutex, RwLockWriteGuard}; use rand::rngs::StdRng; use rand::Rng; use rand::SeedableRng; @@ -588,7 +587,8 @@ where .chain_config(chain_config) .import_all_data_columns(self.import_all_data_columns) .event_handler(Some(ServerSentEventHandler::new_with_capacity(5))) - .validator_monitor_config(validator_monitor_config); + .validator_monitor_config(validator_monitor_config) + .rng(Box::new(StdRng::seed_from_u64(42))); builder = if let Some(mutator) = self.initial_mutator { mutator(builder) diff --git 
a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index e41f547fb5..2fe1ecc08f 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -16,6 +16,7 @@ use beacon_chain::{ }; use logging::create_test_tracing_subscriber; use maplit::hashset; +use rand::rngs::StdRng; use rand::Rng; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::{state_advance::complete_state_advance, BlockReplayer}; @@ -2373,6 +2374,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .chain_config(ChainConfig::default()) .event_handler(Some(ServerSentEventHandler::new_with_capacity(1))) .execution_layer(Some(mock.el)) + .rng(Box::new(StdRng::seed_from_u64(42))) .build() .expect("should build"); diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index e11fc23072..195c53c4a0 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -31,6 +31,7 @@ logging = { workspace = true } metrics = { workspace = true } monitoring_api = { workspace = true } network = { workspace = true } +rand = { workspace = true } sensitive_url = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index c8ff6521c8..3cb7b33aae 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -33,6 +33,8 @@ use genesis::{interop_genesis_state, Eth1GenesisService, DEFAULT_ETH1_BLOCK_HASH use lighthouse_network::{prometheus_client::registry::Registry, NetworkGlobals}; use monitoring_api::{MonitoringHttpClient, ProcessType}; use network::{NetworkConfig, NetworkSenders, NetworkService}; +use rand::rngs::{OsRng, StdRng}; +use rand::SeedableRng; use slasher::Slasher; use slasher_service::SlasherService; use std::net::TcpListener; @@ -210,7 +212,10 @@ where .event_handler(event_handler) 
.execution_layer(execution_layer) .import_all_data_columns(config.network.subscribe_all_data_column_subnets) - .validator_monitor_config(config.validator_monitor.clone()); + .validator_monitor_config(config.validator_monitor.clone()) + .rng(Box::new( + StdRng::from_rng(OsRng).map_err(|e| format!("Failed to create RNG: {:?}", e))?, + )); let builder = if let Some(slasher) = self.slasher.clone() { builder.slasher(slasher) diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index ab70521686..3d152f8852 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -521,7 +521,7 @@ fn publish_column_sidecars( .len() .saturating_sub(malicious_withhold_count); // Randomize columns before dropping the last malicious_withhold_count items - data_column_sidecars.shuffle(&mut rand::thread_rng()); + data_column_sidecars.shuffle(&mut **chain.rng.lock()); data_column_sidecars.truncate(columns_to_keep); } let pubsub_messages = data_column_sidecars diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 6f9e8cd41a..7fdf9047fc 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -7,6 +7,8 @@ use beacon_chain::{ }; use genesis::{generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use lighthouse_network::NetworkConfig; +use rand::rngs::StdRng; +use rand::SeedableRng; use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::sync::{Arc, LazyLock}; use std::time::{Duration, SystemTime}; @@ -76,6 +78,7 @@ impl TestBeaconChain { Duration::from_millis(SLOT_DURATION_MILLIS), )) .shutdown_sender(shutdown_tx) + .rng(Box::new(StdRng::seed_from_u64(42))) .build() .expect("should build"), ); From 7534f5752df50521aabe4ea8c4db86d80788ebca Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 10 Apr 2025 11:21:01 +1000 Subject: [PATCH 25/35] 
Add `pending_consolidations` Beacon API endpoint (#7290) #7282 Adds the missing `beacon/states/{state_id}/pending_consolidations` Beacon API endpoint along with related tests. --- beacon_node/http_api/src/lib.rs | 34 +++++++++++++++++++++++++++++ beacon_node/http_api/tests/tests.rs | 29 ++++++++++++++++++++++++ common/eth2/src/lib.rs | 20 +++++++++++++++++ 3 files changed, 83 insertions(+) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index a33508dde9..07e20b4437 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1186,6 +1186,39 @@ pub fn serve( }, ); + // GET beacon/states/{state_id}/pending_consolidations + let get_beacon_state_pending_consolidations = beacon_states_path + .clone() + .and(warp::path("pending_consolidations")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(consolidations) = state.pending_consolidations() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending consolidations not found".to_string(), + )); + }; + + Ok((consolidations.clone(), execution_optimistic, finalized)) + }, + )?; + + Ok(api_types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ); + // GET beacon/headers // // Note: this endpoint only returns information about blocks in the canonical chain. 
Given that @@ -4853,6 +4886,7 @@ pub fn serve( .uor(get_beacon_state_randao) .uor(get_beacon_state_pending_deposits) .uor(get_beacon_state_pending_partial_withdrawals) + .uor(get_beacon_state_pending_consolidations) .uor(get_beacon_headers) .uor(get_beacon_headers_block_id) .uor(get_beacon_block) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 6ddd49bfd9..b573302e8b 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1246,6 +1246,33 @@ impl ApiTester { self } + pub async fn test_beacon_states_pending_consolidations(self) -> Self { + for state_id in self.interesting_state_ids() { + let mut state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic, _finalized)| state); + + let result = self + .client + .get_beacon_states_pending_consolidations(state_id.0) + .await + .unwrap() + .map(|res| res.data); + + if result.is_none() && state_opt.is_none() { + continue; + } + + let state = state_opt.as_mut().expect("result should be none"); + let expected = state.pending_consolidations().unwrap(); + + assert_eq!(result.unwrap(), expected.to_vec()); + } + + self + } + pub async fn test_beacon_headers_all_slots(self) -> Self { for slot in 0..CHAIN_LENGTH { let slot = Slot::from(slot); @@ -6404,6 +6431,8 @@ async fn beacon_get_state_info_electra() { .test_beacon_states_pending_deposits() .await .test_beacon_states_pending_partial_withdrawals() + .await + .test_beacon_states_pending_consolidations() .await; } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index a0f7a74527..c806ae065b 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -821,6 +821,26 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } + /// `GET beacon/states/{state_id}/pending_consolidations` + /// + /// Returns `Ok(None)` on a 404 error. 
+ pub async fn get_beacon_states_pending_consolidations( + &self, + state_id: StateId, + ) -> Result>>, Error> + { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("pending_consolidations"); + + self.get_opt(path).await + } + /// `GET beacon/light_client/updates` /// /// Returns `Ok(None)` on a 404 error. From aed562abef148e64ef119ea5400e3e06055e1285 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Wed, 9 Apr 2025 19:17:07 -0700 Subject: [PATCH 26/35] Downgrade light client errors (#7300) Downgrade light client errors to debug Error messages are alarming and usually indicate something's wrong with the beacon node. The Light Client service is supposed to minimally impact users, and most will not care if the light client server is erroring. Furthermore, the only errors we've seen in the wild are during hard forks, for the first few epochs before the fork finalizes.
--- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 42e6deaf16..1dcdb077b5 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4104,7 +4104,7 @@ impl BeaconChain { &mut state, ) .unwrap_or_else(|e| { - error!(self.log, "error caching light_client data {:?}", e); + debug!(self.log, "error caching light_client data {:?}", e); }); } From bb5b00ead56e54afec215a279e119c97f9157ba7 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 10 Apr 2025 16:27:31 +1000 Subject: [PATCH 27/35] Update and cleanup Electra preset (#7303) - Update the bundled `electra.yaml` preset files for `mainnet` and `minimal` to match `consensus-specs` as of: https://github.com/ethereum/consensus-specs/commit/bf09edef17e2900258f7e37631e9452941c26e86 - Add the field `max_pending_deposits_per_epoch` to our runtime representation of `ElectraPreset`. This results in it appearing in `/eth/v1/config/spec` where it was previously absent. 
--- consensus/types/presets/mainnet/electra.yaml | 40 ++++++++++---------- consensus/types/presets/minimal/electra.yaml | 40 ++++++++++---------- consensus/types/src/preset.rs | 37 ++++++++++++------ 3 files changed, 66 insertions(+), 51 deletions(-) diff --git a/consensus/types/presets/mainnet/electra.yaml b/consensus/types/presets/mainnet/electra.yaml index 42afbb233e..55308d5b1c 100644 --- a/consensus/types/presets/mainnet/electra.yaml +++ b/consensus/types/presets/mainnet/electra.yaml @@ -7,44 +7,44 @@ MIN_ACTIVATION_BALANCE: 32000000000 # 2**11 * 10**9 (= 2,048,000,000,000) Gwei MAX_EFFECTIVE_BALANCE_ELECTRA: 2048000000000 -# State list lengths +# Rewards and penalties # --------------------------------------------------------------- -# `uint64(2**27)` (= 134,217,728) -PENDING_DEPOSITS_LIMIT: 134217728 -# `uint64(2**27)` (= 134,217,728) -PENDING_PARTIAL_WITHDRAWALS_LIMIT: 134217728 -# `uint64(2**18)` (= 262,144) -PENDING_CONSOLIDATIONS_LIMIT: 262144 - -# Reward and penalty quotients -# --------------------------------------------------------------- -# `uint64(2**12)` (= 4,096) +# 2**12 (= 4,096) MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA: 4096 -# `uint64(2**12)` (= 4,096) +# 2**12 (= 4,096) WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096 -# # Max operations per block +# State list lengths # --------------------------------------------------------------- -# `uint64(2**0)` (= 1) +# 2**27 (= 134,217,728) pending deposits +PENDING_DEPOSITS_LIMIT: 134217728 +# 2**27 (= 134,217,728) pending partial withdrawals +PENDING_PARTIAL_WITHDRAWALS_LIMIT: 134217728 +# 2**18 (= 262,144) pending consolidations +PENDING_CONSOLIDATIONS_LIMIT: 262144 + +# Max operations per block +# --------------------------------------------------------------- +# 2**0 (= 1) attester slashings MAX_ATTESTER_SLASHINGS_ELECTRA: 1 -# `uint64(2**3)` (= 8) +# 2**3 (= 8) attestations MAX_ATTESTATIONS_ELECTRA: 8 -# `uint64(2**1)` (= 2) -MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Execution # 
--------------------------------------------------------------- -# 2**13 (= 8192) deposit requests +# 2**13 (= 8,192) deposit requests MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 8192 # 2**4 (= 16) withdrawal requests MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 +# 2**1 (= 2) consolidation requests +MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Withdrawals processing # --------------------------------------------------------------- -# 2**3 ( = 8) pending withdrawals +# 2**3 (= 8) pending withdrawals MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 8 # Pending deposits processing # --------------------------------------------------------------- -# 2**4 ( = 4) pending deposits +# 2**4 (= 16) pending deposits MAX_PENDING_DEPOSITS_PER_EPOCH: 16 diff --git a/consensus/types/presets/minimal/electra.yaml b/consensus/types/presets/minimal/electra.yaml index 44e4769756..f99effe0f1 100644 --- a/consensus/types/presets/minimal/electra.yaml +++ b/consensus/types/presets/minimal/electra.yaml @@ -7,44 +7,44 @@ MIN_ACTIVATION_BALANCE: 32000000000 # 2**11 * 10**9 (= 2,048,000,000,000) Gwei MAX_EFFECTIVE_BALANCE_ELECTRA: 2048000000000 -# State list lengths +# Rewards and penalties # --------------------------------------------------------------- -# `uint64(2**27)` (= 134,217,728) -PENDING_DEPOSITS_LIMIT: 134217728 -# [customized] `uint64(2**6)` (= 64) -PENDING_PARTIAL_WITHDRAWALS_LIMIT: 64 -# [customized] `uint64(2**6)` (= 64) -PENDING_CONSOLIDATIONS_LIMIT: 64 - -# Reward and penalty quotients -# --------------------------------------------------------------- -# `uint64(2**12)` (= 4,096) +# 2**12 (= 4,096) MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA: 4096 -# `uint64(2**12)` (= 4,096) +# 2**12 (= 4,096) WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096 -# # Max operations per block +# State list lengths # --------------------------------------------------------------- -# `uint64(2**0)` (= 1) +# 2**27 (= 134,217,728) pending deposits +PENDING_DEPOSITS_LIMIT: 134217728 +# [customized] 2**6 (= 64) pending partial 
withdrawals +PENDING_PARTIAL_WITHDRAWALS_LIMIT: 64 +# [customized] 2**6 (= 64) pending consolidations +PENDING_CONSOLIDATIONS_LIMIT: 64 + +# Max operations per block +# --------------------------------------------------------------- +# 2**0 (= 1) attester slashings MAX_ATTESTER_SLASHINGS_ELECTRA: 1 -# `uint64(2**3)` (= 8) +# 2**3 (= 8) attestations MAX_ATTESTATIONS_ELECTRA: 8 -# `uint64(2**1)` (= 2) -MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Execution # --------------------------------------------------------------- -# [customized] +# [customized] 2**2 (= 4) deposit requests MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 4 # [customized] 2**1 (= 2) withdrawal requests MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 2 +# 2**1 (= 2) consolidation requests +MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Withdrawals processing # --------------------------------------------------------------- -# 2**1 ( = 2) pending withdrawals +# 2**1 (= 2) pending withdrawals MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 2 # Pending deposits processing # --------------------------------------------------------------- -# 2**4 ( = 4) pending deposits +# 2**4 (= 16) pending deposits MAX_PENDING_DEPOSITS_PER_EPOCH: 16 diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 707d2d4697..d025c72eac 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -227,28 +227,36 @@ pub struct ElectraPreset { pub min_activation_balance: u64, #[serde(with = "serde_utils::quoted_u64")] pub max_effective_balance_electra: u64, + #[serde(with = "serde_utils::quoted_u64")] pub min_slashing_penalty_quotient_electra: u64, #[serde(with = "serde_utils::quoted_u64")] pub whistleblower_reward_quotient_electra: u64, - #[serde(with = "serde_utils::quoted_u64")] - pub max_pending_partials_per_withdrawals_sweep: u64, + #[serde(with = "serde_utils::quoted_u64")] pub pending_deposits_limit: u64, #[serde(with = "serde_utils::quoted_u64")] pub pending_partial_withdrawals_limit: u64, #[serde(with = 
"serde_utils::quoted_u64")] pub pending_consolidations_limit: u64, - #[serde(with = "serde_utils::quoted_u64")] - pub max_consolidation_requests_per_payload: u64, - #[serde(with = "serde_utils::quoted_u64")] - pub max_deposit_requests_per_payload: u64, + #[serde(with = "serde_utils::quoted_u64")] pub max_attester_slashings_electra: u64, #[serde(with = "serde_utils::quoted_u64")] pub max_attestations_electra: u64, + + #[serde(with = "serde_utils::quoted_u64")] + pub max_deposit_requests_per_payload: u64, #[serde(with = "serde_utils::quoted_u64")] pub max_withdrawal_requests_per_payload: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_consolidation_requests_per_payload: u64, + + #[serde(with = "serde_utils::quoted_u64")] + pub max_pending_partials_per_withdrawals_sweep: u64, + + #[serde(with = "serde_utils::quoted_u64")] + pub max_pending_deposits_per_epoch: u64, } impl ElectraPreset { @@ -256,19 +264,26 @@ impl ElectraPreset { Self { min_activation_balance: spec.min_activation_balance, max_effective_balance_electra: spec.max_effective_balance_electra, + min_slashing_penalty_quotient_electra: spec.min_slashing_penalty_quotient_electra, whistleblower_reward_quotient_electra: spec.whistleblower_reward_quotient_electra, - max_pending_partials_per_withdrawals_sweep: spec - .max_pending_partials_per_withdrawals_sweep, + pending_deposits_limit: E::pending_deposits_limit() as u64, pending_partial_withdrawals_limit: E::pending_partial_withdrawals_limit() as u64, pending_consolidations_limit: E::pending_consolidations_limit() as u64, - max_consolidation_requests_per_payload: E::max_consolidation_requests_per_payload() - as u64, - max_deposit_requests_per_payload: E::max_deposit_requests_per_payload() as u64, + max_attester_slashings_electra: E::max_attester_slashings_electra() as u64, max_attestations_electra: E::max_attestations_electra() as u64, + + max_deposit_requests_per_payload: E::max_deposit_requests_per_payload() as u64, 
max_withdrawal_requests_per_payload: E::max_withdrawal_requests_per_payload() as u64, + max_consolidation_requests_per_payload: E::max_consolidation_requests_per_payload() + as u64, + + max_pending_partials_per_withdrawals_sweep: spec + .max_pending_partials_per_withdrawals_sweep, + + max_pending_deposits_per_epoch: E::max_pending_deposits_per_epoch() as u64, } } } From 93703623c8ca80e0f4315311dccdafff694dd1e3 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Thu, 10 Apr 2025 16:51:14 +0800 Subject: [PATCH 28/35] Gnosis Pectra fork epoch (#7296) ~~* #7274~~ * #7295 --- .../gnosis/config.yaml | 18 +++++++++++++++--- consensus/types/src/chain_spec.rs | 12 ++++++------ 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 359d57b0a5..4413c21c4b 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -43,7 +43,7 @@ DENEB_FORK_VERSION: 0x04000064 DENEB_FORK_EPOCH: 889856 # 2024-03-11T18:30:20.000Z # Electra ELECTRA_FORK_VERSION: 0x05000064 -ELECTRA_FORK_EPOCH: 18446744073709551615 +ELECTRA_FORK_EPOCH: 1337856 # 2025-04-30T14:03:40.000Z # Fulu FULU_FORK_VERSION: 0x06000064 FULU_FORK_EPOCH: 18446744073709551615 @@ -117,8 +117,20 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 16384 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 -# `uint64(6)` -MAX_BLOBS_PER_BLOCK: 6 +# `uint64(2)` +MAX_BLOBS_PER_BLOCK: 2 + +# Electra +# 2**7 * 10**9 (= 128,000,000,000) +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**6 * 10**9 (= 64,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 64000000000 +# `2` +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 2 +# `uint64(2)` +MAX_BLOBS_PER_BLOCK_ELECTRA: 2 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA 
+MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 256 # DAS NUMBER_OF_COLUMNS: 128 diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 12602026e5..a9908e87f3 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1236,7 +1236,7 @@ impl ChainSpec { * Electra hard fork params */ electra_fork_version: [0x05, 0x00, 0x00, 0x64], - electra_fork_epoch: None, + electra_fork_epoch: Some(Epoch::new(1337856)), unset_deposit_requests_start_index: u64::MAX, full_exit_request_amount: 0, min_activation_balance: option_wrapper(|| { @@ -1258,7 +1258,7 @@ impl ChainSpec { }) .expect("calculation does not overflow"), max_per_epoch_activation_exit_churn_limit: option_wrapper(|| { - u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?) + u64::checked_pow(2, 6)?.checked_mul(u64::checked_pow(10, 9)?) }) .expect("calculation does not overflow"), @@ -1300,7 +1300,7 @@ impl ChainSpec { max_request_data_column_sidecars: default_max_request_data_column_sidecars(), min_epochs_for_blob_sidecars_requests: 16384, blob_sidecar_subnet_count: default_blob_sidecar_subnet_count(), - max_blobs_per_block: default_max_blobs_per_block(), + max_blobs_per_block: 2, /* * Derived Deneb Specific @@ -1313,9 +1313,9 @@ impl ChainSpec { /* * Networking Electra specific */ - max_blobs_per_block_electra: default_max_blobs_per_block_electra(), - blob_sidecar_subnet_count_electra: default_blob_sidecar_subnet_count_electra(), - max_request_blob_sidecars_electra: default_max_request_blob_sidecars_electra(), + max_blobs_per_block_electra: 2, + blob_sidecar_subnet_count_electra: 2, + max_request_blob_sidecars_electra: 256, /* * Application specific From 9304a592bf0b4f963a49f5e8ca0aae1596cbae11 Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 11 Apr 2025 12:39:52 +1000 Subject: [PATCH 29/35] Update crossbeam to fix `cargo audit` failure (#7313) There is a `cargo audit` failure related to this CVE: https://rustsec.org/advisories/RUSTSEC-2025-0024 Unblock CI 
by updating `crossbeam-channel` --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d1ceb2dbaf..eee67a413e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1696,9 +1696,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.14" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" dependencies = [ "crossbeam-utils", ] From ef8ec35ac582e3d3bf27ad2070fa61e0a9e1fb5b Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 10 Apr 2025 21:47:27 -0700 Subject: [PATCH 30/35] Ensure `light_client/updates` endpoint returns spec compliant SSZ data (#7230) Closes #7167 - Ensure the fork digest is generated from ther light client updates attested header and not the signature slot - Ensure the format of the SSZ response is spec compliant --- beacon_node/http_api/src/light_client.rs | 24 +++++++++++++----------- common/eth2/src/types.rs | 8 ++++---- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs index ac8c08581c..2d0a5d09a1 100644 --- a/beacon_node/http_api/src/light_client.rs +++ b/beacon_node/http_api/src/light_client.rs @@ -4,7 +4,7 @@ use crate::version::{ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::{ self as api_types, ChainSpec, ForkVersionedResponse, LightClientUpdate, - LightClientUpdateResponseChunk, LightClientUpdateSszResponse, LightClientUpdatesQuery, + LightClientUpdateResponseChunk, LightClientUpdateResponseChunkInner, LightClientUpdatesQuery, }; use ssz::Encode; use std::sync::Arc; @@ -37,15 +37,9 @@ pub fn get_light_client_updates( .map(|update| map_light_client_update_to_ssz_chunk::(&chain, update)) .collect::>(); - let ssz_response = 
LightClientUpdateSszResponse { - response_chunk_len: (light_client_updates.len() as u64).to_le_bytes().to_vec(), - response_chunk: response_chunks.as_ssz_bytes(), - } - .as_ssz_bytes(); - Response::builder() .status(200) - .body(ssz_response) + .body(response_chunks.as_ssz_bytes()) .map(|res: Response>| add_ssz_content_type_header(res)) .map_err(|e| { warp_utils::reject::custom_server_error(format!( @@ -159,16 +153,24 @@ fn map_light_client_update_to_ssz_chunk( ) -> LightClientUpdateResponseChunk { let fork_name = chain .spec - .fork_name_at_slot::(*light_client_update.signature_slot()); + .fork_name_at_slot::(light_client_update.attested_header_slot()); let fork_digest = ChainSpec::compute_fork_digest( chain.spec.fork_version_for_name(fork_name), chain.genesis_validators_root, ); - LightClientUpdateResponseChunk { + let payload = light_client_update.as_ssz_bytes(); + let response_chunk_len = fork_digest.len() + payload.len(); + + let response_chunk = LightClientUpdateResponseChunkInner { context: fork_digest, - payload: light_client_update.as_ssz_bytes(), + payload, + }; + + LightClientUpdateResponseChunk { + response_chunk_len: response_chunk_len as u64, + response_chunk, } } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 66b4b7ea54..7ce486b855 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -802,13 +802,13 @@ pub struct LightClientUpdatesQuery { } #[derive(Encode, Decode)] -pub struct LightClientUpdateSszResponse { - pub response_chunk_len: Vec, - pub response_chunk: Vec, +pub struct LightClientUpdateResponseChunk { + pub response_chunk_len: u64, + pub response_chunk: LightClientUpdateResponseChunkInner, } #[derive(Encode, Decode)] -pub struct LightClientUpdateResponseChunk { +pub struct LightClientUpdateResponseChunkInner { pub context: [u8; 4], pub payload: Vec, } From af51d50b05b75f078f710c719b62beee397274d4 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 10 Apr 2025 21:47:30 -0700 Subject: [PATCH 
31/35] Ensure `/eth/v2/beacon/pool/attestations` honors `committee_index` (#7298) #7294 Fix the filtering logic so that we actually filter by committee index for both `Base` and `Electra` attestations. Added a tiny optimization when calculating committee_index to prevent unneeded memory allocations Added a regression test --- beacon_node/http_api/src/lib.rs | 22 +++-- beacon_node/http_api/tests/tests.rs | 98 ++++++++++++++++++- .../operation_pool/src/attestation_storage.rs | 20 +++- beacon_node/operation_pool/src/lib.rs | 8 +- consensus/types/src/attestation.rs | 14 ++- 5 files changed, 143 insertions(+), 19 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 07e20b4437..f101e35ed9 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -68,6 +68,7 @@ use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; pub use state_id::StateId; +use std::collections::HashSet; use std::future::Future; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; @@ -85,13 +86,14 @@ use tokio_stream::{ wrappers::{errors::BroadcastStreamRecvError, BroadcastStream}, StreamExt, }; +use types::AttestationData; use types::{ - fork_versioned_response::EmptyMetadata, Attestation, AttestationData, AttestationShufflingId, - AttesterSlashing, BeaconStateError, ChainSpec, Checkpoint, CommitteeCache, ConfigAndPreset, - Epoch, EthSpec, ForkName, ForkVersionedResponse, Hash256, ProposerPreparationData, - ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, SignedBlindedBeaconBlock, - SignedBlsToExecutionChange, SignedContributionAndProof, SignedValidatorRegistrationData, - SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, + fork_versioned_response::EmptyMetadata, Attestation, AttestationShufflingId, AttesterSlashing, + BeaconStateError, ChainSpec, Checkpoint, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, + ForkName, ForkVersionedResponse, 
Hash256, ProposerPreparationData, ProposerSlashing, + RelativeEpoch, SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, + SyncCommitteeMessage, SyncContributionData, }; use validator::pubkey_to_validator_index; use version::{ @@ -2032,11 +2034,11 @@ pub fn serve( chain: Arc>, query: api_types::AttestationPoolQuery| { task_spawner.blocking_response_task(Priority::P1, move || { - let query_filter = |data: &AttestationData| { + let query_filter = |data: &AttestationData, committee_indices: HashSet| { query.slot.is_none_or(|slot| slot == data.slot) && query .committee_index - .is_none_or(|index| index == data.index) + .is_none_or(|index| committee_indices.contains(&index)) }; let mut attestations = chain.op_pool.get_filtered_attestations(query_filter); @@ -2045,7 +2047,9 @@ pub fn serve( .naive_aggregation_pool .read() .iter() - .filter(|&att| query_filter(att.data())) + .filter(|&att| { + query_filter(att.data(), att.get_committee_indices_map()) + }) .cloned(), ); // Use the current slot to find the fork version, and convert all messages to the diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index b573302e8b..a5aeb30e1a 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -28,6 +28,7 @@ use http_api::{ use lighthouse_network::{types::SyncState, Enr, EnrExt, PeerId}; use logging::test_logger; use network::NetworkReceivers; +use operation_pool::attestation_storage::CheckpointKey; use proto_array::ExecutionStatus; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; @@ -2119,7 +2120,7 @@ impl ApiTester { self } - pub async fn test_get_beacon_pool_attestations(self) -> Self { + pub async fn test_get_beacon_pool_attestations(self) { let result = self .client .get_beacon_pool_attestations_v1(None, None) @@ -2138,9 +2139,80 @@ impl ApiTester { .await .unwrap() .data; + 
assert_eq!(result, expected); - self + let result_committee_index_filtered = self + .client + .get_beacon_pool_attestations_v1(None, Some(0)) + .await + .unwrap() + .data; + + let expected_committee_index_filtered = expected + .clone() + .into_iter() + .filter(|att| att.get_committee_indices_map().contains(&0)) + .collect::>(); + + assert_eq!( + result_committee_index_filtered, + expected_committee_index_filtered + ); + + let result_committee_index_filtered = self + .client + .get_beacon_pool_attestations_v1(None, Some(1)) + .await + .unwrap() + .data; + + let expected_committee_index_filtered = expected + .clone() + .into_iter() + .filter(|att| att.get_committee_indices_map().contains(&1)) + .collect::>(); + + assert_eq!( + result_committee_index_filtered, + expected_committee_index_filtered + ); + + let fork_name = self + .harness + .chain + .spec + .fork_name_at_slot::(self.harness.chain.slot().unwrap()); + + // aggregate electra attestations + if fork_name.electra_enabled() { + // Take and drop the lock in a block to avoid clippy complaining + // about taking locks across await points + { + let mut all_attestations = self.chain.op_pool.attestations.write(); + let (prev_epoch_key, curr_epoch_key) = + CheckpointKey::keys_for_state(&self.harness.get_current_state()); + all_attestations.aggregate_across_committees(prev_epoch_key); + all_attestations.aggregate_across_committees(curr_epoch_key); + } + let result_committee_index_filtered = self + .client + .get_beacon_pool_attestations_v2(None, Some(0)) + .await + .unwrap() + .data; + let mut expected = self.chain.op_pool.get_all_attestations(); + expected.extend(self.chain.naive_aggregation_pool.read().iter().cloned()); + let expected_committee_index_filtered = expected + .clone() + .into_iter() + .filter(|att| att.get_committee_indices_map().contains(&0)) + .collect::>(); + assert_eq!( + result_committee_index_filtered, + expected_committee_index_filtered + ); + } } pub async fn 
test_post_beacon_pool_attester_slashings_valid_v1(mut self) -> Self { @@ -6463,10 +6535,30 @@ async fn beacon_get_blocks() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn beacon_get_pools() { +async fn test_beacon_pool_attestations_electra() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + config.spec.electra_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_get_beacon_pool_attestations() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_beacon_pool_attestations_base() { ApiTester::new() .await .test_get_beacon_pool_attestations() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_pools() { + ApiTester::new() .await .test_get_beacon_pool_attester_slashings() .await diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index 49ef5c279c..67c24b9c7a 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -1,6 +1,6 @@ use crate::AttestationStats; use itertools::Itertools; -use std::collections::{BTreeMap, HashMap}; +use std::collections::{BTreeMap, HashMap, HashSet}; use types::{ attestation::{AttestationBase, AttestationElectra}, superstruct, AggregateSignature, Attestation, AttestationData, BeaconState, BitList, BitVector, @@ -119,6 +119,18 @@ impl CompactAttestationRef<'_, E> { } } + pub fn get_committee_indices_map(&self) -> HashSet { + match self.indexed { + CompactIndexedAttestation::Base(_) => HashSet::from([self.data.index]), + CompactIndexedAttestation::Electra(indexed_att) => indexed_att + .committee_bits + .iter() + .enumerate() + 
.filter_map(|(index, bit)| if bit { Some(index as u64) } else { None }) + .collect(), + } + } + pub fn clone_as_attestation(&self) -> Attestation { match self.indexed { CompactIndexedAttestation::Base(indexed_att) => Attestation::Base(AttestationBase { @@ -268,7 +280,11 @@ impl CompactIndexedAttestationElectra { } pub fn committee_index(&self) -> Option { - self.get_committee_indices().first().copied() + self.committee_bits + .iter() + .enumerate() + .find(|&(_, bit)| bit) + .map(|(index, _)| index as u64) } pub fn get_committee_indices(&self) -> Vec { diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 584a5f9f32..ec8c6640b1 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1,5 +1,5 @@ mod attestation; -mod attestation_storage; +pub mod attestation_storage; mod attester_slashing; mod bls_to_execution_changes; mod max_cover; @@ -47,7 +47,7 @@ type SyncContributions = RwLock { /// Map from attestation ID (see below) to vectors of attestations. - attestations: RwLock>, + pub attestations: RwLock>, /// Map from sync aggregate ID to the best `SyncCommitteeContribution`s seen for that ID. sync_contributions: SyncContributions, /// Set of attester slashings, and the fork version they were verified against. @@ -673,12 +673,12 @@ impl OperationPool { /// This method may return objects that are invalid for block inclusion. 
pub fn get_filtered_attestations(&self, filter: F) -> Vec> where - F: Fn(&AttestationData) -> bool, + F: Fn(&AttestationData, HashSet) -> bool, { self.attestations .read() .iter() - .filter(|att| filter(&att.attestation_data())) + .filter(|att| filter(&att.attestation_data(), att.get_committee_indices_map())) .map(|att| att.clone_as_attestation()) .collect() } diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 1485842edb..0895377063 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -5,6 +5,7 @@ use derivative::Derivative; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::BitVector; +use std::collections::HashSet; use std::hash::{Hash, Hasher}; use superstruct::superstruct; use test_random_derive::TestRandom; @@ -209,6 +210,13 @@ impl Attestation { } } + pub fn get_committee_indices_map(&self) -> HashSet { + match self { + Attestation::Base(att) => HashSet::from([att.data.index]), + Attestation::Electra(att) => att.get_committee_indices().into_iter().collect(), + } + } + pub fn is_aggregation_bits_zero(&self) -> bool { match self { Attestation::Base(att) => att.aggregation_bits.is_zero(), @@ -292,7 +300,11 @@ impl AttestationRef<'_, E> { impl AttestationElectra { pub fn committee_index(&self) -> Option { - self.get_committee_indices().first().cloned() + self.committee_bits + .iter() + .enumerate() + .find(|&(_, bit)| bit) + .map(|(index, _)| index as u64) } pub fn get_aggregation_bits(&self) -> Vec { From 70f8ab9a6fc24f0d5bf99832d979d99602da3cd7 Mon Sep 17 00:00:00 2001 From: Robert Mordzon Date: Fri, 11 Apr 2025 15:17:22 +0200 Subject: [PATCH 32/35] Add riscv64 build support (#7309) Lighthouse does not compile on the _riscv64_ architecture due to a missing target configuration in the `bls_hardware_acceleration` function. 
Added support for compiling Lighthouse on the riscv64 architecture by explicitly handling the _riscv64_ target in the bls_hardware_acceleration function. Specifically, this line was added: ``` #[cfg(target_arch = "riscv64")] return false; ``` This prevents a compilation error on RISC-V. --- lighthouse/src/main.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 66dae05326..7ddf04db01 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -68,6 +68,9 @@ fn bls_hardware_acceleration() -> bool { #[cfg(target_arch = "aarch64")] return std::arch::is_aarch64_feature_detected!("neon"); + + #[cfg(target_arch = "riscv64")] + return false; } fn allocator_name() -> String { From be68dd24d05f55dab688669cd9879ac6ce29726f Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Fri, 11 Apr 2025 19:00:57 -0300 Subject: [PATCH 33/35] Fix wrong custody column count for lookup blocks (#7281) Fixes - https://github.com/sigp/lighthouse/issues/7278 Don't assume 0 columns for `RpcBlockInner::Block` --- .../beacon_chain/src/block_verification.rs | 34 ----------------- .../src/block_verification_types.rs | 4 +- beacon_node/beacon_chain/src/test_utils.rs | 4 +- .../beacon_chain/tests/block_verification.rs | 37 +++++++++++++------ .../tests/payload_invalidation.rs | 23 ++++++++---- beacon_node/beacon_chain/tests/store_tests.rs | 19 ++++++++-- beacon_node/http_api/src/publish_blocks.rs | 8 +++- .../src/network_beacon_processor/tests.rs | 18 ++++++++- .../network/src/sync/block_lookups/common.rs | 10 +---- .../src/sync/block_sidecar_coupling.rs | 3 +- .../network/src/sync/network_context.rs | 8 +++- beacon_node/network/src/sync/tests/range.rs | 3 +- testing/ef_tests/src/cases/fork_choice.rs | 3 +- 13 files changed, 98 insertions(+), 76 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 
46ba1bc992..074ae93a79 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1267,40 +1267,6 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc } } -impl IntoExecutionPendingBlock for Arc> { - /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` - /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. - fn into_execution_pending_block_slashable( - self, - block_root: Hash256, - chain: &Arc>, - notify_execution_layer: NotifyExecutionLayer, - ) -> Result, BlockSlashInfo> { - // Perform an early check to prevent wasting time on irrelevant blocks. - let block_root = check_block_relevancy(&self, block_root, chain) - .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; - let maybe_available = chain - .data_availability_checker - .verify_kzg_for_rpc_block(RpcBlock::new_without_blobs(Some(block_root), self.clone())) - .map_err(|e| { - BlockSlashInfo::SignatureNotChecked( - self.signed_block_header(), - BlockError::AvailabilityCheck(e), - ) - })?; - SignatureVerifiedBlock::check_slashable(maybe_available, block_root, chain)? - .into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) - } - - fn block(&self) -> &SignedBeaconBlock { - self - } - - fn block_cloned(&self) -> Arc> { - self.clone() - } -} - impl IntoExecutionPendingBlock for RpcBlock { /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. 
diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index aa7418646f..dab54dc823 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -103,14 +103,14 @@ impl RpcBlock { pub fn new_without_blobs( block_root: Option, block: Arc>, + custody_columns_count: usize, ) -> Self { let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); Self { block_root, block: RpcBlockInner::Block(block), - // Block has zero columns - custody_columns_count: 0, + custody_columns_count, } } diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index e007d46fc3..ca083f0572 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2366,7 +2366,7 @@ where .blob_kzg_commitments() .is_ok_and(|c| !c.is_empty()); if !has_blobs { - return RpcBlock::new_without_blobs(Some(block_root), block); + return RpcBlock::new_without_blobs(Some(block_root), block, 0); } // Blobs are stored as data columns from Fulu (PeerDAS) @@ -2417,7 +2417,7 @@ where &self.spec, )? 
} else { - RpcBlock::new_without_blobs(Some(block_root), block) + RpcBlock::new_without_blobs(Some(block_root), block, 0) } } else { let blobs = blob_items diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 3dc46be16e..9225ffd9f4 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -147,7 +147,7 @@ fn build_rpc_block( RpcBlock::new_with_custody_columns(None, block, columns.clone(), columns.len(), spec) .unwrap() } - None => RpcBlock::new_without_blobs(None, block), + None => RpcBlock::new_without_blobs(None, block, 0), } } @@ -370,6 +370,7 @@ async fn chain_segment_non_linear_parent_roots() { blocks[3] = RpcBlock::new_without_blobs( None, Arc::new(SignedBeaconBlock::from_block(block, signature)), + harness.sampling_column_count, ); assert!( @@ -407,6 +408,7 @@ async fn chain_segment_non_linear_slots() { blocks[3] = RpcBlock::new_without_blobs( None, Arc::new(SignedBeaconBlock::from_block(block, signature)), + harness.sampling_column_count, ); assert!( @@ -434,6 +436,7 @@ async fn chain_segment_non_linear_slots() { blocks[3] = RpcBlock::new_without_blobs( None, Arc::new(SignedBeaconBlock::from_block(block, signature)), + harness.sampling_column_count, ); assert!( @@ -575,11 +578,16 @@ async fn invalid_signature_gossip_block() { .into_block_error() .expect("should import all blocks prior to the one being tested"); let signed_block = SignedBeaconBlock::from_block(block, junk_signature()); + let rpc_block = RpcBlock::new_without_blobs( + None, + Arc::new(signed_block), + harness.sampling_column_count, + ); let process_res = harness .chain .process_block( - signed_block.canonical_root(), - Arc::new(signed_block), + rpc_block.block_root(), + rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1541,12 +1549,13 @@ async fn add_base_block_to_altair_chain() { )); // Ensure that it would be 
impossible to import via `BeaconChain::process_block`. + let base_rpc_block = RpcBlock::new_without_blobs(None, Arc::new(base_block.clone()), 0); assert!(matches!( harness .chain .process_block( - base_block.canonical_root(), - Arc::new(base_block.clone()), + base_rpc_block.block_root(), + base_rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1564,7 +1573,7 @@ async fn add_base_block_to_altair_chain() { harness .chain .process_chain_segment( - vec![RpcBlock::new_without_blobs(None, Arc::new(base_block))], + vec![RpcBlock::new_without_blobs(None, Arc::new(base_block), 0)], NotifyExecutionLayer::Yes, ) .await, @@ -1677,12 +1686,13 @@ async fn add_altair_block_to_base_chain() { )); // Ensure that it would be impossible to import via `BeaconChain::process_block`. + let altair_rpc_block = RpcBlock::new_without_blobs(None, Arc::new(altair_block.clone()), 0); assert!(matches!( harness .chain .process_block( - altair_block.canonical_root(), - Arc::new(altair_block.clone()), + altair_rpc_block.block_root(), + altair_rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1700,7 +1710,7 @@ async fn add_altair_block_to_base_chain() { harness .chain .process_chain_segment( - vec![RpcBlock::new_without_blobs(None, Arc::new(altair_block))], + vec![RpcBlock::new_without_blobs(None, Arc::new(altair_block), 0)], NotifyExecutionLayer::Yes ) .await, @@ -1761,11 +1771,16 @@ async fn import_duplicate_block_unrealized_justification() { // Create two verified variants of the block, representing the same block being processed in // parallel. 
let notify_execution_layer = NotifyExecutionLayer::Yes; - let verified_block1 = block + let rpc_block = RpcBlock::new_without_blobs( + Some(block_root), + block.clone(), + harness.sampling_column_count, + ); + let verified_block1 = rpc_block .clone() .into_execution_pending_block(block_root, chain, notify_execution_layer) .unwrap(); - let verified_block2 = block + let verified_block2 = rpc_block .into_execution_pending_block(block_root, chain, notify_execution_layer) .unwrap(); diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 4c4f0d8c6a..c6fc3416e0 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1,5 +1,6 @@ #![cfg(not(debug_assertions))] +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{ canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, @@ -687,12 +688,14 @@ async fn invalidates_all_descendants() { assert_eq!(fork_parent_state.slot(), fork_parent_slot); let ((fork_block, _), _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; + let fork_rpc_block = + RpcBlock::new_without_blobs(None, fork_block.clone(), rig.harness.sampling_column_count); let fork_block_root = rig .harness .chain .process_block( - fork_block.canonical_root(), - fork_block, + fork_rpc_block.block_root(), + fork_rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -788,12 +791,14 @@ async fn switches_heads() { let ((fork_block, _), _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; let fork_parent_root = fork_block.parent_root(); + let fork_rpc_block = + RpcBlock::new_without_blobs(None, fork_block.clone(), rig.harness.sampling_column_count); let fork_block_root = rig .harness .chain .process_block( - fork_block.canonical_root(), - fork_block, + fork_rpc_block.block_root(), + 
fork_rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1057,8 +1062,10 @@ async fn invalid_parent() { )); // Ensure the block built atop an invalid payload is invalid for import. + let rpc_block = + RpcBlock::new_without_blobs(None, block.clone(), rig.harness.sampling_column_count); assert!(matches!( - rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, + rig.harness.chain.process_block(rpc_block.block_root(), rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), ).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) @@ -1380,11 +1387,13 @@ async fn recover_from_invalid_head_by_importing_blocks() { } = InvalidHeadSetup::new().await; // Import the fork block, it should become the head. + let fork_rpc_block = + RpcBlock::new_without_blobs(None, fork_block.clone(), rig.harness.sampling_column_count); rig.harness .chain .process_block( - fork_block.canonical_root(), - fork_block.clone(), + fork_rpc_block.block_root(), + fork_rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 2fe1ecc08f..3343dc101b 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1,6 +1,7 @@ #![cfg(not(debug_assertions))] use beacon_chain::attestation_verification::Error as AttnError; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::builder::BeaconChainBuilder; use beacon_chain::data_availability_checker::AvailableBlock; use beacon_chain::schema_change::migrate_schema; @@ -2643,12 +2644,17 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { assert_eq!(split.block_root, valid_fork_block.parent_root()); assert_ne!(split.state_root, unadvanced_split_state_root); + let invalid_fork_rpc_block = 
RpcBlock::new_without_blobs( + None, + invalid_fork_block.clone(), + harness.sampling_column_count, + ); // Applying the invalid block should fail. let err = harness .chain .process_block( - invalid_fork_block.canonical_root(), - invalid_fork_block.clone(), + invalid_fork_rpc_block.block_root(), + invalid_fork_rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -2658,11 +2664,16 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { assert!(matches!(err, BlockError::WouldRevertFinalizedSlot { .. })); // Applying the valid block should succeed, but it should not become head. + let valid_fork_rpc_block = RpcBlock::new_without_blobs( + None, + valid_fork_block.clone(), + harness.sampling_column_count, + ); harness .chain .process_block( - valid_fork_block.canonical_root(), - valid_fork_block.clone(), + valid_fork_rpc_block.block_root(), + valid_fork_rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 3d152f8852..b613cf8467 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -2,7 +2,7 @@ use crate::metrics; use std::future::Future; use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; -use beacon_chain::block_verification_types::AsBlock; +use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::{ @@ -302,7 +302,11 @@ pub async fn publish_block>( ); let import_result = Box::pin(chain.process_block( block_root, - block.clone(), + RpcBlock::new_without_blobs( + Some(block_root), + block.clone(), + network_globals.custody_columns_count() as usize, + ), NotifyExecutionLayer::Yes, BlockImportSource::HttpApi, publish_fn, diff --git 
a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index aa5f54ac1f..5000941b0d 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -323,12 +323,22 @@ impl TestRig { } } + pub fn custody_columns_count(&self) -> usize { + self.network_beacon_processor + .network_globals + .custody_columns_count() as usize + } + pub fn enqueue_rpc_block(&self) { let block_root = self.next_block.canonical_root(); self.network_beacon_processor .send_rpc_beacon_block( block_root, - RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone()), + RpcBlock::new_without_blobs( + Some(block_root), + self.next_block.clone(), + self.custody_columns_count(), + ), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 0 }, ) @@ -340,7 +350,11 @@ impl TestRig { self.network_beacon_processor .send_rpc_beacon_block( block_root, - RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone()), + RpcBlock::new_without_blobs( + Some(block_root), + self.next_block.clone(), + self.custody_columns_count(), + ), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 1 }, ) diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index 8eefb2d675..86b6894bac 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -6,7 +6,6 @@ use crate::sync::block_lookups::{ }; use crate::sync::manager::BlockProcessType; use crate::sync::network_context::{LookupRequestResult, SyncNetworkContext}; -use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::BeaconChainTypes; use lighthouse_network::service::api_types::Id; use parking_lot::RwLock; @@ -97,13 +96,8 @@ impl RequestState for BlockRequestState { seen_timestamp, .. 
} = download_result; - cx.send_block_for_processing( - id, - block_root, - RpcBlock::new_without_blobs(Some(block_root), value), - seen_timestamp, - ) - .map_err(LookupRequestError::SendFailedProcessor) + cx.send_block_for_processing(id, block_root, value, seen_timestamp) + .map_err(LookupRequestError::SendFailedProcessor) } fn response_type() -> ResponseType { diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index ef9285c8dc..99428b0c80 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -266,7 +266,8 @@ impl RangeBlockComponentsRequest { ) .map_err(|e| format!("{e:?}"))? } else { - RpcBlock::new_without_blobs(Some(block_root), block) + // Block has no data, expects zero columns + RpcBlock::new_without_blobs(Some(block_root), block, 0) }); } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 69b350f8cb..2cb5ec9a0a 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -1308,7 +1308,7 @@ impl SyncNetworkContext { &self, id: Id, block_root: Hash256, - block: RpcBlock, + block: Arc>, seen_timestamp: Duration, ) -> Result<(), SendErrorProcessor> { let span = span!( @@ -1322,6 +1322,12 @@ impl SyncNetworkContext { .beacon_processor_if_enabled() .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; + let block = RpcBlock::new_without_blobs( + Some(block_root), + block, + self.network_globals().custody_columns_count() as usize, + ); + debug!(block = ?block_root, id, "Sending block for processing"); // Lookup sync event safety: If `beacon_processor.send_rpc_beacon_block` returns Ok() sync // must receive a single `SyncMessage::BlockComponentProcessed` with this process type diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index 
2871ea2a4d..932f485dd0 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -459,7 +459,8 @@ fn build_rpc_block( ) .unwrap() } - None => RpcBlock::new_without_blobs(None, block), + // Block has no data, expects zero columns + None => RpcBlock::new_without_blobs(None, block, 0), } } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 43e96e3f1e..b507383190 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -3,6 +3,7 @@ use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yam use ::fork_choice::{PayloadVerificationStatus, ProposerHeadError}; use beacon_chain::beacon_proposer_cache::compute_proposer_duties_from_head; use beacon_chain::blob_verification::GossipBlobError; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::chain_config::{ DisallowedReOrgOffsets, DEFAULT_RE_ORG_HEAD_THRESHOLD, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_PARENT_THRESHOLD, @@ -519,7 +520,7 @@ impl Tester { let result: Result, _> = self .block_on_dangerous(self.harness.chain.process_block( block_root, - block.clone(), + RpcBlock::new_without_blobs(Some(block_root), block.clone(), 0), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), From 08882c64cae509208d0ba4ff8d67c6a601633979 Mon Sep 17 00:00:00 2001 From: EllipticPoint <95832864+EllipticPoint@users.noreply.github.com> Date: Sat, 12 Apr 2025 19:03:50 +1000 Subject: [PATCH 34/35] Fix execution engine integration tests with latest geth version (#6996) https://github.com/sigp/lighthouse/issues/6937 - Use `ethers-rs` [`Signer`](https://www.gakonst.com/ethers-rs/middleware/signer.html) middleware for local signing and sending raw txs to geth - ~~Set `totalDifficulty = 0` through `serde` default if the block does not contain a `totalDifficulty` field~~ --- Cargo.lock | 441 ++++++++++++++++-- Cargo.toml | 2 
+ .../execution_engine_integration/Cargo.toml | 2 + .../execution_engine_integration/src/geth.rs | 15 +- .../execution_engine_integration/src/main.rs | 4 +- .../src/test_rig.rs | 59 ++- 6 files changed, 461 insertions(+), 62 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4bf6c7cb0a..b98e096718 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -79,7 +79,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ "crypto-common", - "generic-array", + "generic-array 0.14.7", ] [[package]] @@ -92,7 +92,7 @@ dependencies = [ "cipher 0.3.0", "cpufeatures", "ctr 0.8.0", - "opaque-debug", + "opaque-debug 0.3.1", ] [[package]] @@ -674,6 +674,18 @@ dependencies = [ "winapi", ] +[[package]] +name = "auto_impl" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7862e21c893d65a1650125d157eaeec691439379a1cee17ee49031b79236ada4" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "auto_impl" version = "1.2.1" @@ -724,6 +736,28 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" +[[package]] +name = "base58" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" + +[[package]] +name = "base58check" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ee2fe4c9a0c84515f136aaae2466744a721af6d63339c18689d9e995d74d99b" +dependencies = [ + "base58", + "sha2 0.8.2", +] + +[[package]] +name = "base64" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" + [[package]] name = "base64" version = "0.13.1" @@ 
-880,6 +914,12 @@ dependencies = [ "types", ] +[[package]] +name = "bech32" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dabbe35f96fb9507f7330793dc490461b2962659ac5d427181e451a623751d1" + [[package]] name = "bincode" version = "1.3.3" @@ -939,6 +979,16 @@ version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +[[package]] +name = "bitvec" +version = "0.17.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" +dependencies = [ + "either", + "radium 0.3.0", +] + [[package]] name = "bitvec" version = "0.20.4" @@ -972,14 +1022,26 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "block-buffer" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +dependencies = [ + "block-padding 0.1.5", + "byte-tools", + "byteorder", + "generic-array 0.12.4", +] + [[package]] name = "block-buffer" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding", - "generic-array", + "block-padding 0.2.1", + "generic-array 0.14.7", ] [[package]] @@ -988,7 +1050,16 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array", + "generic-array 0.14.7", +] + +[[package]] +name = "block-padding" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +dependencies = [ + "byte-tools", ] [[package]] @@ -1105,6 +1176,12 @@ version = "1.2.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" +[[package]] +name = "byte-tools" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" + [[package]] name = "byteorder" version = "1.5.0" @@ -1310,7 +1387,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" dependencies = [ - "generic-array", + "generic-array 0.14.7", ] [[package]] @@ -1445,6 +1522,63 @@ dependencies = [ "cc", ] +[[package]] +name = "coins-bip32" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634c509653de24b439672164bbf56f5f582a2ab0e313d3b0f6af0b7345cf2560" +dependencies = [ + "bincode", + "bs58 0.4.0", + "coins-core", + "digest 0.10.7", + "getrandom 0.2.15", + "hmac 0.12.1", + "k256 0.11.6", + "lazy_static", + "serde", + "sha2 0.10.8", + "thiserror 1.0.69", +] + +[[package]] +name = "coins-bip39" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a11892bcac83b4c6e95ab84b5b06c76d9d70ad73548dd07418269c5c7977171" +dependencies = [ + "bitvec 0.17.4", + "coins-bip32", + "getrandom 0.2.15", + "hex", + "hmac 0.12.1", + "pbkdf2 0.11.0", + "rand 0.8.5", + "sha2 0.10.8", + "thiserror 1.0.69", +] + +[[package]] +name = "coins-core" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c94090a6663f224feae66ab01e41a2555a8296ee07b5f20dab8888bdefc9f617" +dependencies = [ + "base58check", + "base64 0.12.3", + "bech32", + "blake2", + "digest 0.10.7", + "generic-array 0.14.7", + "hex", + "ripemd", + "serde", + "serde_derive", + "sha2 0.10.8", + "sha3 0.10.8", + "thiserror 1.0.69", +] + [[package]] name = "colorchoice" version = "1.0.3" @@ -1536,6 +1670,15 @@ version = 
"0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +[[package]] +name = "convert_case" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -1713,7 +1856,7 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ - "generic-array", + "generic-array 0.14.7", "rand_core 0.6.4", "subtle", "zeroize", @@ -1725,7 +1868,7 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ - "generic-array", + "generic-array 0.14.7", "rand_core 0.6.4", "subtle", "zeroize", @@ -1737,7 +1880,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array", + "generic-array 0.14.7", "rand_core 0.6.4", "typenum", ] @@ -1748,7 +1891,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array", + "generic-array 0.14.7", "subtle", ] @@ -1758,7 +1901,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25fab6889090c8133f3deb8f73ba3c65a7f456f66436fc012a1b1e272b1e103e" dependencies = [ - "generic-array", + "generic-array 0.14.7", "subtle", ] @@ -2053,7 +2196,7 @@ version = "0.99.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3da29a38df43d6f156149c9b43ded5e018ddff2a855cf2cfd62e8cd7d079c69f" dependencies = [ - "convert_case", + "convert_case 0.4.0", "proc-macro2", "quote", "rustc_version 0.4.1", @@ -2101,13 +2244,22 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "digest" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +dependencies = [ + "generic-array 0.12.4", +] + [[package]] name = "digest" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array", + "generic-array 0.14.7", ] [[package]] @@ -2336,8 +2488,9 @@ dependencies = [ "der 0.6.1", "digest 0.10.7", "ff 0.12.1", - "generic-array", + "generic-array 0.14.7", "group 0.12.1", + "pkcs8 0.9.0", "rand_core 0.6.4", "sec1 0.3.0", "subtle", @@ -2354,7 +2507,7 @@ dependencies = [ "crypto-bigint 0.5.5", "digest 0.10.7", "ff 0.13.1", - "generic-array", + "generic-array 0.14.7", "group 0.13.0", "pem-rfc7468", "pkcs8 0.10.2", @@ -2485,6 +2638,28 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "eth-keystore" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" +dependencies = [ + "aes 0.8.4", + "ctr 0.9.2", + "digest 0.10.7", + "hex", + "hmac 0.12.1", + "pbkdf2 0.11.0", + "rand 0.8.5", + "scrypt 0.10.0", + "serde", + "serde_json", + "sha2 0.10.8", + "sha3 0.10.8", + "thiserror 1.0.69", + "uuid 0.8.2", +] + [[package]] name = "eth1" version = "0.2.0" @@ -2603,7 +2778,7 @@ dependencies = [ "hmac 0.11.0", "pbkdf2 0.8.0", "rand 0.8.5", - "scrypt", + "scrypt 0.7.0", "serde", "serde_json", "serde_repr", @@ -2872,13 +3047,15 @@ dependencies = [ "bytes", "cargo_metadata 0.15.4", "chrono", + "convert_case 0.6.0", "elliptic-curve 0.12.3", "ethabi 18.0.0", - 
"generic-array", + "generic-array 0.14.7", "hex", "k256 0.11.6", "once_cell", "open-fastrlp", + "proc-macro2", "rand 0.8.5", "rlp", "rlp-derive", @@ -2891,6 +3068,49 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "ethers-etherscan" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9713f525348e5dde025d09b0a4217429f8074e8ff22c886263cc191e87d8216" +dependencies = [ + "ethers-core", + "getrandom 0.2.15", + "reqwest", + "semver 1.0.26", + "serde", + "serde-aux", + "serde_json", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "ethers-middleware" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e71df7391b0a9a51208ffb5c7f2d068900e99d6b3128d3a4849d138f194778b7" +dependencies = [ + "async-trait", + "auto_impl 0.5.0", + "ethers-contract", + "ethers-core", + "ethers-etherscan", + "ethers-providers", + "ethers-signers", + "futures-locks", + "futures-util", + "instant", + "reqwest", + "serde", + "serde_json", + "thiserror 1.0.69", + "tokio", + "tracing", + "tracing-futures", + "url", +] + [[package]] name = "ethers-providers" version = "1.0.2" @@ -2898,7 +3118,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1a9e0597aa6b2fdc810ff58bc95e4eeaa2c219b3e615ed025106ecb027407d8" dependencies = [ "async-trait", - "auto_impl", + "auto_impl 1.2.1", "base64 0.13.1", "ethers-core", "futures-core", @@ -2926,6 +3146,24 @@ dependencies = [ "ws_stream_wasm", ] +[[package]] +name = "ethers-signers" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f41ced186867f64773db2e55ffdd92959e094072a1d09a5e5e831d443204f98" +dependencies = [ + "async-trait", + "coins-bip32", + "coins-bip39", + "elliptic-curve 0.12.3", + "eth-keystore", + "ethers-core", + "hex", + "rand 0.8.5", + "sha2 0.10.8", + "thiserror 1.0.69", +] + [[package]] name = "event-listener" version = "2.5.3" @@ -2971,7 +3209,9 @@ dependencies 
= [ "async-channel 1.9.0", "deposit_contract", "ethers-core", + "ethers-middleware", "ethers-providers", + "ethers-signers", "execution_layer", "fork_choice", "futures", @@ -3049,6 +3289,12 @@ dependencies = [ "once_cell", ] +[[package]] +name = "fake-simd" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" + [[package]] name = "fallible-iterator" version = "0.2.0" @@ -3074,7 +3320,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" dependencies = [ "arrayvec", - "auto_impl", + "auto_impl 1.2.1", "bytes", ] @@ -3085,7 +3331,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" dependencies = [ "arrayvec", - "auto_impl", + "auto_impl 1.2.1", "bytes", ] @@ -3337,6 +3583,16 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "futures-locks" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45ec6fe3675af967e67c5536c0b9d44e34e6c52f86bedc4ea49c5317b8e94d06" +dependencies = [ + "futures-channel", + "futures-task", +] + [[package]] name = "futures-macro" version = "0.3.31" @@ -3417,6 +3673,15 @@ dependencies = [ "windows 0.58.0", ] +[[package]] +name = "generic-array" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" +dependencies = [ + "typenum", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -3481,7 +3746,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ - "opaque-debug", + "opaque-debug 0.3.1", "polyval", ] @@ -3851,7 +4116,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" dependencies = [ "digest 0.9.0", - "generic-array", + "generic-array 0.14.7", "hmac 0.8.1", ] @@ -4469,7 +4734,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" dependencies = [ - "generic-array", + "generic-array 0.14.7", ] [[package]] @@ -6186,6 +6451,12 @@ version = "11.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + [[package]] name = "opaque-debug" version = "0.3.1" @@ -6199,7 +6470,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce" dependencies = [ "arrayvec", - "auto_impl", + "auto_impl 1.2.1", "bytes", "ethereum-types 0.14.1", "open-fastrlp-derive", @@ -6293,6 +6564,15 @@ dependencies = [ "types", ] +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + [[package]] name = "overload" version = "0.1.1" @@ -6626,7 +6906,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ "cpufeatures", - "opaque-debug", + "opaque-debug 0.3.1", "universal-hash", ] @@ -6638,7 +6918,7 @@ checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ "cfg-if", "cpufeatures", - "opaque-debug", + "opaque-debug 0.3.1", 
"universal-hash", ] @@ -6736,6 +7016,30 @@ dependencies = [ "toml_edit 0.22.24", ] +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + [[package]] name = "proc-macro2" version = "1.0.94" @@ -6992,6 +7296,12 @@ dependencies = [ "rusqlite", ] +[[package]] +name = "radium" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" + [[package]] name = "radium" version = "0.6.2" @@ -7296,6 +7606,15 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "rlp" version = "0.5.2" @@ -7642,6 +7961,15 @@ dependencies = [ "cipher 0.3.0", ] +[[package]] +name = "salsa20" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" +dependencies = [ + "cipher 0.4.4", +] + [[package]] name = "same-file" version = "1.0.6" @@ -7713,10 +8041,22 @@ checksum = "879588d8f90906e73302547e20fffefdd240eb3e0e744e142321f5d49dea0518" dependencies = [ "hmac 0.11.0", "pbkdf2 0.8.0", - "salsa20", + "salsa20 0.8.1", "sha2 0.9.9", ] +[[package]] +name = "scrypt" +version = "0.10.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f9e24d2b632954ded8ab2ef9fea0a0c769ea56ea98bddbafbad22caeeadf45d" +dependencies = [ + "hmac 0.12.1", + "pbkdf2 0.11.0", + "salsa20 0.10.2", + "sha2 0.10.8", +] + [[package]] name = "sct" version = "0.7.1" @@ -7735,7 +8075,7 @@ checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ "base16ct 0.1.1", "der 0.6.1", - "generic-array", + "generic-array 0.14.7", "pkcs8 0.9.0", "subtle", "zeroize", @@ -7749,7 +8089,7 @@ checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct 0.2.0", "der 0.7.9", - "generic-array", + "generic-array 0.14.7", "pkcs8 0.10.2", "subtle", "zeroize", @@ -7828,6 +8168,27 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-aux" +version = "4.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5290c39c5f6992b9dddbda28541d965dba46468294e6018a408fa297e6c602de" +dependencies = [ + "serde", + "serde-value", + "serde_json", +] + +[[package]] +name = "serde-value" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float", + "serde", +] + [[package]] name = "serde_array_query" version = "0.1.0" @@ -7908,6 +8269,18 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha2" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" +dependencies = [ + "block-buffer 0.7.3", + "digest 0.8.1", + "fake-simd", + "opaque-debug 0.2.3", +] + [[package]] name = "sha2" version = "0.9.9" @@ -7918,7 +8291,7 @@ dependencies = [ "cfg-if", "cpufeatures", "digest 0.9.0", - "opaque-debug", + "opaque-debug 0.3.1", ] [[package]] @@ -7941,7 +8314,7 @@ dependencies = [ "block-buffer 0.9.0", "digest 0.9.0", "keccak", - 
"opaque-debug", + "opaque-debug 0.3.1", ] [[package]] @@ -9170,6 +9543,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-segmentation" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + [[package]] name = "unicode-xid" version = "0.2.6" diff --git a/Cargo.toml b/Cargo.toml index de5d6b541e..31f50068dc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -141,6 +141,8 @@ ethereum_ssz = "0.8.2" ethereum_ssz_derive = "0.8.2" ethers-core = "1" ethers-providers = { version = "1", default-features = false } +ethers-signers = { version = "1", default-features = false } +ethers-middleware = { version = "1", default-features = false } exit-future = "0.2" fnv = "1" fs2 = "0.4" diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 28ff944799..55c42eb9d3 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -7,7 +7,9 @@ edition = { workspace = true } async-channel = { workspace = true } deposit_contract = { workspace = true } ethers-core = { workspace = true } +ethers-middleware = { workspace = true } ethers-providers = { workspace = true } +ethers-signers = { workspace = true } execution_layer = { workspace = true } fork_choice = { workspace = true } futures = { workspace = true } diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index ea143ed433..8c39fda4e3 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -7,10 +7,7 @@ use std::{env, fs}; use tempfile::TempDir; use unused_port::unused_tcp4_port; -// This is not currently used due to the following breaking changes in geth that requires updating our tests: -// 1. removal of `personal` namespace in v1.14.12: See #30704 -// 2. 
removal of `totalDifficulty` field from RPC in v1.14.11. See #30386. -// const GETH_BRANCH: &str = "master"; +const GETH_BRANCH: &str = "master"; const GETH_REPO_URL: &str = "https://github.com/ethereum/go-ethereum"; pub fn build_result(repo_dir: &Path) -> Output { @@ -30,14 +27,12 @@ pub fn build(execution_clients_dir: &Path) { } // Get the latest tag on the branch - // let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); - // Using an older release due to breaking changes in recent releases. See comment on `GETH_BRANCH` const. - let release_tag = "v1.14.10"; - build_utils::checkout(&repo_dir, dbg!(release_tag)).unwrap(); + let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); + build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap(); // Build geth build_utils::check_command_output(build_result(&repo_dir), || { - format!("geth make failed using release {release_tag}") + format!("geth make failed using release {last_release}") }); } @@ -102,7 +97,7 @@ impl GenericExecutionEngine for GethEngine { .arg(datadir.path().to_str().unwrap()) .arg("--http") .arg("--http.api") - .arg("engine,eth,personal") + .arg("engine,eth") .arg("--http.port") .arg(http_port.to_string()) .arg("--authrpc.port") diff --git a/testing/execution_engine_integration/src/main.rs b/testing/execution_engine_integration/src/main.rs index efb06833f6..d453c415d4 100644 --- a/testing/execution_engine_integration/src/main.rs +++ b/testing/execution_engine_integration/src/main.rs @@ -32,12 +32,12 @@ fn main() { fn test_geth() { let test_dir = build_utils::prepare_dir(); geth::build(&test_dir); - TestRig::new(GethEngine).perform_tests_blocking(); + TestRig::new(GethEngine, true).perform_tests_blocking(); geth::clean(&test_dir); } fn test_nethermind() { let test_dir = build_utils::prepare_dir(); nethermind::build(&test_dir); - TestRig::new(NethermindEngine).perform_tests_blocking(); + TestRig::new(NethermindEngine, 
false).perform_tests_blocking(); } diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index cf31c184fe..b0d115960c 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -2,7 +2,9 @@ use crate::execution_engine::{ ExecutionEngine, GenericExecutionEngine, ACCOUNT1, ACCOUNT2, KEYSTORE_PASSWORD, PRIVATE_KEYS, }; use crate::transactions::transactions; +use ethers_middleware::SignerMiddleware; use ethers_providers::Middleware; +use ethers_signers::LocalWallet; use execution_layer::test_utils::DEFAULT_GAS_LIMIT; use execution_layer::{ BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, @@ -44,6 +46,7 @@ pub struct TestRig { ee_b: ExecutionPair, spec: ChainSpec, _runtime_shutdown: async_channel::Sender<()>, + use_local_signing: bool, } /// Import a private key into the execution engine and unlock it so that we can @@ -104,7 +107,7 @@ async fn import_and_unlock(http_url: SensitiveUrl, priv_keys: &[&str], password: } impl TestRig { - pub fn new(generic_engine: Engine) -> Self { + pub fn new(generic_engine: Engine, use_local_signing: bool) -> Self { let runtime = Arc::new( tokio::runtime::Builder::new_multi_thread() .enable_all() @@ -166,6 +169,7 @@ impl TestRig { ee_b, spec, _runtime_shutdown: runtime_shutdown, + use_local_signing, } } @@ -197,15 +201,9 @@ impl TestRig { pub async fn perform_tests(&self) { self.wait_until_synced().await; - // Import and unlock all private keys to sign transactions - let _ = futures::future::join_all([&self.ee_a, &self.ee_b].iter().map(|ee| { - import_and_unlock( - ee.execution_engine.http_url(), - &PRIVATE_KEYS, - KEYSTORE_PASSWORD, - ) - })) - .await; + // Create a local signer in case we need to sign transactions locally + let wallet1: LocalWallet = PRIVATE_KEYS[0].parse().expect("Invalid private key"); + let signer = 
SignerMiddleware::new(&self.ee_a.execution_engine.provider, wallet1); // We hardcode the accounts here since some EEs start with a default unlocked account let account1 = ethers_core::types::Address::from_slice(&hex::decode(ACCOUNT1).unwrap()); @@ -236,15 +234,38 @@ impl TestRig { // Submit transactions before getting payload let txs = transactions::(account1, account2); let mut pending_txs = Vec::new(); - for tx in txs.clone().into_iter() { - let pending_tx = self - .ee_a - .execution_engine - .provider - .send_transaction(tx, None) - .await - .unwrap(); - pending_txs.push(pending_tx); + + if self.use_local_signing { + // Sign locally with the Signer middleware + for (i, tx) in txs.clone().into_iter().enumerate() { + // The local signer uses eth_sendRawTransaction, so we need to manually set the nonce + let mut tx = tx.clone(); + tx.set_nonce(i as u64); + let pending_tx = signer.send_transaction(tx, None).await.unwrap(); + pending_txs.push(pending_tx); + } + } else { + // Sign on the EE + // Import and unlock all private keys to sign transactions on the EE + let _ = futures::future::join_all([&self.ee_a, &self.ee_b].iter().map(|ee| { + import_and_unlock( + ee.execution_engine.http_url(), + &PRIVATE_KEYS, + KEYSTORE_PASSWORD, + ) + })) + .await; + + for tx in txs.clone().into_iter() { + let pending_tx = self + .ee_a + .execution_engine + .provider + .send_transaction(tx, None) + .await + .unwrap(); + pending_txs.push(pending_tx); + } } /* From 476f3a593c20bbc93d87f691f561a36c00afdfa2 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 15 Apr 2025 08:20:46 +0800 Subject: [PATCH 35/35] Add `MAX_BLOBS_PER_BLOCK_FULU` config (#7161) Add `MAX_BLOBS_PER_BLOCK_FULU` config. 
--- .../src/network_beacon_processor/tests.rs | 134 ++++++++++++++---- .../chiado/config.yaml | 3 +- .../gnosis/config.yaml | 3 +- .../holesky/config.yaml | 3 +- .../mainnet/config.yaml | 3 +- .../sepolia/config.yaml | 3 +- consensus/types/src/chain_spec.rs | 29 +++- 7 files changed, 145 insertions(+), 33 deletions(-) diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 5000941b0d..292e894870 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -9,11 +9,14 @@ use crate::{ sync::{manager::BlockProcessType, SyncMessage}, }; use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::kzg_utils::blobs_to_data_column_sidecars; use beacon_chain::test_utils::{ - test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, + get_kzg, test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, + EphemeralHarnessType, }; use beacon_chain::{BeaconChain, WhenSlotSkipped}; use beacon_processor::{work_reprocessing_queue::*, *}; +use itertools::Itertools; use lighthouse_network::rpc::methods::{BlobsByRangeRequest, MetaDataV3}; use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::{ @@ -29,9 +32,9 @@ use std::time::Duration; use tokio::sync::mpsc; use types::blob_sidecar::FixedBlobSidecarList; use types::{ - Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, Epoch, Hash256, MainnetEthSpec, - ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot, - SubnetId, + Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, DataColumnSidecarList, + DataColumnSubnetId, Epoch, Hash256, MainnetEthSpec, ProposerSlashing, SignedAggregateAndProof, + SignedBeaconBlock, SignedVoluntaryExit, Slot, SubnetId, }; type E = MainnetEthSpec; @@ -52,6 +55,7 @@ struct TestRig { chain: Arc>, next_block: Arc>, next_blobs: Option>, + 
next_data_columns: Option>, attestations: Vec<(Attestation, SubnetId)>, next_block_attestations: Vec<(Attestation, SubnetId)>, next_block_aggregate_attestations: Vec>, @@ -241,7 +245,7 @@ impl TestRig { let network_beacon_processor = Arc::new(network_beacon_processor); let beacon_processor = BeaconProcessor { - network_globals, + network_globals: network_globals.clone(), executor, current_workers: 0, config: beacon_processor_config, @@ -262,15 +266,36 @@ impl TestRig { assert!(beacon_processor.is_ok()); let block = next_block_tuple.0; - let blob_sidecars = if let Some((kzg_proofs, blobs)) = next_block_tuple.1 { - Some(BlobSidecar::build_sidecars(blobs, &block, kzg_proofs, &chain.spec).unwrap()) + let (blob_sidecars, data_columns) = if let Some((kzg_proofs, blobs)) = next_block_tuple.1 { + if chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + let kzg = get_kzg(&chain.spec); + let custody_columns: DataColumnSidecarList = blobs_to_data_column_sidecars( + &blobs.iter().collect_vec(), + kzg_proofs.clone().into_iter().collect_vec(), + &block, + &kzg, + &chain.spec, + ) + .unwrap() + .into_iter() + .filter(|c| network_globals.sampling_columns.contains(&c.index)) + .collect::>(); + + (None, Some(custody_columns)) + } else { + let blob_sidecars = + BlobSidecar::build_sidecars(blobs, &block, kzg_proofs, &chain.spec).unwrap(); + (Some(blob_sidecars), None) + } } else { - None + (None, None) }; + Self { chain, next_block: block, next_blobs: blob_sidecars, + next_data_columns: data_columns, attestations, next_block_attestations, next_block_aggregate_attestations, @@ -323,6 +348,22 @@ impl TestRig { } } + pub fn enqueue_gossip_data_columns(&self, col_index: usize) { + if let Some(data_columns) = self.next_data_columns.as_ref() { + let data_column = data_columns.get(col_index).unwrap(); + self.network_beacon_processor + .send_gossip_data_column_sidecar( + junk_message_id(), + junk_peer_id(), + Client::default(), + DataColumnSubnetId::from_column_index(data_column.index, 
&self.chain.spec), + data_column.clone(), + Duration::from_secs(0), + ) + .unwrap(); + } + } + pub fn custody_columns_count(&self) -> usize { self.network_beacon_processor .network_globals @@ -375,6 +416,19 @@ impl TestRig { } } + pub fn enqueue_single_lookup_rpc_data_columns(&self) { + if let Some(data_columns) = self.next_data_columns.clone() { + self.network_beacon_processor + .send_rpc_custody_columns( + self.next_block.canonical_root(), + data_columns, + Duration::default(), + BlockProcessType::SingleCustodyColumn(1), + ) + .unwrap(); + } + } + pub fn enqueue_blobs_by_range_request(&self, count: u64) { self.network_beacon_processor .send_blobs_by_range_request( @@ -632,6 +686,13 @@ async fn import_gossip_block_acceptably_early() { .await; } + let num_data_columns = rig.next_data_columns.as_ref().map(|c| c.len()).unwrap_or(0); + for i in 0..num_data_columns { + rig.enqueue_gossip_data_columns(i); + rig.assert_event_journal_completes(&[WorkType::GossipDataColumnSidecar]) + .await; + } + // Note: this section of the code is a bit race-y. We're assuming that we can set the slot clock // and check the head in the time between the block arrived early and when its due for // processing. 
@@ -708,19 +769,20 @@ async fn import_gossip_block_at_current_slot() { rig.assert_event_journal_completes(&[WorkType::GossipBlock]) .await; - let num_blobs = rig - .next_blobs - .as_ref() - .map(|blobs| blobs.len()) - .unwrap_or(0); - + let num_blobs = rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0); for i in 0..num_blobs { rig.enqueue_gossip_blob(i); - rig.assert_event_journal_completes(&[WorkType::GossipBlobSidecar]) .await; } + let num_data_columns = rig.next_data_columns.as_ref().map(|c| c.len()).unwrap_or(0); + for i in 0..num_data_columns { + rig.enqueue_gossip_data_columns(i); + rig.assert_event_journal_completes(&[WorkType::GossipDataColumnSidecar]) + .await; + } + assert_eq!( rig.head_root(), rig.next_block.canonical_root(), @@ -773,11 +835,8 @@ async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod ); // Send the block and ensure that the attestation is received back and imported. - let num_blobs = rig - .next_blobs - .as_ref() - .map(|blobs| blobs.len()) - .unwrap_or(0); + let num_blobs = rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0); + let num_data_columns = rig.next_data_columns.as_ref().map(|c| c.len()).unwrap_or(0); let mut events = vec![]; match import_method { BlockImportMethod::Gossip => { @@ -787,6 +846,10 @@ async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod rig.enqueue_gossip_blob(i); events.push(WorkType::GossipBlobSidecar); } + for i in 0..num_data_columns { + rig.enqueue_gossip_data_columns(i); + events.push(WorkType::GossipDataColumnSidecar); + } } BlockImportMethod::Rpc => { rig.enqueue_rpc_block(); @@ -795,6 +858,10 @@ async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod rig.enqueue_single_lookup_rpc_blobs(); events.push(WorkType::RpcBlobs); } + if num_data_columns > 0 { + rig.enqueue_single_lookup_rpc_data_columns(); + events.push(WorkType::RpcCustodyColumn); + } } }; @@ -854,11 +921,8 @@ async fn 
aggregate_attestation_to_unknown_block(import_method: BlockImportMethod ); // Send the block and ensure that the attestation is received back and imported. - let num_blobs = rig - .next_blobs - .as_ref() - .map(|blobs| blobs.len()) - .unwrap_or(0); + let num_blobs = rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0); + let num_data_columns = rig.next_data_columns.as_ref().map(|c| c.len()).unwrap_or(0); let mut events = vec![]; match import_method { BlockImportMethod::Gossip => { @@ -868,6 +932,10 @@ async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod rig.enqueue_gossip_blob(i); events.push(WorkType::GossipBlobSidecar); } + for i in 0..num_data_columns { + rig.enqueue_gossip_data_columns(i); + events.push(WorkType::GossipDataColumnSidecar) + } } BlockImportMethod::Rpc => { rig.enqueue_rpc_block(); @@ -876,6 +944,10 @@ async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod rig.enqueue_single_lookup_rpc_blobs(); events.push(WorkType::RpcBlobs); } + if num_data_columns > 0 { + rig.enqueue_single_lookup_rpc_data_columns(); + events.push(WorkType::RpcCustodyColumn); + } } }; @@ -1060,12 +1132,20 @@ async fn test_rpc_block_reprocessing() { rig.assert_event_journal_completes(&[WorkType::RpcBlock]) .await; - rig.enqueue_single_lookup_rpc_blobs(); - if rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0) > 0 { + let num_blobs = rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0); + if num_blobs > 0 { + rig.enqueue_single_lookup_rpc_blobs(); rig.assert_event_journal_completes(&[WorkType::RpcBlobs]) .await; } + let num_data_columns = rig.next_data_columns.as_ref().map(|c| c.len()).unwrap_or(0); + if num_data_columns > 0 { + rig.enqueue_single_lookup_rpc_data_columns(); + rig.assert_event_journal_completes(&[WorkType::RpcCustodyColumn]) + .await; + } + // next_block shouldn't be processed since it couldn't get the // duplicate cache handle assert_ne!(next_block_root, rig.head_root()); diff --git 
a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index dbfe2707d7..4d4ccdf717 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -148,9 +148,10 @@ MAX_BLOBS_PER_BLOCK_ELECTRA: 2 # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 256 -# DAS +# Fulu NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 +MAX_BLOBS_PER_BLOCK_FULU: 12 diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 4413c21c4b..eece34b89c 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -132,9 +132,10 @@ MAX_BLOBS_PER_BLOCK_ELECTRA: 2 # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 256 -# DAS +# Fulu NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 +MAX_BLOBS_PER_BLOCK_FULU: 12 diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index 58010991bf..19a3f79cc0 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -137,9 +137,10 @@ MAX_BLOBS_PER_BLOCK_ELECTRA: 9 # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 -# DAS +# Fulu NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 
+MAX_BLOBS_PER_BLOCK_FULU: 12 diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 375441e504..886e5d12ed 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -152,9 +152,10 @@ MAX_BLOBS_PER_BLOCK_ELECTRA: 9 # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 -# DAS +# Fulu NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 +MAX_BLOBS_PER_BLOCK_FULU: 12 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index e9e8a3ab14..10be107263 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -138,9 +138,10 @@ MAX_BLOBS_PER_BLOCK_ELECTRA: 9 # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 -# DAS +# Fulu NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 +MAX_BLOBS_PER_BLOCK_FULU: 12 diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 27c324aa2a..2b29ef1f10 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -240,6 +240,11 @@ pub struct ChainSpec { blob_sidecar_subnet_count_electra: u64, max_request_blob_sidecars_electra: u64, + /* + * Networking Fulu + */ + max_blobs_per_block_fulu: u64, + /* * Networking Derived * @@ -655,7 +660,9 @@ impl ChainSpec { /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for `fork`. 
pub fn max_blobs_per_block_by_fork(&self, fork_name: ForkName) -> u64 { - if fork_name.electra_enabled() { + if fork_name.fulu_enabled() { + self.max_blobs_per_block_fulu + } else if fork_name.electra_enabled() { self.max_blobs_per_block_electra } else { self.max_blobs_per_block @@ -992,6 +999,11 @@ impl ChainSpec { blob_sidecar_subnet_count_electra: default_blob_sidecar_subnet_count_electra(), max_request_blob_sidecars_electra: default_max_request_blob_sidecars_electra(), + /* + * Networking Fulu specific + */ + max_blobs_per_block_fulu: default_max_blobs_per_block_fulu(), + /* * Application specific */ @@ -1321,6 +1333,11 @@ impl ChainSpec { blob_sidecar_subnet_count_electra: 2, max_request_blob_sidecars_electra: 256, + /* + * Networking Fulu specific + */ + max_blobs_per_block_fulu: default_max_blobs_per_block_fulu(), + /* * Application specific */ @@ -1540,6 +1557,9 @@ pub struct Config { #[serde(default = "default_custody_requirement")] #[serde(with = "serde_utils::quoted_u64")] custody_requirement: u64, + #[serde(default = "default_max_blobs_per_block_fulu")] + #[serde(with = "serde_utils::quoted_u64")] + max_blobs_per_block_fulu: u64, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -1677,6 +1697,10 @@ const fn default_max_blobs_per_block_electra() -> u64 { 9 } +const fn default_max_blobs_per_block_fulu() -> u64 { + 12 +} + const fn default_attestation_propagation_slot_range() -> u64 { 32 } @@ -1904,6 +1928,7 @@ impl Config { data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count, samples_per_slot: spec.samples_per_slot, custody_requirement: spec.custody_requirement, + max_blobs_per_block_fulu: spec.max_blobs_per_block_fulu, } } @@ -1982,6 +2007,7 @@ impl Config { data_column_sidecar_subnet_count, samples_per_slot, custody_requirement, + max_blobs_per_block_fulu, } = self; if preset_base != E::spec_name().to_string().as_str() { @@ -2064,6 +2090,7 @@ impl Config { data_column_sidecar_subnet_count, samples_per_slot, custody_requirement, + 
max_blobs_per_block_fulu, ..chain_spec.clone() })