From c33307d70287fd3b7a70785f89dadcb737214903 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 21 Jan 2025 12:23:21 -0800 Subject: [PATCH 01/52] Refactor mock builder (#6735) * Update builder api for electra * Refactor mock builder to separate functionality * Return a higher payload value for builder by default * Add additional methods * Cleanup * Add a flag for always returning a max bid * Add logs for debugging * Take builder secret key as an argument * Merge branch 'unstable' into refactor-mock-builder * Change return type for submit_blinded_blocks * Merge branch 'unstable' into refactor-mock-builder * Respect gas_limit from validator registration * Revert "Respect gas_limit from validator registration" This reverts commit 1f7b4a327e95d0c7aea3e28dfd3666c093033d89. * Merge branch 'unstable' into refactor-mock-builder * Remove unnecessary derive --- beacon_node/execution_layer/src/lib.rs | 5 +- .../src/test_utils/mock_builder.rs | 944 +++++++++++------- consensus/types/src/builder_bid.rs | 6 +- 3 files changed, 611 insertions(+), 344 deletions(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index f7abe73543..d5fef4c5aa 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -121,8 +121,7 @@ impl TryFrom> for ProvenancedPayload BlockProposalContents::PayloadAndBlobs { payload: ExecutionPayloadHeader::Fulu(builder_bid.header).into(), @@ -330,7 +329,7 @@ impl> BlockProposalContents { pub parent_hash: ExecutionBlockHash, pub parent_gas_limit: u64, diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 65181dcf4f..3540909fe4 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,10 +1,15 @@ use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; use crate::{Config, 
ExecutionLayer, PayloadAttributes, PayloadParameters}; -use eth2::types::{BlobsBundle, BlockId, StateId, ValidatorId}; +use eth2::types::PublishBlockRequest; +use eth2::types::{ + BlobsBundle, BlockId, BroadcastValidation, EventKind, EventTopic, FullPayloadContents, + ProposerData, StateId, ValidatorId, +}; use eth2::{BeaconNodeHttpClient, Timeouts, CONSENSUS_VERSION_HEADER}; use fork_choice::ForkchoiceUpdateParameters; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; +use slog::{debug, error, info, warn, Logger}; use std::collections::HashMap; use std::fmt::Debug; use std::future::Future; @@ -13,20 +18,26 @@ use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tempfile::NamedTempFile; +use tokio_stream::StreamExt; use tree_hash::TreeHash; use types::builder_bid::{ BuilderBid, BuilderBidBellatrix, BuilderBidCapella, BuilderBidDeneb, BuilderBidElectra, BuilderBidFulu, SignedBuilderBid, }; use types::{ - Address, BeaconState, ChainSpec, EthSpec, ExecPayload, ExecutionPayload, - ExecutionPayloadHeaderRefMut, ExecutionRequests, FixedBytesExtended, ForkName, - ForkVersionedResponse, Hash256, PublicKeyBytes, Signature, SignedBlindedBeaconBlock, - SignedRoot, SignedValidatorRegistrationData, Slot, Uint256, + Address, BeaconState, ChainSpec, Epoch, EthSpec, ExecPayload, ExecutionPayload, + ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionedResponse, Hash256, + PublicKeyBytes, Signature, SignedBlindedBeaconBlock, SignedRoot, + SignedValidatorRegistrationData, Slot, Uint256, }; use types::{ExecutionBlockHash, SecretKey}; use warp::{Filter, Rejection}; +pub const DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42); +pub const DEFAULT_GAS_LIMIT: u64 = 30_000_000; +pub const DEFAULT_BUILDER_PRIVATE_KEY: &str = + "607a11b45a7219cc61a3d9c5fd08c7eebd602a6a19a977f8d3771d5711a550f2"; + #[derive(Clone)] pub enum Operation { FeeRecipient(Address), @@ -259,6 +270,17 @@ impl BidStuff for BuilderBid { } } +// Non referenced 
version of `PayloadParameters` +#[derive(Clone)] +pub struct PayloadParametersCloned { + pub parent_hash: ExecutionBlockHash, + pub parent_gas_limit: u64, + pub proposer_gas_limit: Option, + pub payload_attributes: PayloadAttributes, + pub forkchoice_update_params: ForkchoiceUpdateParameters, + pub current_fork: ForkName, +} + #[derive(Clone)] pub struct MockBuilder { el: ExecutionLayer, @@ -268,6 +290,20 @@ pub struct MockBuilder { builder_sk: SecretKey, operations: Arc>>, invalidate_signatures: Arc>, + genesis_time: Option, + /// Only returns bids for registered validators if set to true. `true` by default. + validate_pubkey: bool, + /// Do not apply any operations if set to `false`. + /// Applying operations might modify the cached header in the execution layer. + /// Use this if you want get_header to return a valid bid that can be eventually submitted as + /// a valid block. + apply_operations: bool, + payload_id_cache: Arc>>, + /// If set to `true`, sets the bid returned by `get_header` to Uint256::MAX + max_bid: bool, + /// A cache that stores the proposers index for a given epoch + proposers_cache: Arc>>>, + log: Logger, } impl MockBuilder { @@ -295,7 +331,12 @@ impl MockBuilder { let builder = MockBuilder::new( el, BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(Duration::from_secs(1))), + true, + true, + false, spec, + None, + executor.log().clone(), ); let host: Ipv4Addr = Ipv4Addr::LOCALHOST; let port = 0; @@ -303,21 +344,47 @@ impl MockBuilder { (builder, server) } + #[allow(clippy::too_many_arguments)] pub fn new( el: ExecutionLayer, beacon_client: BeaconNodeHttpClient, + validate_pubkey: bool, + apply_operations: bool, + max_bid: bool, spec: Arc, + sk: Option<&[u8]>, + log: Logger, ) -> Self { - let sk = SecretKey::random(); + let builder_sk = if let Some(sk_bytes) = sk { + match SecretKey::deserialize(sk_bytes) { + Ok(sk) => sk, + Err(_) => { + error!( + log, + "Invalid sk_bytes provided, generating random secret key" + ); + 
SecretKey::random() + } + } + } else { + SecretKey::deserialize(&hex::decode(DEFAULT_BUILDER_PRIVATE_KEY).unwrap()).unwrap() + }; Self { el, beacon_client, // Should keep spec and context consistent somehow spec, val_registration_cache: Arc::new(RwLock::new(HashMap::new())), - builder_sk: sk, + builder_sk, + validate_pubkey, operations: Arc::new(RwLock::new(vec![])), invalidate_signatures: Arc::new(RwLock::new(false)), + payload_id_cache: Arc::new(RwLock::new(HashMap::new())), + proposers_cache: Arc::new(RwLock::new(HashMap::new())), + apply_operations, + max_bid, + genesis_time: None, + log, } } @@ -342,8 +409,523 @@ impl MockBuilder { } bid.stamp_payload(); } + + /// Return the public key of the builder + pub fn public_key(&self) -> PublicKeyBytes { + self.builder_sk.public_key().compress() + } + + pub async fn register_validators( + &self, + registrations: Vec, + ) -> Result<(), String> { + info!( + self.log, + "Registering validators"; + "count" => registrations.len(), + ); + for registration in registrations { + if !registration.verify_signature(&self.spec) { + error!( + self.log, + "Failed to register validator"; + "error" => "invalid signature", + "validator" => %registration.message.pubkey + ); + return Err("invalid signature".to_string()); + } + self.val_registration_cache + .write() + .insert(registration.message.pubkey, registration); + } + Ok(()) + } + + pub async fn submit_blinded_block( + &self, + block: SignedBlindedBeaconBlock, + ) -> Result, String> { + let root = match &block { + SignedBlindedBeaconBlock::Base(_) | types::SignedBeaconBlock::Altair(_) => { + return Err("invalid fork".to_string()); + } + SignedBlindedBeaconBlock::Bellatrix(block) => { + block.message.body.execution_payload.tree_hash_root() + } + SignedBlindedBeaconBlock::Capella(block) => { + block.message.body.execution_payload.tree_hash_root() + } + SignedBlindedBeaconBlock::Deneb(block) => { + block.message.body.execution_payload.tree_hash_root() + } + 
SignedBlindedBeaconBlock::Electra(block) => { + block.message.body.execution_payload.tree_hash_root() + } + SignedBlindedBeaconBlock::Fulu(block) => { + block.message.body.execution_payload.tree_hash_root() + } + }; + info!( + self.log, + "Submitting blinded beacon block to builder"; + "block_hash" => %root + ); + let payload = self + .el + .get_payload_by_root(&root) + .ok_or_else(|| "missing payload for tx root".to_string())?; + + let (payload, blobs) = payload.deconstruct(); + let full_block = block + .try_into_full_block(Some(payload.clone())) + .ok_or("Internal error, just provided a payload")?; + debug!( + self.log, + "Got full payload, sending to local beacon node for propagation"; + "txs_count" => payload.transactions().len(), + "blob_count" => blobs.as_ref().map(|b| b.commitments.len()) + ); + let publish_block_request = PublishBlockRequest::new( + Arc::new(full_block), + blobs.clone().map(|b| (b.proofs, b.blobs)), + ); + self.beacon_client + .post_beacon_blocks_v2(&publish_block_request, Some(BroadcastValidation::Gossip)) + .await + .map_err(|e| format!("Failed to post blinded block {:?}", e))?; + Ok(FullPayloadContents::new(payload, blobs)) + } + + pub async fn get_header( + &self, + slot: Slot, + parent_hash: ExecutionBlockHash, + pubkey: PublicKeyBytes, + ) -> Result, String> { + info!(self.log, "In get_header"); + // Check if the pubkey has registered with the builder if required + if self.validate_pubkey && !self.val_registration_cache.read().contains_key(&pubkey) { + return Err("validator not registered with builder".to_string()); + } + let payload_parameters = { + let mut guard = self.payload_id_cache.write(); + guard.remove(&parent_hash) + }; + + let payload_parameters = match payload_parameters { + Some(params) => params, + None => { + warn!( + self.log, + "Payload params not cached for parent_hash {}", parent_hash + ); + self.get_payload_params(slot, None, pubkey, None).await? 
+ } + }; + + info!(self.log, "Got payload params"); + + let fork = self.fork_name_at_slot(slot); + let payload_response_type = self + .el + .get_full_payload_caching(PayloadParameters { + parent_hash: payload_parameters.parent_hash, + parent_gas_limit: payload_parameters.parent_gas_limit, + proposer_gas_limit: payload_parameters.proposer_gas_limit, + payload_attributes: &payload_parameters.payload_attributes, + forkchoice_update_params: &payload_parameters.forkchoice_update_params, + current_fork: payload_parameters.current_fork, + }) + .await + .map_err(|e| format!("couldn't get payload {:?}", e))?; + + info!(self.log, "Got payload message, fork {}", fork); + + let mut message = match payload_response_type { + crate::GetPayloadResponseType::Full(payload_response) => { + #[allow(clippy::type_complexity)] + let (payload, value, maybe_blobs_bundle, maybe_requests): ( + ExecutionPayload, + Uint256, + Option>, + Option>, + ) = payload_response.into(); + + match fork { + ForkName::Fulu => BuilderBid::Fulu(BuilderBidFulu { + header: payload + .as_fulu() + .map_err(|_| "incorrect payload variant".to_string())? + .into(), + blob_kzg_commitments: maybe_blobs_bundle + .map(|b| b.commitments) + .unwrap_or_default(), + value: self.get_bid_value(value), + pubkey: self.builder_sk.public_key().compress(), + execution_requests: maybe_requests.unwrap_or_default(), + }), + ForkName::Electra => BuilderBid::Electra(BuilderBidElectra { + header: payload + .as_electra() + .map_err(|_| "incorrect payload variant".to_string())? + .into(), + blob_kzg_commitments: maybe_blobs_bundle + .map(|b| b.commitments) + .unwrap_or_default(), + value: self.get_bid_value(value), + pubkey: self.builder_sk.public_key().compress(), + execution_requests: maybe_requests.unwrap_or_default(), + }), + ForkName::Deneb => BuilderBid::Deneb(BuilderBidDeneb { + header: payload + .as_deneb() + .map_err(|_| "incorrect payload variant".to_string())? 
+ .into(), + blob_kzg_commitments: maybe_blobs_bundle + .map(|b| b.commitments) + .unwrap_or_default(), + value: self.get_bid_value(value), + pubkey: self.builder_sk.public_key().compress(), + }), + ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { + header: payload + .as_capella() + .map_err(|_| "incorrect payload variant".to_string())? + .into(), + value: self.get_bid_value(value), + pubkey: self.builder_sk.public_key().compress(), + }), + ForkName::Bellatrix => BuilderBid::Bellatrix(BuilderBidBellatrix { + header: payload + .as_bellatrix() + .map_err(|_| "incorrect payload variant".to_string())? + .into(), + value: self.get_bid_value(value), + pubkey: self.builder_sk.public_key().compress(), + }), + ForkName::Base | ForkName::Altair => return Err("invalid fork".to_string()), + } + } + _ => panic!("just requested full payload, cannot get blinded"), + }; + + if self.apply_operations { + info!(self.log, "Applying operations"); + self.apply_operations(&mut message); + } + info!(self.log, "Signing builder message"); + + let mut signature = message.sign_builder_message(&self.builder_sk, &self.spec); + + if *self.invalidate_signatures.read() { + signature = Signature::empty(); + }; + let signed_bid = SignedBuilderBid { message, signature }; + info!(self.log, "Builder bid {:?}", &signed_bid.message.value()); + Ok(signed_bid) + } + + fn fork_name_at_slot(&self, slot: Slot) -> ForkName { + self.spec.fork_name_at_slot::(slot) + } + + fn get_bid_value(&self, value: Uint256) -> Uint256 { + if self.max_bid { + Uint256::MAX + } else if !self.apply_operations { + value + } else { + Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI) + } + } + + /// Prepare the execution layer for payload creation every slot for the correct + /// proposer index + pub async fn prepare_execution_layer(&self) -> Result<(), String> { + info!( + self.log, + "Starting a task to prepare the execution layer"; + ); + let mut head_event_stream = self + .beacon_client + 
.get_events::(&[EventTopic::Head]) + .await + .map_err(|e| format!("Failed to get head event {:?}", e))?; + + while let Some(Ok(event)) = head_event_stream.next().await { + match event { + EventKind::Head(head) => { + debug!( + self.log, + "Got a new head event"; + "block_hash" => %head.block + ); + let next_slot = head.slot + 1; + // Find the next proposer index from the cached data or through a beacon api call + let epoch = next_slot.epoch(E::slots_per_epoch()); + let position_in_slot = next_slot.as_u64() % E::slots_per_epoch(); + let proposer_data = { + let proposers_opt = { + let proposers_cache = self.proposers_cache.read(); + proposers_cache.get(&epoch).cloned() + }; + match proposers_opt { + Some(proposers) => proposers + .get(position_in_slot as usize) + .expect("position in slot is max epoch size") + .clone(), + None => { + // make a call to the beacon api and populate the cache + let duties: Vec<_> = self + .beacon_client + .get_validator_duties_proposer(epoch) + .await + .map_err(|e| { + format!( + "Failed to get proposer duties for epoch: {}, {:?}", + epoch, e + ) + })? 
+ .data; + let proposer_data = duties + .get(position_in_slot as usize) + .expect("position in slot is max epoch size") + .clone(); + self.proposers_cache.write().insert(epoch, duties); + proposer_data + } + } + }; + self.prepare_execution_layer_internal( + head.slot, + head.block, + proposer_data.validator_index, + proposer_data.pubkey, + ) + .await?; + } + e => { + warn!( + self.log, + "Got an unexpected event"; + "event" => %e.topic_name() + ); + } + } + } + Ok(()) + } + + async fn prepare_execution_layer_internal( + &self, + current_slot: Slot, + head_block_root: Hash256, + validator_index: u64, + pubkey: PublicKeyBytes, + ) -> Result<(), String> { + let next_slot = current_slot + 1; + let payload_parameters = self + .get_payload_params( + next_slot, + Some(head_block_root), + pubkey, + Some(validator_index), + ) + .await?; + + self.payload_id_cache + .write() + .insert(payload_parameters.parent_hash, payload_parameters); + Ok(()) + } + + /// Get the `PayloadParameters` for requesting an ExecutionPayload for `slot` + /// for the given `validator_index` and `pubkey`. + async fn get_payload_params( + &self, + slot: Slot, + head_block_root: Option, + pubkey: PublicKeyBytes, + validator_index: Option, + ) -> Result { + let fork = self.fork_name_at_slot(slot); + + let block_id = match head_block_root { + Some(block_root) => BlockId::Root(block_root), + None => BlockId::Head, + }; + let head = self + .beacon_client + .get_beacon_blocks::(block_id) + .await + .map_err(|_| "couldn't get head".to_string())? + .ok_or_else(|| "missing head block".to_string())? 
+ .data; + + let head_block_root = head_block_root.unwrap_or(head.canonical_root()); + + let head_execution_payload = head + .message() + .body() + .execution_payload() + .map_err(|_| "pre-merge block".to_string())?; + let head_execution_hash = head_execution_payload.block_hash(); + let head_gas_limit = head_execution_payload.gas_limit(); + + let finalized_execution_hash = self + .beacon_client + .get_beacon_blocks::(BlockId::Finalized) + .await + .map_err(|_| "couldn't get finalized block".to_string())? + .ok_or_else(|| "missing finalized block".to_string())? + .data + .message() + .body() + .execution_payload() + .map_err(|_| "pre-merge block".to_string())? + .block_hash(); + + let justified_execution_hash = self + .beacon_client + .get_beacon_blocks::(BlockId::Justified) + .await + .map_err(|_| "couldn't get justified block".to_string())? + .ok_or_else(|| "missing justified block".to_string())? + .data + .message() + .body() + .execution_payload() + .map_err(|_| "pre-merge block".to_string())? + .block_hash(); + + let (fee_recipient, proposer_gas_limit) = + match self.val_registration_cache.read().get(&pubkey) { + Some(cached_data) => ( + cached_data.message.fee_recipient, + cached_data.message.gas_limit, + ), + None => { + warn!( + self.log, + "Validator not registered {}, using default fee recipient and gas limits", + pubkey + ); + (DEFAULT_FEE_RECIPIENT, DEFAULT_GAS_LIMIT) + } + }; + let slots_since_genesis = slot.as_u64() - self.spec.genesis_slot.as_u64(); + + let genesis_time = if let Some(genesis_time) = self.genesis_time { + genesis_time + } else { + self.beacon_client + .get_beacon_genesis() + .await + .map_err(|_| "couldn't get beacon genesis".to_string())? + .data + .genesis_time + }; + let timestamp = (slots_since_genesis * self.spec.seconds_per_slot) + genesis_time; + + let head_state: BeaconState = self + .beacon_client + .get_debug_beacon_states(StateId::Head) + .await + .map_err(|_| "couldn't get state".to_string())? 
+ .ok_or_else(|| "missing state".to_string())? + .data; + + let prev_randao = head_state + .get_randao_mix(head_state.current_epoch()) + .map_err(|_| "couldn't get prev randao".to_string())?; + + let expected_withdrawals = if fork.capella_enabled() { + Some( + self.beacon_client + .get_expected_withdrawals(&StateId::Head) + .await + .map_err(|e| format!("Failed to get expected withdrawals: {:?}", e))? + .data, + ) + } else { + None + }; + + let payload_attributes = match fork { + // the withdrawals root is filled in by operations, but we supply the valid withdrawals + // first to avoid polluting the execution block generator with invalid payload attributes + // NOTE: this was part of an effort to add payload attribute uniqueness checks, + // which was abandoned because it broke too many tests in subtle ways. + ForkName::Bellatrix | ForkName::Capella => PayloadAttributes::new( + timestamp, + *prev_randao, + fee_recipient, + expected_withdrawals, + None, + ), + ForkName::Deneb | ForkName::Electra | ForkName::Fulu => PayloadAttributes::new( + timestamp, + *prev_randao, + fee_recipient, + expected_withdrawals, + Some(head_block_root), + ), + ForkName::Base | ForkName::Altair => { + return Err("invalid fork".to_string()); + } + }; + + // Tells the execution layer that the `validator_index` is expected to propose + // a block on top of `head_block_root` for the given slot + let val_index = validator_index.unwrap_or( + self.beacon_client + .get_beacon_states_validator_id(StateId::Head, &ValidatorId::PublicKey(pubkey)) + .await + .map_err(|_| "couldn't get validator".to_string())? + .ok_or_else(|| "missing validator".to_string())? 
+ .data + .index, + ); + + self.el + .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) + .await; + + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_hash: Some(head_execution_hash), + finalized_hash: Some(finalized_execution_hash), + justified_hash: Some(justified_execution_hash), + head_root: head_block_root, + }; + + let _status = self + .el + .notify_forkchoice_updated( + head_execution_hash, + justified_execution_hash, + finalized_execution_hash, + slot - 1, + head_block_root, + ) + .await + .map_err(|e| format!("fcu call failed : {:?}", e))?; + + let payload_parameters = PayloadParametersCloned { + parent_hash: head_execution_hash, + parent_gas_limit: head_gas_limit, + proposer_gas_limit: Some(proposer_gas_limit), + payload_attributes, + forkchoice_update_params, + current_fork: fork, + }; + Ok(payload_parameters) + } } +/// Serve the builder api using warp. Uses the functions defined in `MockBuilder` to serve +/// the requests. +/// +/// We should eventually move this to axum when we move everything else. 
pub fn serve( listen_addr: Ipv4Addr, listen_port: u16, @@ -362,19 +944,16 @@ pub fn serve( .and(warp::path::end()) .and(ctx_filter.clone()) .and_then( - |registrations: Vec, builder: MockBuilder| async move { - for registration in registrations { - if !registration.verify_signature(&builder.spec) { - return Err(reject("invalid signature")); - } - builder - .val_registration_cache - .write() - .insert(registration.message.pubkey, registration); - } - Ok(warp::reply()) + |registrations: Vec, + builder: MockBuilder| async move { + builder + .register_validators(registrations) + .await + .map_err(|e| warp::reject::custom(Custom(e)))?; + Ok::<_, Rejection>(warp::reply()) }, - ); + ) + .boxed(); let blinded_block = prefix @@ -387,30 +966,10 @@ pub fn serve( |block: SignedBlindedBeaconBlock, fork_name: ForkName, builder: MockBuilder| async move { - let root = match block { - SignedBlindedBeaconBlock::Base(_) | types::SignedBeaconBlock::Altair(_) => { - return Err(reject("invalid fork")); - } - SignedBlindedBeaconBlock::Bellatrix(block) => { - block.message.body.execution_payload.tree_hash_root() - } - SignedBlindedBeaconBlock::Capella(block) => { - block.message.body.execution_payload.tree_hash_root() - } - SignedBlindedBeaconBlock::Deneb(block) => { - block.message.body.execution_payload.tree_hash_root() - } - SignedBlindedBeaconBlock::Electra(block) => { - block.message.body.execution_payload.tree_hash_root() - } - SignedBlindedBeaconBlock::Fulu(block) => { - block.message.body.execution_payload.tree_hash_root() - } - }; let payload = builder - .el - .get_payload_by_root(&root) - .ok_or_else(|| reject("missing payload for tx root"))?; + .submit_blinded_block(block) + .await + .map_err(|e| warp::reject::custom(Custom(e)))?; let resp: ForkVersionedResponse<_> = ForkVersionedResponse { version: Some(fork_name), metadata: Default::default(), @@ -453,305 +1012,12 @@ pub fn serve( parent_hash: ExecutionBlockHash, pubkey: PublicKeyBytes, builder: MockBuilder| async move { - 
let fork = builder.spec.fork_name_at_slot::(slot); - let signed_cached_data = builder - .val_registration_cache - .read() - .get(&pubkey) - .ok_or_else(|| reject("missing registration"))? - .clone(); - let cached_data = signed_cached_data.message; - - let head = builder - .beacon_client - .get_beacon_blocks::(BlockId::Head) + let fork_name = builder.fork_name_at_slot(slot); + let signed_bid = builder + .get_header(slot, parent_hash, pubkey) .await - .map_err(|_| reject("couldn't get head"))? - .ok_or_else(|| reject("missing head block"))?; + .map_err(|e| warp::reject::custom(Custom(e)))?; - let block = head.data.message(); - let head_block_root = block.tree_hash_root(); - let head_execution_payload = block - .body() - .execution_payload() - .map_err(|_| reject("pre-merge block"))?; - let head_execution_hash = head_execution_payload.block_hash(); - let head_gas_limit = head_execution_payload.gas_limit(); - if head_execution_hash != parent_hash { - return Err(reject("head mismatch")); - } - - let finalized_execution_hash = builder - .beacon_client - .get_beacon_blocks::(BlockId::Finalized) - .await - .map_err(|_| reject("couldn't get finalized block"))? - .ok_or_else(|| reject("missing finalized block"))? - .data - .message() - .body() - .execution_payload() - .map_err(|_| reject("pre-merge block"))? - .block_hash(); - - let justified_execution_hash = builder - .beacon_client - .get_beacon_blocks::(BlockId::Justified) - .await - .map_err(|_| reject("couldn't get justified block"))? - .ok_or_else(|| reject("missing justified block"))? - .data - .message() - .body() - .execution_payload() - .map_err(|_| reject("pre-merge block"))? - .block_hash(); - - let val_index = builder - .beacon_client - .get_beacon_states_validator_id(StateId::Head, &ValidatorId::PublicKey(pubkey)) - .await - .map_err(|_| reject("couldn't get validator"))? - .ok_or_else(|| reject("missing validator"))? 
- .data - .index; - let fee_recipient = cached_data.fee_recipient; - let slots_since_genesis = slot.as_u64() - builder.spec.genesis_slot.as_u64(); - - let genesis_data = builder - .beacon_client - .get_beacon_genesis() - .await - .map_err(|_| reject("couldn't get beacon genesis"))? - .data; - let genesis_time = genesis_data.genesis_time; - let timestamp = - (slots_since_genesis * builder.spec.seconds_per_slot) + genesis_time; - - let head_state: BeaconState = builder - .beacon_client - .get_debug_beacon_states(StateId::Head) - .await - .map_err(|_| reject("couldn't get state"))? - .ok_or_else(|| reject("missing state"))? - .data; - let prev_randao = head_state - .get_randao_mix(head_state.current_epoch()) - .map_err(|_| reject("couldn't get prev randao"))?; - - let expected_withdrawals = if fork.capella_enabled() { - Some( - builder - .beacon_client - .get_expected_withdrawals(&StateId::Head) - .await - .unwrap() - .data, - ) - } else { - None - }; - - let payload_attributes = match fork { - // the withdrawals root is filled in by operations, but we supply the valid withdrawals - // first to avoid polluting the execution block generator with invalid payload attributes - // NOTE: this was part of an effort to add payload attribute uniqueness checks, - // which was abandoned because it broke too many tests in subtle ways. 
- ForkName::Bellatrix | ForkName::Capella => PayloadAttributes::new( - timestamp, - *prev_randao, - fee_recipient, - expected_withdrawals, - None, - ), - ForkName::Deneb | ForkName::Electra | ForkName::Fulu => PayloadAttributes::new( - timestamp, - *prev_randao, - fee_recipient, - expected_withdrawals, - Some(head_block_root), - ), - ForkName::Base | ForkName::Altair => { - return Err(reject("invalid fork")); - } - }; - - builder - .el - .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) - .await; - - let forkchoice_update_params = ForkchoiceUpdateParameters { - head_root: Hash256::zero(), - head_hash: None, - justified_hash: Some(justified_execution_hash), - finalized_hash: Some(finalized_execution_hash), - }; - - let proposer_gas_limit = builder - .val_registration_cache - .read() - .get(&pubkey) - .map(|v| v.message.gas_limit); - - let payload_parameters = PayloadParameters { - parent_hash: head_execution_hash, - parent_gas_limit: head_gas_limit, - proposer_gas_limit, - payload_attributes: &payload_attributes, - forkchoice_update_params: &forkchoice_update_params, - current_fork: fork, - }; - - let payload_response_type = builder - .el - .get_full_payload_caching(payload_parameters) - .await - .map_err(|_| reject("couldn't get payload"))?; - - let mut message = match payload_response_type { - crate::GetPayloadResponseType::Full(payload_response) => { - #[allow(clippy::type_complexity)] - let (payload, _block_value, maybe_blobs_bundle, _maybe_requests): ( - ExecutionPayload, - Uint256, - Option>, - Option>, - ) = payload_response.into(); - - match fork { - ForkName::Fulu => BuilderBid::Fulu(BuilderBidFulu { - header: payload - .as_fulu() - .map_err(|_| reject("incorrect payload variant"))? 
- .into(), - blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) - .unwrap_or_default(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Electra => BuilderBid::Electra(BuilderBidElectra { - header: payload - .as_electra() - .map_err(|_| reject("incorrect payload variant"))? - .into(), - blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) - .unwrap_or_default(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Deneb => BuilderBid::Deneb(BuilderBidDeneb { - header: payload - .as_deneb() - .map_err(|_| reject("incorrect payload variant"))? - .into(), - blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) - .unwrap_or_default(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { - header: payload - .as_capella() - .map_err(|_| reject("incorrect payload variant"))? - .into(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Bellatrix => BuilderBid::Bellatrix(BuilderBidBellatrix { - header: payload - .as_bellatrix() - .map_err(|_| reject("incorrect payload variant"))? 
- .into(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Base | ForkName::Altair => { - return Err(reject("invalid fork")) - } - } - } - crate::GetPayloadResponseType::Blinded(payload_response) => { - #[allow(clippy::type_complexity)] - let (payload, _block_value, maybe_blobs_bundle, _maybe_requests): ( - ExecutionPayload, - Uint256, - Option>, - Option>, - ) = payload_response.into(); - match fork { - ForkName::Fulu => BuilderBid::Fulu(BuilderBidFulu { - header: payload - .as_fulu() - .map_err(|_| reject("incorrect payload variant"))? - .into(), - blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) - .unwrap_or_default(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Electra => BuilderBid::Electra(BuilderBidElectra { - header: payload - .as_electra() - .map_err(|_| reject("incorrect payload variant"))? - .into(), - blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) - .unwrap_or_default(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Deneb => BuilderBid::Deneb(BuilderBidDeneb { - header: payload - .as_deneb() - .map_err(|_| reject("incorrect payload variant"))? - .into(), - blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) - .unwrap_or_default(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { - header: payload - .as_capella() - .map_err(|_| reject("incorrect payload variant"))? 
- .into(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Bellatrix => BuilderBid::Bellatrix(BuilderBidBellatrix { - header: payload - .as_bellatrix() - .map_err(|_| reject("incorrect payload variant"))? - .into(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Base | ForkName::Altair => { - return Err(reject("invalid fork")) - } - } - } - }; - - builder.apply_operations(&mut message); - - let mut signature = - message.sign_builder_message(&builder.builder_sk, &builder.spec); - - if *builder.invalidate_signatures.read() { - signature = Signature::empty(); - } - - let fork_name = builder - .spec - .fork_name_at_epoch(slot.epoch(E::slots_per_epoch())); - let signed_bid = SignedBuilderBid { message, signature }; let resp: ForkVersionedResponse<_> = ForkVersionedResponse { version: Some(fork_name), metadata: Default::default(), diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 2ce46ca704..ac53c41216 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -2,8 +2,8 @@ use crate::beacon_block_body::KzgCommitments; use crate::{ ChainSpec, EthSpec, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, - ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ForkName, ForkVersionDeserialize, - SignedRoot, Uint256, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, + ForkVersionDeserialize, SignedRoot, Uint256, }; use bls::PublicKeyBytes; use bls::Signature; @@ -36,6 +36,8 @@ pub struct BuilderBid { pub header: ExecutionPayloadHeaderFulu, #[superstruct(only(Deneb, Electra, Fulu))] pub blob_kzg_commitments: KzgCommitments, + #[superstruct(only(Electra, Fulu))] + pub execution_requests: 
ExecutionRequests, #[serde(with = "serde_utils::quoted_u256")] pub value: Uint256, pub pubkey: PublicKeyBytes, From 2b6ec96b4c0cacf9d1a95bdfcc1ff071d2e2f2a0 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 22 Jan 2025 15:05:29 +1100 Subject: [PATCH 02/52] Add MetaData V3 support to `node/identity` API (#6827) * Add metadata v3 support to `node/identity` api. --- beacon_node/http_api/src/lib.rs | 61 ++++++++++++++++++++--------- beacon_node/http_api/tests/tests.rs | 4 +- common/eth2/src/types.rs | 8 ++++ 3 files changed, 52 insertions(+), 21 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5dc9055c6c..29c27198c0 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -52,6 +52,7 @@ use eth2::types::{ }; use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use health_metrics::observe::Observe; +use lighthouse_network::rpc::methods::MetaData; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; use logging::SSELoggingComponents; @@ -82,6 +83,7 @@ use tokio_stream::{ wrappers::{errors::BroadcastStreamRecvError, BroadcastStream}, StreamExt, }; +use types::ChainSpec; use types::{ fork_versioned_response::EmptyMetadata, Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, @@ -2898,36 +2900,24 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(network_globals.clone()) + .and(chain_filter.clone()) .then( |task_spawner: TaskSpawner, - network_globals: Arc>| { + network_globals: Arc>, + chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { let enr = network_globals.local_enr(); let p2p_addresses = enr.multiaddr_p2p_tcp(); let discovery_addresses = enr.multiaddr_p2p_udp(); - let meta_data = network_globals.local_metadata.read(); 
Ok(api_types::GenericResponse::from(api_types::IdentityData { peer_id: network_globals.local_peer_id().to_base58(), enr, p2p_addresses, discovery_addresses, - metadata: api_types::MetaData { - seq_number: *meta_data.seq_number(), - attnets: format!( - "0x{}", - hex::encode(meta_data.attnets().clone().into_bytes()), - ), - syncnets: format!( - "0x{}", - hex::encode( - meta_data - .syncnets() - .cloned() - .unwrap_or_default() - .into_bytes() - ) - ), - }, + metadata: from_meta_data::( + &network_globals.local_metadata, + &chain.spec, + ), })) }) }, @@ -4844,6 +4834,39 @@ pub fn serve( Ok(http_server) } +fn from_meta_data( + meta_data: &RwLock>, + spec: &ChainSpec, +) -> api_types::MetaData { + let meta_data = meta_data.read(); + let format_hex = |bytes: &[u8]| format!("0x{}", hex::encode(bytes)); + + let seq_number = *meta_data.seq_number(); + let attnets = format_hex(&meta_data.attnets().clone().into_bytes()); + let syncnets = format_hex( + &meta_data + .syncnets() + .cloned() + .unwrap_or_default() + .into_bytes(), + ); + + if spec.is_peer_das_scheduled() { + api_types::MetaData::V3(api_types::MetaDataV3 { + seq_number, + attnets, + syncnets, + custody_group_count: meta_data.custody_group_count().cloned().unwrap_or_default(), + }) + } else { + api_types::MetaData::V2(api_types::MetaDataV2 { + seq_number, + attnets, + syncnets, + }) + } +} + /// Publish a message to the libp2p pubsub network. 
fn publish_pubsub_message( network_tx: &UnboundedSender>, diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index dd6a92603a..d9b3c8556c 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -2362,11 +2362,11 @@ impl ApiTester { enr: self.local_enr.clone(), p2p_addresses: self.local_enr.multiaddr_p2p_tcp(), discovery_addresses: self.local_enr.multiaddr_p2p_udp(), - metadata: eth2::types::MetaData { + metadata: MetaData::V2(MetaDataV2 { seq_number: 0, attnets: "0x0000000000000000".to_string(), syncnets: "0x00".to_string(), - }, + }), }; assert_eq!(result, expected); diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 6d76101cb6..c6e95e1ba3 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -584,12 +584,20 @@ pub struct IdentityData { pub metadata: MetaData, } +#[superstruct( + variants(V2, V3), + variant_attributes(derive(Clone, Debug, PartialEq, Serialize, Deserialize)) +)] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(untagged)] pub struct MetaData { #[serde(with = "serde_utils::quoted_u64")] pub seq_number: u64, pub attnets: String, pub syncnets: String, + #[superstruct(only(V3))] + #[serde(with = "serde_utils::quoted_u64")] + pub custody_group_count: u64, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] From f008b84079bbb6eb86de22bb3421dfc8263a5650 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 22 Jan 2025 15:05:32 +1100 Subject: [PATCH 03/52] Avoid computing columns from EL blobs if block has already been imported (#6816) * Avoid computing columns from EL blobs if block has already been imported. * Downgrade a `warn` log to `debug` and update handling. 
--- beacon_node/beacon_chain/src/fetch_blobs.rs | 31 ++++++++++++++++----- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/fetch_blobs.rs b/beacon_node/beacon_chain/src/fetch_blobs.rs index 49e46a50fe..5bc2b92ec3 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs.rs @@ -163,6 +163,20 @@ pub async fn fetch_and_process_engine_blobs( return Ok(None); } + if chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + // Avoid computing columns if block has already been imported. + debug!( + log, + "Ignoring EL blobs response"; + "info" => "block has already been imported", + ); + return Ok(None); + } + let data_columns_receiver = spawn_compute_and_publish_data_columns_task( &chain, block.clone(), @@ -248,18 +262,21 @@ fn spawn_compute_and_publish_data_columns_task( } }; - if let Err(e) = data_columns_sender.send(all_data_columns.clone()) { - error!(log, "Failed to send computed data columns"; "error" => ?e); + if data_columns_sender.send(all_data_columns.clone()).is_err() { + // Data column receiver have been dropped - block may have already been imported. + // This race condition exists because gossip columns may arrive and trigger block + // import during the computation. Here we just drop the computed columns. + debug!( + log, + "Failed to send computed data columns"; + ); + return; }; - // Check indices from cache before sending the columns, to make sure we don't - // publish components already seen on gossip. - let is_supernode = chain_cloned.data_availability_checker.is_supernode(); - // At the moment non supernodes are not required to publish any columns. // TODO(das): we could experiment with having full nodes publish their custodied // columns here. 
- if !is_supernode { + if !chain_cloned.data_availability_checker.is_supernode() { return; } From 54e37096b61c69991a9044be782bcd41ed5d9ad3 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 22 Jan 2025 23:29:56 +1100 Subject: [PATCH 04/52] Update discv5 (#6836) * Update discv5 dep * Handle yanked crates --- Cargo.lock | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 29ffdc49ba..ae7861f44f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2192,9 +2192,9 @@ dependencies = [ [[package]] name = "discv5" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "898d136ecb64116ec68aecf14d889bd30f8b1fe0c19e262953f7388dbe77052e" +checksum = "c4b4e7798d2ff74e29cee344dc490af947ae657d6ab5273dde35d58ce06a4d71" dependencies = [ "aes 0.8.4", "aes-gcm", @@ -4361,7 +4361,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.7.0", + "parity-scale-codec 3.6.12", ] [[package]] @@ -4793,7 +4793,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -6278,16 +6278,15 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.7.0" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8be4817d39f3272f69c59fe05d0535ae6456c2dc2fa1ba02910296c7e0a5c590" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arrayvec", "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", - "parity-scale-codec-derive 3.7.0", - "rustversion", + "parity-scale-codec-derive 3.6.12", "serde", ] @@ -6305,14 +6304,14 @@ dependencies = [ 
[[package]] name = "parity-scale-codec-derive" -version = "3.7.0" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8781a75c6205af67215f382092b6e0a4ff3734798523e69073d4bcd294ec767b" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.89", + "syn 1.0.109", ] [[package]] @@ -7357,7 +7356,7 @@ dependencies = [ "fastrlp", "num-bigint", "num-traits", - "parity-scale-codec 3.7.0", + "parity-scale-codec 3.6.12", "primitive-types 0.12.2", "proptest", "rand", @@ -7633,7 +7632,7 @@ checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b" dependencies = [ "cfg-if", "derive_more 1.0.0", - "parity-scale-codec 3.7.0", + "parity-scale-codec 3.6.12", "scale-info-derive", ] From 266b24112306355bdfa64cdc0d7b63f2e3b4572a Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 22 Jan 2025 16:34:22 -0800 Subject: [PATCH 05/52] Electra minor refactorings (#6839) N/A Fix some typos and other minor refactorings in the electra code. Thanks @jtraglia for bringing them up. Note to reviewiers: 47803496dedf1d0e6e4b11f527afff0119976ff0 is the commit that needs looking into in detail. 
The rest are very minor refactorings --- .../operation_pool/src/attestation_storage.rs | 2 +- .../src/per_block_processing.rs | 26 +++++------- .../src/per_epoch_processing/single_pass.rs | 10 ++--- .../state_processing/src/upgrade/electra.rs | 3 +- consensus/types/src/beacon_state.rs | 42 +++++-------------- consensus/types/src/validator.rs | 12 +++--- 6 files changed, 33 insertions(+), 62 deletions(-) diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index 083c1170f0..49ef5c279c 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -214,7 +214,7 @@ impl CompactIndexedAttestationElectra { .is_zero() } - /// Returns `true` if aggregated, otherwise `false`. + /// Returns `true` if aggregated, otherwise `false`. pub fn aggregate_same_committee(&mut self, other: &Self) -> bool { if self.committee_bits != other.committee_bits { return false; diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 502ad25838..ef4799c245 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -523,9 +523,9 @@ pub fn get_expected_withdrawals( // [New in Electra:EIP7251] // Consume pending partial withdrawals let processed_partial_withdrawals_count = - if let Ok(partial_withdrawals) = state.pending_partial_withdrawals() { + if let Ok(pending_partial_withdrawals) = state.pending_partial_withdrawals() { let mut processed_partial_withdrawals_count = 0; - for withdrawal in partial_withdrawals { + for withdrawal in pending_partial_withdrawals { if withdrawal.withdrawable_epoch > epoch || withdrawals.len() == spec.max_pending_partials_per_withdrawals_sweep as usize { @@ -552,7 +552,7 @@ pub fn get_expected_withdrawals( validator_index: withdrawal.validator_index, address: validator 
.get_execution_withdrawal_address(spec) - .ok_or(BeaconStateError::NonExecutionAddresWithdrawalCredential)?, + .ok_or(BeaconStateError::NonExecutionAddressWithdrawalCredential)?, amount: withdrawable_balance, }); withdrawal_index.safe_add_assign(1)?; @@ -583,7 +583,7 @@ pub fn get_expected_withdrawals( validator_index as usize, ))? .safe_sub(partially_withdrawn_balance)?; - if validator.is_fully_withdrawable_at(balance, epoch, spec, fork_name) { + if validator.is_fully_withdrawable_validator(balance, epoch, spec, fork_name) { withdrawals.push(Withdrawal { index: withdrawal_index, validator_index, @@ -600,9 +600,7 @@ pub fn get_expected_withdrawals( address: validator .get_execution_withdrawal_address(spec) .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?, - amount: balance.safe_sub( - validator.get_max_effective_balance(spec, state.fork_name_unchecked()), - )?, + amount: balance.safe_sub(validator.get_max_effective_balance(spec, fork_name))?, }); withdrawal_index.safe_add_assign(1)?; } @@ -624,7 +622,7 @@ pub fn process_withdrawals>( spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { if state.fork_name_unchecked().capella_enabled() { - let (expected_withdrawals, partial_withdrawals_count) = + let (expected_withdrawals, processed_partial_withdrawals_count) = get_expected_withdrawals(state, spec)?; let expected_root = expected_withdrawals.tree_hash_root(); let withdrawals_root = payload.withdrawals_root()?; @@ -645,14 +643,10 @@ pub fn process_withdrawals>( } // Update pending partial withdrawals [New in Electra:EIP7251] - if let Some(partial_withdrawals_count) = partial_withdrawals_count { - // TODO(electra): Use efficient pop_front after milhouse release https://github.com/sigp/milhouse/pull/38 - let new_partial_withdrawals = state - .pending_partial_withdrawals()? - .iter_from(partial_withdrawals_count)? - .cloned() - .collect::>(); - *state.pending_partial_withdrawals_mut()? 
= List::new(new_partial_withdrawals)?; + if let Some(processed_partial_withdrawals_count) = processed_partial_withdrawals_count { + state + .pending_partial_withdrawals_mut()? + .pop_front(processed_partial_withdrawals_count)?; } // Update the next withdrawal index if this block contained withdrawals diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index a4a81c8eef..5c31669a60 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -1075,13 +1075,9 @@ fn process_pending_consolidations( next_pending_consolidation.safe_add_assign(1)?; } - let new_pending_consolidations = List::try_from_iter( - state - .pending_consolidations()? - .iter_from(next_pending_consolidation)? - .cloned(), - )?; - *state.pending_consolidations_mut()? = new_pending_consolidations; + state + .pending_consolidations_mut()? 
+ .pop_front(next_pending_consolidation)?; // the spec tests require we don't perform effective balance updates when testing pending_consolidations if !perform_effective_balance_updates { diff --git a/consensus/state_processing/src/upgrade/electra.rs b/consensus/state_processing/src/upgrade/electra.rs index 0f32e1553d..258b28a45b 100644 --- a/consensus/state_processing/src/upgrade/electra.rs +++ b/consensus/state_processing/src/upgrade/electra.rs @@ -47,10 +47,11 @@ pub fn upgrade_to_electra( .enumerate() .filter(|(_, validator)| validator.activation_epoch == spec.far_future_epoch) .sorted_by_key(|(index, validator)| (validator.activation_eligibility_epoch, *index)) + .map(|(index, _)| index) .collect::>(); // Process validators to queue entire balance and reset them - for (index, _) in pre_activation { + for index in pre_activation { let balance = post .balances_mut() .get_mut(index) diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 6f44998cdf..157271b227 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -161,7 +161,7 @@ pub enum Error { InvalidFlagIndex(usize), MerkleTreeError(merkle_proof::MerkleTreeError), PartialWithdrawalCountInvalid(usize), - NonExecutionAddresWithdrawalCredential, + NonExecutionAddressWithdrawalCredential, NoCommitteeFound(CommitteeIndex), InvalidCommitteeIndex(CommitteeIndex), InvalidSelectionProof { @@ -2214,7 +2214,7 @@ impl BeaconState { // ******* Electra accessors ******* - /// Return the churn limit for the current epoch. + /// Return the churn limit for the current epoch. 
pub fn get_balance_churn_limit(&self, spec: &ChainSpec) -> Result { let total_active_balance = self.get_total_active_balance()?; let churn = std::cmp::max( @@ -2329,21 +2329,12 @@ impl BeaconState { | BeaconState::Bellatrix(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) => Err(Error::IncorrectStateVariant), - BeaconState::Electra(_) => { - let state = self.as_electra_mut()?; - + BeaconState::Electra(_) | BeaconState::Fulu(_) => { // Consume the balance and update state variables - state.exit_balance_to_consume = exit_balance_to_consume.safe_sub(exit_balance)?; - state.earliest_exit_epoch = earliest_exit_epoch; - Ok(state.earliest_exit_epoch) - } - BeaconState::Fulu(_) => { - let state = self.as_fulu_mut()?; - - // Consume the balance and update state variables - state.exit_balance_to_consume = exit_balance_to_consume.safe_sub(exit_balance)?; - state.earliest_exit_epoch = earliest_exit_epoch; - Ok(state.earliest_exit_epoch) + *self.exit_balance_to_consume_mut()? = + exit_balance_to_consume.safe_sub(exit_balance)?; + *self.earliest_exit_epoch_mut()? = earliest_exit_epoch; + self.earliest_exit_epoch() } } } @@ -2385,23 +2376,12 @@ impl BeaconState { | BeaconState::Bellatrix(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) => Err(Error::IncorrectStateVariant), - BeaconState::Electra(_) => { - let state = self.as_electra_mut()?; - + BeaconState::Electra(_) | BeaconState::Fulu(_) => { // Consume the balance and update state variables. - state.consolidation_balance_to_consume = + *self.consolidation_balance_to_consume_mut()? = consolidation_balance_to_consume.safe_sub(consolidation_balance)?; - state.earliest_consolidation_epoch = earliest_consolidation_epoch; - Ok(state.earliest_consolidation_epoch) - } - BeaconState::Fulu(_) => { - let state = self.as_fulu_mut()?; - - // Consume the balance and update state variables. 
- state.consolidation_balance_to_consume = - consolidation_balance_to_consume.safe_sub(consolidation_balance)?; - state.earliest_consolidation_epoch = earliest_consolidation_epoch; - Ok(state.earliest_consolidation_epoch) + *self.earliest_consolidation_epoch_mut()? = earliest_consolidation_epoch; + self.earliest_consolidation_epoch() } } } diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 222b9292a2..5aed90d2c1 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -56,7 +56,7 @@ impl Validator { }; let max_effective_balance = validator.get_max_effective_balance(spec, fork_name); - // safe math is unnecessary here since the spec.effecive_balance_increment is never <= 0 + // safe math is unnecessary here since the spec.effective_balance_increment is never <= 0 validator.effective_balance = std::cmp::min( amount - (amount % spec.effective_balance_increment), max_effective_balance, @@ -195,7 +195,7 @@ impl Validator { /// Returns `true` if the validator is fully withdrawable at some epoch. /// /// Calls the correct function depending on the provided `fork_name`. - pub fn is_fully_withdrawable_at( + pub fn is_fully_withdrawable_validator( &self, balance: u64, epoch: Epoch, @@ -203,14 +203,14 @@ impl Validator { current_fork: ForkName, ) -> bool { if current_fork.electra_enabled() { - self.is_fully_withdrawable_at_electra(balance, epoch, spec) + self.is_fully_withdrawable_validator_electra(balance, epoch, spec) } else { - self.is_fully_withdrawable_at_capella(balance, epoch, spec) + self.is_fully_withdrawable_validator_capella(balance, epoch, spec) } } /// Returns `true` if the validator is fully withdrawable at some epoch. - fn is_fully_withdrawable_at_capella( + fn is_fully_withdrawable_validator_capella( &self, balance: u64, epoch: Epoch, @@ -222,7 +222,7 @@ impl Validator { /// Returns `true` if the validator is fully withdrawable at some epoch. /// /// Modified in electra as part of EIP 7251. 
- fn is_fully_withdrawable_at_electra( + fn is_fully_withdrawable_validator_electra( &self, balance: u64, epoch: Epoch, From a1b7d616b47604ec0cd1afb5543e03e68b629f96 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 23 Jan 2025 09:12:16 +0700 Subject: [PATCH 06/52] Modularize beacon node backend (#4718) #4669 Modularize the beacon node backend to make it easier to add new database implementations --- Cargo.lock | 2 + Makefile | 2 +- .../overflow_lru_cache.rs | 11 +- .../beacon_chain/src/historical_blocks.rs | 11 +- .../src/schema_change/migration_schema_v21.rs | 8 +- .../src/schema_change/migration_schema_v22.rs | 17 +- beacon_node/beacon_chain/src/test_utils.rs | 15 +- .../beacon_chain/tests/op_verification.rs | 5 +- beacon_node/beacon_chain/tests/store_tests.rs | 17 +- beacon_node/client/src/builder.rs | 5 +- beacon_node/http_api/tests/tests.rs | 4 +- beacon_node/src/cli.rs | 9 + beacon_node/src/config.rs | 4 + beacon_node/src/lib.rs | 13 +- beacon_node/store/Cargo.toml | 8 +- beacon_node/store/src/chunked_vector.rs | 9 +- beacon_node/store/src/config.rs | 25 +- beacon_node/store/src/database.rs | 5 + beacon_node/store/src/database/interface.rs | 220 ++++++++++ .../store/src/database/leveldb_impl.rs | 304 +++++++++++++ beacon_node/store/src/database/redb_impl.rs | 314 ++++++++++++++ beacon_node/store/src/errors.rs | 74 +++- beacon_node/store/src/forwards_iter.rs | 1 - beacon_node/store/src/garbage_collection.rs | 28 +- beacon_node/store/src/hot_cold_store.rs | 407 ++++++++++-------- beacon_node/store/src/impls/beacon_state.rs | 9 +- beacon_node/store/src/leveldb_store.rs | 310 ------------- beacon_node/store/src/lib.rs | 79 ++-- beacon_node/store/src/memory_store.rs | 83 ++-- beacon_node/store/src/metrics.rs | 33 ++ beacon_node/store/src/partial_beacon_state.rs | 13 +- book/src/help_bn.md | 3 + book/src/installation-source.md | 5 +- database_manager/src/cli.rs | 9 + database_manager/src/lib.rs | 40 +- lighthouse/Cargo.toml | 8 +- 
lighthouse/tests/beacon_node.rs | 17 +- wordlist.txt | 2 + 38 files changed, 1479 insertions(+), 650 deletions(-) create mode 100644 beacon_node/store/src/database.rs create mode 100644 beacon_node/store/src/database/interface.rs create mode 100644 beacon_node/store/src/database/leveldb_impl.rs create mode 100644 beacon_node/store/src/database/redb_impl.rs delete mode 100644 beacon_node/store/src/leveldb_store.rs diff --git a/Cargo.lock b/Cargo.lock index ae7861f44f..899435a66b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5301,6 +5301,7 @@ dependencies = [ "slasher", "slashing_protection", "slog", + "store", "task_executor", "tempfile", "types", @@ -8429,6 +8430,7 @@ dependencies = [ "metrics", "parking_lot 0.12.3", "rand", + "redb", "safe_arith", "serde", "slog", diff --git a/Makefile b/Makefile index 4d95f50c5c..e8b44cb780 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release" PINNED_NIGHTLY ?= nightly # List of features to use when cross-compiling. Can be overridden via the environment. -CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,slasher-redb,jemalloc +CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,slasher-redb,jemalloc,beacon-node-leveldb,beacon-node-redb # Cargo profile for Cross builds. Default is for local builds, CI uses an override. 
CROSS_PROFILE ?= release diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index c8e92f7e9f..cd793c8394 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -317,7 +317,6 @@ impl PendingComponents { None, ) }; - let executed_block = recover(diet_executed_block)?; let AvailabilityPendingExecutedBlock { @@ -732,7 +731,7 @@ mod test { use slog::{info, Logger}; use state_processing::ConsensusContext; use std::collections::VecDeque; - use store::{HotColdDB, ItemStore, LevelDB, StoreConfig}; + use store::{database::interface::BeaconNodeBackend, HotColdDB, ItemStore, StoreConfig}; use tempfile::{tempdir, TempDir}; use types::non_zero_usize::new_non_zero_usize; use types::{ExecPayload, MinimalEthSpec}; @@ -744,7 +743,7 @@ mod test { db_path: &TempDir, spec: Arc, log: Logger, - ) -> Arc, LevelDB>> { + ) -> Arc, BeaconNodeBackend>> { let hot_path = db_path.path().join("hot_db"); let cold_path = db_path.path().join("cold_db"); let blobs_path = db_path.path().join("blobs_db"); @@ -920,7 +919,11 @@ mod test { ) where E: EthSpec, - T: BeaconChainTypes, ColdStore = LevelDB, EthSpec = E>, + T: BeaconChainTypes< + HotStore = BeaconNodeBackend, + ColdStore = BeaconNodeBackend, + EthSpec = E, + >, { let log = test_logger(); let chain_db_path = tempdir().expect("should get temp dir"); diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index ddae54f464..e22ec95a79 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -10,10 +10,7 @@ use std::borrow::Cow; use std::iter; use std::time::Duration; use store::metadata::DataColumnInfo; -use store::{ - get_key_for_col, AnchorInfo, BlobInfo, DBColumn, Error as StoreError, 
KeyValueStore, - KeyValueStoreOp, -}; +use store::{AnchorInfo, BlobInfo, DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp}; use strum::IntoStaticStr; use types::{FixedBytesExtended, Hash256, Slot}; @@ -153,7 +150,8 @@ impl BeaconChain { // Store block roots, including at all skip slots in the freezer DB. for slot in (block.slot().as_u64()..prev_block_slot.as_u64()).rev() { cold_batch.push(KeyValueStoreOp::PutKeyValue( - get_key_for_col(DBColumn::BeaconBlockRoots.into(), &slot.to_be_bytes()), + DBColumn::BeaconBlockRoots, + slot.to_be_bytes().to_vec(), block_root.as_slice().to_vec(), )); } @@ -169,7 +167,8 @@ impl BeaconChain { let genesis_slot = self.spec.genesis_slot; for slot in genesis_slot.as_u64()..prev_block_slot.as_u64() { cold_batch.push(KeyValueStoreOp::PutKeyValue( - get_key_for_col(DBColumn::BeaconBlockRoots.into(), &slot.to_be_bytes()), + DBColumn::BeaconBlockRoots, + slot.to_be_bytes().to_vec(), self.genesis_block_root.as_slice().to_vec(), )); } diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs index fcc8b9884a..f02f5ee6f3 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs @@ -3,9 +3,7 @@ use crate::validator_pubkey_cache::DatabasePubkey; use slog::{info, Logger}; use ssz::{Decode, Encode}; use std::sync::Arc; -use store::{ - get_key_for_col, DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem, -}; +use store::{DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem}; use types::{Hash256, PublicKey}; const LOG_EVERY: usize = 200_000; @@ -62,9 +60,9 @@ pub fn downgrade_from_v21( message: format!("{e:?}"), })?; - let db_key = get_key_for_col(DBColumn::PubkeyCache.into(), key.as_slice()); ops.push(KeyValueStoreOp::PutKeyValue( - db_key, + DBColumn::PubkeyCache, + key.as_slice().to_vec(), 
pubkey_bytes.as_ssz_bytes(), )); diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs index c34512eded..982c3ded46 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs @@ -4,7 +4,6 @@ use std::sync::Arc; use store::chunked_iter::ChunkedVectorIter; use store::{ chunked_vector::BlockRootsChunked, - get_key_for_col, metadata::{ SchemaVersion, ANCHOR_FOR_ARCHIVE_NODE, ANCHOR_UNINITIALIZED, STATE_UPPER_LIMIT_NO_RETAIN, }, @@ -21,7 +20,7 @@ fn load_old_schema_frozen_state( ) -> Result>, Error> { let Some(partial_state_bytes) = db .cold_db - .get_bytes(DBColumn::BeaconState.into(), state_root.as_slice())? + .get_bytes(DBColumn::BeaconState, state_root.as_slice())? else { return Ok(None); }; @@ -136,10 +135,7 @@ pub fn delete_old_schema_freezer_data( for column in columns { for res in db.cold_db.iter_column_keys::>(column) { let key = res?; - cold_ops.push(KeyValueStoreOp::DeleteKey(get_key_for_col( - column.as_str(), - &key, - ))); + cold_ops.push(KeyValueStoreOp::DeleteKey(column, key)); } } let delete_ops = cold_ops.len(); @@ -175,7 +171,8 @@ pub fn write_new_schema_block_roots( // Store the genesis block root if it would otherwise not be stored. if oldest_block_slot != 0 { cold_ops.push(KeyValueStoreOp::PutKeyValue( - get_key_for_col(DBColumn::BeaconBlockRoots.into(), &0u64.to_be_bytes()), + DBColumn::BeaconBlockRoots, + 0u64.to_be_bytes().to_vec(), genesis_block_root.as_slice().to_vec(), )); } @@ -192,10 +189,8 @@ pub fn write_new_schema_block_roots( // OK to hold these in memory (10M slots * 43 bytes per KV ~= 430 MB). 
for (i, (slot, block_root)) in block_root_iter.enumerate() { cold_ops.push(KeyValueStoreOp::PutKeyValue( - get_key_for_col( - DBColumn::BeaconBlockRoots.into(), - &(slot as u64).to_be_bytes(), - ), + DBColumn::BeaconBlockRoots, + slot.to_be_bytes().to_vec(), block_root.as_slice().to_vec(), )); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 443cc686eb..ba0a2159da 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -56,7 +56,8 @@ use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, LazyLock}; use std::time::Duration; -use store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore}; +use store::database::interface::BeaconNodeBackend; +use store::{config::StoreConfig, HotColdDB, ItemStore, MemoryStore}; use task_executor::TaskExecutor; use task_executor::{test_utils::TestRuntime, ShutdownReason}; use tree_hash::TreeHash; @@ -116,7 +117,7 @@ pub fn get_kzg(spec: &ChainSpec) -> Arc { pub type BaseHarnessType = Witness, E, THotStore, TColdStore>; -pub type DiskHarnessType = BaseHarnessType, LevelDB>; +pub type DiskHarnessType = BaseHarnessType, BeaconNodeBackend>; pub type EphemeralHarnessType = BaseHarnessType, MemoryStore>; pub type BoxedMutator = Box< @@ -299,7 +300,10 @@ impl Builder> { impl Builder> { /// Disk store, start from genesis. - pub fn fresh_disk_store(mut self, store: Arc, LevelDB>>) -> Self { + pub fn fresh_disk_store( + mut self, + store: Arc, BeaconNodeBackend>>, + ) -> Self { let validator_keypairs = self .validator_keypairs .clone() @@ -324,7 +328,10 @@ impl Builder> { } /// Disk store, resume. 
- pub fn resumed_disk_store(mut self, store: Arc, LevelDB>>) -> Self { + pub fn resumed_disk_store( + mut self, + store: Arc, BeaconNodeBackend>>, + ) -> Self { let mutator = move |builder: BeaconChainBuilder<_>| { builder .resume_from_db() diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index df0d561e1c..44fb298d6c 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -14,7 +14,8 @@ use state_processing::per_block_processing::errors::{ AttesterSlashingInvalid, BlockOperationError, ExitInvalid, ProposerSlashingInvalid, }; use std::sync::{Arc, LazyLock}; -use store::{LevelDB, StoreConfig}; +use store::database::interface::BeaconNodeBackend; +use store::StoreConfig; use tempfile::{tempdir, TempDir}; use types::*; @@ -26,7 +27,7 @@ static KEYPAIRS: LazyLock> = type E = MinimalEthSpec; type TestHarness = BeaconChainHarness>; -type HotColdDB = store::HotColdDB, LevelDB>; +type HotColdDB = store::HotColdDB, BeaconNodeBackend>; fn get_store(db_path: &TempDir) -> Arc { let spec = Arc::new(test_spec::()); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 60d46e8269..d1a38b1cde 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -25,10 +25,11 @@ use std::collections::HashSet; use std::convert::TryInto; use std::sync::{Arc, LazyLock}; use std::time::Duration; +use store::database::interface::BeaconNodeBackend; use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION, STATE_UPPER_LIMIT_NO_RETAIN}; use store::{ iter::{BlockRootsIterator, StateRootsIterator}, - BlobInfo, DBColumn, HotColdDB, LevelDB, StoreConfig, + BlobInfo, DBColumn, HotColdDB, StoreConfig, }; use tempfile::{tempdir, TempDir}; use tokio::time::sleep; @@ -46,7 +47,7 @@ static KEYPAIRS: LazyLock> = type E = MinimalEthSpec; type TestHarness = 
BeaconChainHarness>; -fn get_store(db_path: &TempDir) -> Arc, LevelDB>> { +fn get_store(db_path: &TempDir) -> Arc, BeaconNodeBackend>> { get_store_generic(db_path, StoreConfig::default(), test_spec::()) } @@ -54,7 +55,7 @@ fn get_store_generic( db_path: &TempDir, config: StoreConfig, spec: ChainSpec, -) -> Arc, LevelDB>> { +) -> Arc, BeaconNodeBackend>> { let hot_path = db_path.path().join("chain_db"); let cold_path = db_path.path().join("freezer_db"); let blobs_path = db_path.path().join("blobs_db"); @@ -73,7 +74,7 @@ fn get_store_generic( } fn get_harness( - store: Arc, LevelDB>>, + store: Arc, BeaconNodeBackend>>, validator_count: usize, ) -> TestHarness { // Most tests expect to retain historic states, so we use this as the default. @@ -85,7 +86,7 @@ fn get_harness( } fn get_harness_generic( - store: Arc, LevelDB>>, + store: Arc, BeaconNodeBackend>>, validator_count: usize, chain_config: ChainConfig, ) -> TestHarness { @@ -244,7 +245,6 @@ async fn full_participation_no_skips() { AttestationStrategy::AllValidators, ) .await; - check_finalization(&harness, num_blocks_produced); check_split_slot(&harness, store); check_chain_dump(&harness, num_blocks_produced + 1); @@ -3508,7 +3508,10 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) { } /// Check that the HotColdDB's split_slot is equal to the start slot of the last finalized epoch. 
-fn check_split_slot(harness: &TestHarness, store: Arc, LevelDB>>) { +fn check_split_slot( + harness: &TestHarness, + store: Arc, BeaconNodeBackend>>, +) { let split_slot = store.get_split_slot(); assert_eq!( harness diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 1cd9e89b96..e3bfd60a48 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -14,7 +14,7 @@ use beacon_chain::{ eth1_chain::{CachingEth1Backend, Eth1Chain}, slot_clock::{SlotClock, SystemTimeSlotClock}, state_advance_timer::spawn_state_advance_timer, - store::{HotColdDB, ItemStore, LevelDB, StoreConfig}, + store::{HotColdDB, ItemStore, StoreConfig}, BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler, }; use beacon_chain::{Kzg, LightClientProducerEvent}; @@ -41,6 +41,7 @@ use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; +use store::database::interface::BeaconNodeBackend; use timer::spawn_timer; use tokio::sync::oneshot; use types::{ @@ -1030,7 +1031,7 @@ where } impl - ClientBuilder, LevelDB>> + ClientBuilder, BeaconNodeBackend>> where TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index d9b3c8556c..99b7696610 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1933,7 +1933,7 @@ impl ApiTester { .sync_committee_period(&self.chain.spec) .unwrap(); - let result = match self + match self .client .get_beacon_light_client_updates::(current_sync_committee_period, 1) .await @@ -1954,7 +1954,6 @@ impl ApiTester { .unwrap(); assert_eq!(1, expected.len()); - assert_eq!(result.clone().unwrap().len(), expected.len()); self } @@ -1979,7 +1978,6 @@ impl ApiTester { .get_light_client_bootstrap(&self.chain.store, &block_root, 1u64, &self.chain.spec); assert!(expected.is_ok()); - 
assert_eq!(result.unwrap().data, expected.unwrap().unwrap().0); self diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index cecfcee868..1339c15825 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1591,5 +1591,14 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) + .arg( + Arg::new("beacon-node-backend") + .long("beacon-node-backend") + .value_name("DATABASE") + .value_parser(store::config::DatabaseBackend::VARIANTS.to_vec()) + .help("Set the database backend to be used by the beacon node.") + .action(ArgAction::Set) + .display_order(0) + ) .group(ArgGroup::new("enable_http").args(["http", "gui", "staking"]).multiple(true)) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 8d8a44a6fd..6d3c18d363 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -432,6 +432,10 @@ pub fn get_config( warn!(log, "The slots-per-restore-point flag is deprecated"); } + if let Some(backend) = clap_utils::parse_optional(cli_args, "beacon-node-backend")? { + client_config.store.backend = backend; + } + if let Some(hierarchy_config) = clap_utils::parse_optional(cli_args, "hierarchy-exponents")? { client_config.store.hierarchy_config = hierarchy_config; } diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 0c4cbf0f57..e3802c837c 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -2,7 +2,6 @@ mod cli; mod config; pub use beacon_chain; -use beacon_chain::store::LevelDB; use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, slot_clock::SystemTimeSlotClock, }; @@ -16,11 +15,19 @@ use slasher::{DatabaseBackendOverride, Slasher}; use slog::{info, warn}; use std::ops::{Deref, DerefMut}; use std::sync::Arc; +use store::database::interface::BeaconNodeBackend; use types::{ChainSpec, Epoch, EthSpec, ForkName}; /// A type-alias to the tighten the definition of a production-intended `Client`. 
-pub type ProductionClient = - Client, E, LevelDB, LevelDB>>; +pub type ProductionClient = Client< + Witness< + SystemTimeSlotClock, + CachingEth1Backend, + E, + BeaconNodeBackend, + BeaconNodeBackend, + >, +>; /// The beacon node `Client` that will be used in production. /// diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 21d0cf8dec..d2f3a5c562 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -4,6 +4,11 @@ version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } +[features] +default = ["leveldb"] +leveldb = ["dep:leveldb"] +redb = ["dep:redb"] + [dev-dependencies] beacon_chain = { workspace = true } criterion = { workspace = true } @@ -17,11 +22,12 @@ directory = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } itertools = { workspace = true } -leveldb = { version = "0.8" } +leveldb = { version = "0.8.6", optional = true } logging = { workspace = true } lru = { workspace = true } metrics = { workspace = true } parking_lot = { workspace = true } +redb = { version = "2.1.3", optional = true } safe_arith = { workspace = true } serde = { workspace = true } slog = { workspace = true } diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index 83b8da2a18..90e8c17310 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -680,7 +680,7 @@ where key: &[u8], ) -> Result, Error> { store - .get_bytes(column.into(), key)? + .get_bytes(column, key)? 
.map(|bytes| Self::decode(&bytes)) .transpose() } @@ -691,8 +691,11 @@ where key: &[u8], ops: &mut Vec, ) -> Result<(), Error> { - let db_key = get_key_for_col(column.into(), key); - ops.push(KeyValueStoreOp::PutKeyValue(db_key, self.encode()?)); + ops.push(KeyValueStoreOp::PutKeyValue( + column, + key.to_vec(), + self.encode()?, + )); Ok(()) } diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 4f67530570..64765fd66a 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -1,16 +1,23 @@ use crate::hdiff::HierarchyConfig; +use crate::superstruct; use crate::{AnchorInfo, DBColumn, Error, Split, StoreItem}; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::io::Write; use std::num::NonZeroUsize; -use superstruct::superstruct; +use strum::{Display, EnumString, EnumVariantNames}; use types::non_zero_usize::new_non_zero_usize; use types::EthSpec; use zstd::Encoder; -// Only used in tests. Mainnet sets a higher default on the CLI. +#[cfg(all(feature = "redb", not(feature = "leveldb")))] +pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Redb; +#[cfg(feature = "leveldb")] +pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::LevelDb; + +pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; +pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; pub const DEFAULT_EPOCHS_PER_STATE_DIFF: u64 = 8; pub const DEFAULT_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(64); pub const DEFAULT_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(128); @@ -40,6 +47,8 @@ pub struct StoreConfig { pub compact_on_prune: bool, /// Whether to prune payloads on initialization and finalization. pub prune_payloads: bool, + /// Database backend to use. + pub backend: DatabaseBackend, /// State diff hierarchy. pub hierarchy_config: HierarchyConfig, /// Whether to prune blobs older than the blob data availability boundary. 
@@ -104,6 +113,7 @@ impl Default for StoreConfig { compact_on_init: false, compact_on_prune: true, prune_payloads: true, + backend: DEFAULT_BACKEND, hierarchy_config: HierarchyConfig::default(), prune_blobs: true, epochs_per_blob_prune: DEFAULT_EPOCHS_PER_BLOB_PRUNE, @@ -340,3 +350,14 @@ mod test { assert_eq!(config_out, config); } } + +#[derive( + Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Display, EnumString, EnumVariantNames, +)] +#[strum(serialize_all = "lowercase")] +pub enum DatabaseBackend { + #[cfg(feature = "leveldb")] + LevelDb, + #[cfg(feature = "redb")] + Redb, +} diff --git a/beacon_node/store/src/database.rs b/beacon_node/store/src/database.rs new file mode 100644 index 0000000000..2232f73c5c --- /dev/null +++ b/beacon_node/store/src/database.rs @@ -0,0 +1,5 @@ +pub mod interface; +#[cfg(feature = "leveldb")] +pub mod leveldb_impl; +#[cfg(feature = "redb")] +pub mod redb_impl; diff --git a/beacon_node/store/src/database/interface.rs b/beacon_node/store/src/database/interface.rs new file mode 100644 index 0000000000..b213433241 --- /dev/null +++ b/beacon_node/store/src/database/interface.rs @@ -0,0 +1,220 @@ +#[cfg(feature = "leveldb")] +use crate::database::leveldb_impl; +#[cfg(feature = "redb")] +use crate::database::redb_impl; +use crate::{config::DatabaseBackend, KeyValueStoreOp, StoreConfig}; +use crate::{metrics, ColumnIter, ColumnKeyIter, DBColumn, Error, ItemStore, Key, KeyValueStore}; +use std::collections::HashSet; +use std::path::Path; +use types::EthSpec; + +pub enum BeaconNodeBackend { + #[cfg(feature = "leveldb")] + LevelDb(leveldb_impl::LevelDB), + #[cfg(feature = "redb")] + Redb(redb_impl::Redb), +} + +impl ItemStore for BeaconNodeBackend {} + +impl KeyValueStore for BeaconNodeBackend { + fn get_bytes(&self, column: DBColumn, key: &[u8]) -> Result>, Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::get_bytes(txn, column, key), + #[cfg(feature = "redb")] + 
BeaconNodeBackend::Redb(txn) => redb_impl::Redb::get_bytes(txn, column, key), + } + } + + fn put_bytes(&self, column: DBColumn, key: &[u8], value: &[u8]) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::put_bytes_with_options( + txn, + column, + key, + value, + txn.write_options(), + ), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::put_bytes_with_options( + txn, + column, + key, + value, + txn.write_options(), + ), + } + } + + fn put_bytes_sync(&self, column: DBColumn, key: &[u8], value: &[u8]) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::put_bytes_with_options( + txn, + column, + key, + value, + txn.write_options_sync(), + ), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::put_bytes_with_options( + txn, + column, + key, + value, + txn.write_options_sync(), + ), + } + } + + fn sync(&self) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::sync(txn), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::sync(txn), + } + } + + fn key_exists(&self, column: DBColumn, key: &[u8]) -> Result { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::key_exists(txn, column, key), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::key_exists(txn, column, key), + } + } + + fn key_delete(&self, column: DBColumn, key: &[u8]) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::key_delete(txn, column, key), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::key_delete(txn, column, key), + } + } + + fn do_atomically(&self, batch: Vec) -> Result<(), Error> { + match self { + 
#[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::do_atomically(txn, batch), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::do_atomically(txn, batch), + } + } + + fn begin_rw_transaction(&self) -> parking_lot::MutexGuard<()> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::begin_rw_transaction(txn), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::begin_rw_transaction(txn), + } + } + + fn compact(&self) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::compact(txn), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::compact(txn), + } + } + + fn iter_column_keys_from(&self, _column: DBColumn, from: &[u8]) -> ColumnKeyIter { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => { + leveldb_impl::LevelDB::iter_column_keys_from(txn, _column, from) + } + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => { + redb_impl::Redb::iter_column_keys_from(txn, _column, from) + } + } + } + + fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::iter_column_keys(txn, column), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::iter_column_keys(txn, column), + } + } + + fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => { + leveldb_impl::LevelDB::iter_column_from(txn, column, from) + } + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::iter_column_from(txn, column, from), + } + } + + fn compact_column(&self, _column: DBColumn) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + 
BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::compact_column(txn, _column), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::compact(txn), + } + } + + fn delete_batch(&self, col: DBColumn, ops: HashSet<&[u8]>) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::delete_batch(txn, col, ops), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::delete_batch(txn, col, ops), + } + } + + fn delete_if( + &self, + column: DBColumn, + f: impl FnMut(&[u8]) -> Result, + ) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::delete_if(txn, column, f), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::delete_if(txn, column, f), + } + } +} + +impl BeaconNodeBackend { + pub fn open(config: &StoreConfig, path: &Path) -> Result { + metrics::inc_counter_vec(&metrics::DISK_DB_TYPE, &[&config.backend.to_string()]); + match config.backend { + #[cfg(feature = "leveldb")] + DatabaseBackend::LevelDb => { + leveldb_impl::LevelDB::open(path).map(BeaconNodeBackend::LevelDb) + } + #[cfg(feature = "redb")] + DatabaseBackend::Redb => redb_impl::Redb::open(path).map(BeaconNodeBackend::Redb), + } + } +} + +pub struct WriteOptions { + /// fsync before acknowledging a write operation. 
+ pub sync: bool, +} + +impl WriteOptions { + pub fn new() -> Self { + WriteOptions { sync: false } + } +} + +impl Default for WriteOptions { + fn default() -> Self { + Self::new() + } +} diff --git a/beacon_node/store/src/database/leveldb_impl.rs b/beacon_node/store/src/database/leveldb_impl.rs new file mode 100644 index 0000000000..3d8bbe1473 --- /dev/null +++ b/beacon_node/store/src/database/leveldb_impl.rs @@ -0,0 +1,304 @@ +use crate::hot_cold_store::{BytesKey, HotColdDBError}; +use crate::Key; +use crate::{ + get_key_for_col, metrics, ColumnIter, ColumnKeyIter, DBColumn, Error, KeyValueStoreOp, +}; +use leveldb::{ + compaction::Compaction, + database::{ + batch::{Batch, Writebatch}, + kv::KV, + Database, + }, + iterator::{Iterable, LevelDBIterator}, + options::{Options, ReadOptions}, +}; +use parking_lot::{Mutex, MutexGuard}; +use std::collections::HashSet; +use std::marker::PhantomData; +use std::path::Path; +use types::{EthSpec, FixedBytesExtended, Hash256}; + +use super::interface::WriteOptions; + +pub struct LevelDB { + db: Database, + /// A mutex to synchronise sensitive read-write transactions. 
+ transaction_mutex: Mutex<()>, + _phantom: PhantomData, +} + +impl From for leveldb::options::WriteOptions { + fn from(options: WriteOptions) -> Self { + let mut opts = leveldb::options::WriteOptions::new(); + opts.sync = options.sync; + opts + } +} + +impl LevelDB { + pub fn open(path: &Path) -> Result { + let mut options = Options::new(); + + options.create_if_missing = true; + + let db = Database::open(path, options)?; + let transaction_mutex = Mutex::new(()); + + Ok(Self { + db, + transaction_mutex, + _phantom: PhantomData, + }) + } + + pub fn read_options(&self) -> ReadOptions { + ReadOptions::new() + } + + pub fn write_options(&self) -> WriteOptions { + WriteOptions::new() + } + + pub fn write_options_sync(&self) -> WriteOptions { + let mut opts = WriteOptions::new(); + opts.sync = true; + opts + } + + pub fn put_bytes_with_options( + &self, + col: DBColumn, + key: &[u8], + val: &[u8], + opts: WriteOptions, + ) -> Result<(), Error> { + let column_key = get_key_for_col(col, key); + + metrics::inc_counter_vec(&metrics::DISK_DB_WRITE_COUNT, &[col.into()]); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_WRITE_BYTES, + &[col.into()], + val.len() as u64, + ); + let timer = metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); + + self.db + .put(opts.into(), BytesKey::from_vec(column_key), val) + .map_err(Into::into) + .map(|()| { + metrics::stop_timer(timer); + }) + } + + /// Store some `value` in `column`, indexed with `key`. + pub fn put_bytes(&self, col: DBColumn, key: &[u8], val: &[u8]) -> Result<(), Error> { + self.put_bytes_with_options(col, key, val, self.write_options()) + } + + pub fn put_bytes_sync(&self, col: DBColumn, key: &[u8], val: &[u8]) -> Result<(), Error> { + self.put_bytes_with_options(col, key, val, self.write_options_sync()) + } + + pub fn sync(&self) -> Result<(), Error> { + self.put_bytes_sync(DBColumn::Dummy, b"sync", b"sync") + } + + // Retrieve some bytes in `column` with `key`. 
+ pub fn get_bytes(&self, col: DBColumn, key: &[u8]) -> Result>, Error> { + let column_key = get_key_for_col(col, key); + + metrics::inc_counter_vec(&metrics::DISK_DB_READ_COUNT, &[col.into()]); + let timer = metrics::start_timer(&metrics::DISK_DB_READ_TIMES); + + self.db + .get(self.read_options(), BytesKey::from_vec(column_key)) + .map_err(Into::into) + .map(|opt| { + opt.inspect(|bytes| { + metrics::inc_counter_vec_by( + &metrics::DISK_DB_READ_BYTES, + &[col.into()], + bytes.len() as u64, + ); + metrics::stop_timer(timer); + }) + }) + } + + /// Return `true` if `key` exists in `column`. + pub fn key_exists(&self, col: DBColumn, key: &[u8]) -> Result { + let column_key = get_key_for_col(col, key); + + metrics::inc_counter_vec(&metrics::DISK_DB_EXISTS_COUNT, &[col.into()]); + + self.db + .get(self.read_options(), BytesKey::from_vec(column_key)) + .map_err(Into::into) + .map(|val| val.is_some()) + } + + /// Removes `key` from `column`. + pub fn key_delete(&self, col: DBColumn, key: &[u8]) -> Result<(), Error> { + let column_key = get_key_for_col(col, key); + + metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[col.into()]); + + self.db + .delete(self.write_options().into(), BytesKey::from_vec(column_key)) + .map_err(Into::into) + } + + pub fn do_atomically(&self, ops_batch: Vec) -> Result<(), Error> { + let mut leveldb_batch = Writebatch::new(); + for op in ops_batch { + match op { + KeyValueStoreOp::PutKeyValue(col, key, value) => { + let _timer = metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_WRITE_BYTES, + &[col.into()], + value.len() as u64, + ); + metrics::inc_counter_vec(&metrics::DISK_DB_WRITE_COUNT, &[col.into()]); + let column_key = get_key_for_col(col, &key); + leveldb_batch.put(BytesKey::from_vec(column_key), &value); + } + + KeyValueStoreOp::DeleteKey(col, key) => { + let _timer = metrics::start_timer(&metrics::DISK_DB_DELETE_TIMES); + 
metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[col.into()]); + let column_key = get_key_for_col(col, &key); + leveldb_batch.delete(BytesKey::from_vec(column_key)); + } + } + } + self.db.write(self.write_options().into(), &leveldb_batch)?; + Ok(()) + } + + pub fn begin_rw_transaction(&self) -> MutexGuard<()> { + self.transaction_mutex.lock() + } + + /// Compact all values in the states and states flag columns. + pub fn compact(&self) -> Result<(), Error> { + let _timer = metrics::start_timer(&metrics::DISK_DB_COMPACT_TIMES); + let endpoints = |column: DBColumn| { + ( + BytesKey::from_vec(get_key_for_col(column, Hash256::zero().as_slice())), + BytesKey::from_vec(get_key_for_col( + column, + Hash256::repeat_byte(0xff).as_slice(), + )), + ) + }; + + for (start_key, end_key) in [ + endpoints(DBColumn::BeaconStateTemporary), + endpoints(DBColumn::BeaconState), + endpoints(DBColumn::BeaconStateSummary), + ] { + self.db.compact(&start_key, &end_key); + } + + Ok(()) + } + + pub fn compact_column(&self, column: DBColumn) -> Result<(), Error> { + // Use key-size-agnostic keys [] and 0xff..ff with a minimum of 32 bytes to account for + // columns that may change size between sub-databases or schema versions. 
+ let start_key = BytesKey::from_vec(get_key_for_col(column, &[])); + let end_key = BytesKey::from_vec(get_key_for_col( + column, + &vec![0xff; std::cmp::max(column.key_size(), 32)], + )); + self.db.compact(&start_key, &end_key); + Ok(()) + } + + pub fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { + let start_key = BytesKey::from_vec(get_key_for_col(column, from)); + let iter = self.db.iter(self.read_options()); + iter.seek(&start_key); + + Box::new( + iter.take_while(move |(key, _)| key.matches_column(column)) + .map(move |(bytes_key, value)| { + metrics::inc_counter_vec(&metrics::DISK_DB_READ_COUNT, &[column.into()]); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_READ_BYTES, + &[column.into()], + value.len() as u64, + ); + let key = bytes_key.remove_column_variable(column).ok_or_else(|| { + HotColdDBError::IterationError { + unexpected_key: bytes_key.clone(), + } + })?; + Ok((K::from_bytes(key)?, value)) + }), + ) + } + + pub fn iter_column_keys_from(&self, column: DBColumn, from: &[u8]) -> ColumnKeyIter { + let start_key = BytesKey::from_vec(get_key_for_col(column, from)); + + let iter = self.db.keys_iter(self.read_options()); + iter.seek(&start_key); + + Box::new( + iter.take_while(move |key| key.matches_column(column)) + .map(move |bytes_key| { + metrics::inc_counter_vec(&metrics::DISK_DB_KEY_READ_COUNT, &[column.into()]); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_KEY_READ_BYTES, + &[column.into()], + bytes_key.key.len() as u64, + ); + let key = &bytes_key.key[column.as_bytes().len()..]; + K::from_bytes(key) + }), + ) + } + + /// Iterate through all keys and values in a particular column. 
+ pub fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { + self.iter_column_keys_from(column, &vec![0; column.key_size()]) + } + + pub fn iter_column(&self, column: DBColumn) -> ColumnIter { + self.iter_column_from(column, &vec![0; column.key_size()]) + } + + pub fn delete_batch(&self, col: DBColumn, ops: HashSet<&[u8]>) -> Result<(), Error> { + let mut leveldb_batch = Writebatch::new(); + for op in ops { + let column_key = get_key_for_col(col, op); + leveldb_batch.delete(BytesKey::from_vec(column_key)); + } + self.db.write(self.write_options().into(), &leveldb_batch)?; + Ok(()) + } + + pub fn delete_if( + &self, + column: DBColumn, + mut f: impl FnMut(&[u8]) -> Result, + ) -> Result<(), Error> { + let mut leveldb_batch = Writebatch::new(); + let iter = self.db.iter(self.read_options()); + + iter.take_while(move |(key, _)| key.matches_column(column)) + .for_each(|(key, value)| { + if f(&value).unwrap_or(false) { + let _timer = metrics::start_timer(&metrics::DISK_DB_DELETE_TIMES); + metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[column.into()]); + leveldb_batch.delete(key); + } + }); + + self.db.write(self.write_options().into(), &leveldb_batch)?; + Ok(()) + } +} diff --git a/beacon_node/store/src/database/redb_impl.rs b/beacon_node/store/src/database/redb_impl.rs new file mode 100644 index 0000000000..6a776da7b1 --- /dev/null +++ b/beacon_node/store/src/database/redb_impl.rs @@ -0,0 +1,314 @@ +use crate::{metrics, ColumnIter, ColumnKeyIter, Key}; +use crate::{DBColumn, Error, KeyValueStoreOp}; +use parking_lot::{Mutex, MutexGuard, RwLock}; +use redb::TableDefinition; +use std::collections::HashSet; +use std::{borrow::BorrowMut, marker::PhantomData, path::Path}; +use strum::IntoEnumIterator; +use types::EthSpec; + +use super::interface::WriteOptions; + +pub const DB_FILE_NAME: &str = "database.redb"; + +pub struct Redb { + db: RwLock, + transaction_mutex: Mutex<()>, + _phantom: PhantomData, +} + +impl From for redb::Durability { + fn 
from(options: WriteOptions) -> Self { + if options.sync { + redb::Durability::Immediate + } else { + redb::Durability::Eventual + } + } +} + +impl Redb { + pub fn open(path: &Path) -> Result { + let db_file = path.join(DB_FILE_NAME); + let db = redb::Database::create(db_file)?; + let transaction_mutex = Mutex::new(()); + + for column in DBColumn::iter() { + Redb::::create_table(&db, column.into())?; + } + + Ok(Self { + db: db.into(), + transaction_mutex, + _phantom: PhantomData, + }) + } + + fn create_table(db: &redb::Database, table_name: &str) -> Result<(), Error> { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(table_name); + let tx = db.begin_write()?; + tx.open_table(table_definition)?; + tx.commit().map_err(Into::into) + } + + pub fn write_options(&self) -> WriteOptions { + WriteOptions::new() + } + + pub fn write_options_sync(&self) -> WriteOptions { + let mut opts = WriteOptions::new(); + opts.sync = true; + opts + } + + pub fn begin_rw_transaction(&self) -> MutexGuard<()> { + self.transaction_mutex.lock() + } + + pub fn put_bytes_with_options( + &self, + col: DBColumn, + key: &[u8], + val: &[u8], + opts: WriteOptions, + ) -> Result<(), Error> { + metrics::inc_counter_vec(&metrics::DISK_DB_WRITE_COUNT, &[col.into()]); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_WRITE_BYTES, + &[col.into()], + val.len() as u64, + ); + let timer = metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); + + let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(col.into()); + let open_db = self.db.read(); + let mut tx = open_db.begin_write()?; + tx.set_durability(opts.into()); + let mut table = tx.open_table(table_definition)?; + + table.insert(key, val).map(|_| { + metrics::stop_timer(timer); + })?; + drop(table); + tx.commit().map_err(Into::into) + } + + /// Store some `value` in `column`, indexed with `key`. 
+ pub fn put_bytes(&self, col: DBColumn, key: &[u8], val: &[u8]) -> Result<(), Error> { + self.put_bytes_with_options(col, key, val, self.write_options()) + } + + pub fn put_bytes_sync(&self, col: DBColumn, key: &[u8], val: &[u8]) -> Result<(), Error> { + self.put_bytes_with_options(col, key, val, self.write_options_sync()) + } + + pub fn sync(&self) -> Result<(), Error> { + self.put_bytes_sync(DBColumn::Dummy, b"sync", b"sync") + } + + // Retrieve some bytes in `column` with `key`. + pub fn get_bytes(&self, col: DBColumn, key: &[u8]) -> Result>, Error> { + metrics::inc_counter_vec(&metrics::DISK_DB_READ_COUNT, &[col.into()]); + let timer = metrics::start_timer(&metrics::DISK_DB_READ_TIMES); + + let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(col.into()); + let open_db = self.db.read(); + let tx = open_db.begin_read()?; + let table = tx.open_table(table_definition)?; + + let result = table.get(key)?; + + match result { + Some(access_guard) => { + let value = access_guard.value().to_vec(); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_READ_BYTES, + &[col.into()], + value.len() as u64, + ); + metrics::stop_timer(timer); + Ok(Some(value)) + } + None => { + metrics::stop_timer(timer); + Ok(None) + } + } + } + + /// Return `true` if `key` exists in `column`. + pub fn key_exists(&self, col: DBColumn, key: &[u8]) -> Result { + metrics::inc_counter_vec(&metrics::DISK_DB_EXISTS_COUNT, &[col.into()]); + + let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(col.into()); + let open_db = self.db.read(); + let tx = open_db.begin_read()?; + let table = tx.open_table(table_definition)?; + + table + .get(key) + .map_err(Into::into) + .map(|access_guard| access_guard.is_some()) + } + + /// Removes `key` from `column`. 
+ pub fn key_delete(&self, col: DBColumn, key: &[u8]) -> Result<(), Error> { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(col.into()); + let open_db = self.db.read(); + let tx = open_db.begin_write()?; + let mut table = tx.open_table(table_definition)?; + metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[col.into()]); + + table.remove(key).map(|_| ())?; + drop(table); + tx.commit().map_err(Into::into) + } + + pub fn do_atomically(&self, ops_batch: Vec) -> Result<(), Error> { + let open_db = self.db.read(); + let mut tx = open_db.begin_write()?; + tx.set_durability(self.write_options().into()); + for op in ops_batch { + match op { + KeyValueStoreOp::PutKeyValue(column, key, value) => { + let _timer = metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_WRITE_BYTES, + &[column.into()], + value.len() as u64, + ); + metrics::inc_counter_vec(&metrics::DISK_DB_WRITE_COUNT, &[column.into()]); + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(column.into()); + + let mut table = tx.open_table(table_definition)?; + table.insert(key.as_slice(), value.as_slice())?; + drop(table); + } + + KeyValueStoreOp::DeleteKey(column, key) => { + metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[column.into()]); + let _timer = metrics::start_timer(&metrics::DISK_DB_DELETE_TIMES); + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(column.into()); + + let mut table = tx.open_table(table_definition)?; + table.remove(key.as_slice())?; + drop(table); + } + } + } + + tx.commit()?; + Ok(()) + } + + /// Compact all values in the states and states flag columns. 
+ pub fn compact(&self) -> Result<(), Error> { + let _timer = metrics::start_timer(&metrics::DISK_DB_COMPACT_TIMES); + let mut open_db = self.db.write(); + let mut_db = open_db.borrow_mut(); + mut_db.compact().map_err(Into::into).map(|_| ()) + } + + pub fn iter_column_keys_from(&self, column: DBColumn, from: &[u8]) -> ColumnKeyIter { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(column.into()); + + let iter = { + let open_db = self.db.read(); + let read_txn = open_db.begin_read()?; + let table = read_txn.open_table(table_definition)?; + table.range(from..)?.map(move |res| { + let (key, _) = res?; + metrics::inc_counter_vec(&metrics::DISK_DB_KEY_READ_COUNT, &[column.into()]); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_KEY_READ_BYTES, + &[column.into()], + key.value().len() as u64, + ); + K::from_bytes(key.value()) + }) + }; + + Box::new(iter) + } + + /// Iterate through all keys and values in a particular column. + pub fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { + self.iter_column_keys_from(column, &vec![0; column.key_size()]) + } + + pub fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(column.into()); + + let prefix = from.to_vec(); + + let iter = { + let open_db = self.db.read(); + let read_txn = open_db.begin_read()?; + let table = read_txn.open_table(table_definition)?; + + table + .range(from..)? 
+ .take_while(move |res| match res.as_ref() { + Ok((_, _)) => true, + Err(_) => false, + }) + .map(move |res| { + let (key, value) = res?; + metrics::inc_counter_vec(&metrics::DISK_DB_READ_COUNT, &[column.into()]); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_READ_BYTES, + &[column.into()], + value.value().len() as u64, + ); + Ok((K::from_bytes(key.value())?, value.value().to_vec())) + }) + }; + + Ok(Box::new(iter)) + } + + pub fn iter_column(&self, column: DBColumn) -> ColumnIter { + self.iter_column_from(column, &vec![0; column.key_size()], |_, _| true) + } + + pub fn delete_batch(&self, col: DBColumn, ops: HashSet<&[u8]>) -> Result<(), Error> { + let open_db = self.db.read(); + let mut tx = open_db.begin_write()?; + + tx.set_durability(redb::Durability::None); + + let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(col.into()); + + let mut table = tx.open_table(table_definition)?; + table.retain(|key, _| !ops.contains(key))?; + + drop(table); + tx.commit()?; + Ok(()) + } + + pub fn delete_if( + &self, + column: DBColumn, + mut f: impl FnMut(&[u8]) -> Result, + ) -> Result<(), Error> { + let open_db = self.db.read(); + let mut tx = open_db.begin_write()?; + + tx.set_durability(redb::Durability::None); + + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(column.into()); + + let mut table = tx.open_table(table_definition)?; + table.retain(|_, value| !f(value).unwrap_or(false))?; + + drop(table); + tx.commit()?; + Ok(()) + } +} diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 6bb4edee6b..41fd17ef43 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -2,6 +2,8 @@ use crate::chunked_vector::ChunkError; use crate::config::StoreConfigError; use crate::hot_cold_store::HotColdDBError; use crate::{hdiff, DBColumn}; +#[cfg(feature = "leveldb")] +use leveldb::error::Error as LevelDBError; use ssz::DecodeError; use 
state_processing::BlockReplayError; use types::{milhouse, BeaconStateError, EpochCacheError, Hash256, InconsistentFork, Slot}; @@ -48,6 +50,16 @@ pub enum Error { MissingGenesisState, MissingSnapshot(Slot), BlockReplayError(BlockReplayError), + AddPayloadLogicError, + InvalidKey, + InvalidBytes, + InconsistentFork(InconsistentFork), + #[cfg(feature = "leveldb")] + LevelDbError(LevelDBError), + #[cfg(feature = "redb")] + RedbError(redb::Error), + CacheBuildError(EpochCacheError), + RandaoMixOutOfBounds, MilhouseError(milhouse::Error), Compression(std::io::Error), FinalizedStateDecreasingSlot, @@ -56,17 +68,11 @@ pub enum Error { state_root: Hash256, slot: Slot, }, - AddPayloadLogicError, - InvalidKey, - InvalidBytes, - InconsistentFork(InconsistentFork), Hdiff(hdiff::Error), - CacheBuildError(EpochCacheError), ForwardsIterInvalidColumn(DBColumn), ForwardsIterGap(DBColumn, Slot, Slot), StateShouldNotBeRequired(Slot), MissingBlock(Hash256), - RandaoMixOutOfBounds, GenesisStateUnknown, ArithError(safe_arith::ArithError), } @@ -145,6 +151,62 @@ impl From for Error { } } +#[cfg(feature = "leveldb")] +impl From for Error { + fn from(e: LevelDBError) -> Error { + Error::LevelDbError(e) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::Error) -> Self { + Error::RedbError(e) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::TableError) -> Self { + Error::RedbError(e.into()) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::TransactionError) -> Self { + Error::RedbError(e.into()) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::DatabaseError) -> Self { + Error::RedbError(e.into()) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::StorageError) -> Self { + Error::RedbError(e.into()) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::CommitError) -> Self { + Error::RedbError(e.into()) + } +} + +#[cfg(feature = "redb")] 
+impl From for Error { + fn from(e: redb::CompactionError) -> Self { + Error::RedbError(e.into()) + } +} + impl From for Error { fn from(e: EpochCacheError) -> Error { Error::CacheBuildError(e) diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs index 955bd33b30..5300a74c06 100644 --- a/beacon_node/store/src/forwards_iter.rs +++ b/beacon_node/store/src/forwards_iter.rs @@ -4,7 +4,6 @@ use crate::{ColumnIter, DBColumn, HotColdDB, ItemStore}; use itertools::process_results; use std::marker::PhantomData; use types::{BeaconState, EthSpec, Hash256, Slot}; - pub type HybridForwardsBlockRootsIterator<'a, E, Hot, Cold> = HybridForwardsIterator<'a, E, Hot, Cold>; pub type HybridForwardsStateRootsIterator<'a, E, Hot, Cold> = diff --git a/beacon_node/store/src/garbage_collection.rs b/beacon_node/store/src/garbage_collection.rs index 5f8ed8f5e7..06393f2d21 100644 --- a/beacon_node/store/src/garbage_collection.rs +++ b/beacon_node/store/src/garbage_collection.rs @@ -1,10 +1,11 @@ //! Garbage collection process that runs at start-up to clean up the database. +use crate::database::interface::BeaconNodeBackend; use crate::hot_cold_store::HotColdDB; -use crate::{Error, LevelDB, StoreOp}; +use crate::{DBColumn, Error}; use slog::debug; use types::EthSpec; -impl HotColdDB, LevelDB> +impl HotColdDB, BeaconNodeBackend> where E: EthSpec, { @@ -16,21 +17,22 @@ where /// Delete the temporary states that were leftover by failed block imports. 
pub fn delete_temp_states(&self) -> Result<(), Error> { - let delete_ops = - self.iter_temporary_state_roots() - .try_fold(vec![], |mut ops, state_root| { - let state_root = state_root?; - ops.push(StoreOp::DeleteState(state_root, None)); - Result::<_, Error>::Ok(ops) - })?; - - if !delete_ops.is_empty() { + let mut ops = vec![]; + self.iter_temporary_state_roots().for_each(|state_root| { + if let Ok(state_root) = state_root { + ops.push(state_root); + } + }); + if !ops.is_empty() { debug!( self.log, "Garbage collecting {} temporary states", - delete_ops.len() + ops.len() ); - self.do_atomically_with_block_and_blobs_cache(delete_ops)?; + + self.delete_batch(DBColumn::BeaconState, ops.clone())?; + self.delete_batch(DBColumn::BeaconStateSummary, ops.clone())?; + self.delete_batch(DBColumn::BeaconStateTemporary, ops)?; } Ok(()) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index c29305f983..75251cb5fb 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1,10 +1,10 @@ use crate::config::{OnDiskStoreConfig, StoreConfig}; +use crate::database::interface::BeaconNodeBackend; use crate::forwards_iter::{HybridForwardsBlockRootsIterator, HybridForwardsStateRootsIterator}; use crate::hdiff::{HDiff, HDiffBuffer, HierarchyModuli, StorageStrategy}; use crate::historic_state_cache::HistoricStateCache; use crate::impls::beacon_state::{get_full_state, store_full_state}; use crate::iter::{BlockRootsIterator, ParentRootBlockIterator, RootsIterator}; -use crate::leveldb_store::{BytesKey, LevelDB}; use crate::memory_store::MemoryStore; use crate::metadata::{ AnchorInfo, BlobInfo, CompactionTimestamp, DataColumnInfo, PruningCheckpoint, SchemaVersion, @@ -14,12 +14,10 @@ use crate::metadata::{ }; use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ - get_data_column_key, get_key_for_col, BlobSidecarListFromRoot, DBColumn, DatabaseBlock, Error, - ItemStore, KeyValueStoreOp, 
StoreItem, StoreOp, + get_data_column_key, metrics, parse_data_column_key, BlobSidecarListFromRoot, DBColumn, + DatabaseBlock, Error, ItemStore, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; -use crate::{metrics, parse_data_column_key}; use itertools::{process_results, Itertools}; -use leveldb::iterator::LevelDBIterator; use lru::LruCache; use parking_lot::{Mutex, RwLock}; use safe_arith::SafeArith; @@ -231,7 +229,7 @@ impl HotColdDB, MemoryStore> { } } -impl HotColdDB, LevelDB> { +impl HotColdDB, BeaconNodeBackend> { /// Open a new or existing database, with the given paths to the hot and cold DBs. /// /// The `migrate_schema` function is passed in so that the parent `BeaconChain` can provide @@ -249,7 +247,7 @@ impl HotColdDB, LevelDB> { let hierarchy = config.hierarchy_config.to_moduli()?; - let hot_db = LevelDB::open(hot_path)?; + let hot_db = BeaconNodeBackend::open(&config, hot_path)?; let anchor_info = RwLock::new(Self::load_anchor_info(&hot_db)?); let db = HotColdDB { @@ -257,8 +255,8 @@ impl HotColdDB, LevelDB> { anchor_info, blob_info: RwLock::new(BlobInfo::default()), data_column_info: RwLock::new(DataColumnInfo::default()), - cold_db: LevelDB::open(cold_path)?, - blobs_db: LevelDB::open(blobs_db_path)?, + blobs_db: BeaconNodeBackend::open(&config, blobs_db_path)?, + cold_db: BeaconNodeBackend::open(&config, cold_path)?, hot_db, block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), state_cache: Mutex::new(StateCache::new(config.state_cache_size)), @@ -408,23 +406,8 @@ impl HotColdDB, LevelDB> { /// Return an iterator over the state roots of all temporary states. 
pub fn iter_temporary_state_roots(&self) -> impl Iterator> + '_ { - let column = DBColumn::BeaconStateTemporary; - let start_key = - BytesKey::from_vec(get_key_for_col(column.into(), Hash256::zero().as_slice())); - - let keys_iter = self.hot_db.keys_iter(); - keys_iter.seek(&start_key); - - keys_iter - .take_while(move |key| key.matches_column(column)) - .map(move |bytes_key| { - bytes_key.remove_column(column).ok_or_else(|| { - HotColdDBError::IterationError { - unexpected_key: bytes_key, - } - .into() - }) - }) + self.hot_db + .iter_column_keys::(DBColumn::BeaconStateTemporary) } } @@ -536,9 +519,9 @@ impl, Cold: ItemStore> HotColdDB blinded_block: &SignedBeaconBlock>, ops: &mut Vec, ) { - let db_key = get_key_for_col(DBColumn::BeaconBlock.into(), key.as_slice()); ops.push(KeyValueStoreOp::PutKeyValue( - db_key, + DBColumn::BeaconBlock, + key.as_slice().into(), blinded_block.as_ssz_bytes(), )); } @@ -660,7 +643,7 @@ impl, Cold: ItemStore> HotColdDB decoder: impl FnOnce(&[u8]) -> Result, ssz::DecodeError>, ) -> Result>, Error> { self.hot_db - .get_bytes(DBColumn::BeaconBlock.into(), block_root.as_slice())? + .get_bytes(DBColumn::BeaconBlock, block_root.as_slice())? .map(|block_bytes| decoder(&block_bytes)) .transpose() .map_err(|e| e.into()) @@ -673,10 +656,12 @@ impl, Cold: ItemStore> HotColdDB block_root: &Hash256, fork_name: ForkName, ) -> Result>, Error> { - let column = ExecutionPayload::::db_column().into(); let key = block_root.as_slice(); - match self.hot_db.get_bytes(column, key)? { + match self + .hot_db + .get_bytes(ExecutionPayload::::db_column(), key)? + { Some(bytes) => Ok(Some(ExecutionPayload::from_ssz_bytes(&bytes, fork_name)?)), None => Ok(None), } @@ -705,10 +690,7 @@ impl, Cold: ItemStore> HotColdDB ) -> Result, Error> { let column = DBColumn::SyncCommitteeBranch; - if let Some(bytes) = self - .hot_db - .get_bytes(column.into(), &block_root.as_ssz_bytes())? - { + if let Some(bytes) = self.hot_db.get_bytes(column, &block_root.as_ssz_bytes())? 
{ let sync_committee_branch = Vec::::from_ssz_bytes(&bytes)?; return Ok(Some(sync_committee_branch)); } @@ -725,7 +707,7 @@ impl, Cold: ItemStore> HotColdDB if let Some(bytes) = self .hot_db - .get_bytes(column.into(), &sync_committee_period.as_ssz_bytes())? + .get_bytes(column, &sync_committee_period.as_ssz_bytes())? { let sync_committee: SyncCommittee = SyncCommittee::from_ssz_bytes(&bytes)?; return Ok(Some(sync_committee)); @@ -741,7 +723,7 @@ impl, Cold: ItemStore> HotColdDB ) -> Result<(), Error> { let column = DBColumn::SyncCommitteeBranch; self.hot_db.put_bytes( - column.into(), + column, &block_root.as_ssz_bytes(), &sync_committee_branch.as_ssz_bytes(), )?; @@ -755,7 +737,7 @@ impl, Cold: ItemStore> HotColdDB ) -> Result<(), Error> { let column = DBColumn::SyncCommittee; self.hot_db.put_bytes( - column.into(), + column, &sync_committee_period.to_le_bytes(), &sync_committee.as_ssz_bytes(), )?; @@ -767,10 +749,10 @@ impl, Cold: ItemStore> HotColdDB &self, sync_committee_period: u64, ) -> Result>, Error> { - let column = DBColumn::LightClientUpdate; - let res = self - .hot_db - .get_bytes(column.into(), &sync_committee_period.to_le_bytes())?; + let res = self.hot_db.get_bytes( + DBColumn::LightClientUpdate, + &sync_committee_period.to_le_bytes(), + )?; if let Some(light_client_update_bytes) = res { let epoch = sync_committee_period @@ -822,10 +804,8 @@ impl, Cold: ItemStore> HotColdDB sync_committee_period: u64, light_client_update: &LightClientUpdate, ) -> Result<(), Error> { - let column = DBColumn::LightClientUpdate; - self.hot_db.put_bytes( - column.into(), + DBColumn::LightClientUpdate, &sync_committee_period.to_le_bytes(), &light_client_update.as_ssz_bytes(), )?; @@ -836,29 +816,29 @@ impl, Cold: ItemStore> HotColdDB /// Check if the blobs for a block exists on disk. 
pub fn blobs_exist(&self, block_root: &Hash256) -> Result { self.blobs_db - .key_exists(DBColumn::BeaconBlob.into(), block_root.as_slice()) + .key_exists(DBColumn::BeaconBlob, block_root.as_slice()) } /// Determine whether a block exists in the database. pub fn block_exists(&self, block_root: &Hash256) -> Result { self.hot_db - .key_exists(DBColumn::BeaconBlock.into(), block_root.as_slice()) + .key_exists(DBColumn::BeaconBlock, block_root.as_slice()) } /// Delete a block from the store and the block cache. pub fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> { self.block_cache.lock().delete(block_root); self.hot_db - .key_delete(DBColumn::BeaconBlock.into(), block_root.as_slice())?; + .key_delete(DBColumn::BeaconBlock, block_root.as_slice())?; self.hot_db - .key_delete(DBColumn::ExecPayload.into(), block_root.as_slice())?; + .key_delete(DBColumn::ExecPayload, block_root.as_slice())?; self.blobs_db - .key_delete(DBColumn::BeaconBlob.into(), block_root.as_slice()) + .key_delete(DBColumn::BeaconBlob, block_root.as_slice()) } pub fn put_blobs(&self, block_root: &Hash256, blobs: BlobSidecarList) -> Result<(), Error> { self.blobs_db.put_bytes( - DBColumn::BeaconBlob.into(), + DBColumn::BeaconBlob, block_root.as_slice(), &blobs.as_ssz_bytes(), )?; @@ -872,8 +852,11 @@ impl, Cold: ItemStore> HotColdDB blobs: BlobSidecarList, ops: &mut Vec, ) { - let db_key = get_key_for_col(DBColumn::BeaconBlob.into(), key.as_slice()); - ops.push(KeyValueStoreOp::PutKeyValue(db_key, blobs.as_ssz_bytes())); + ops.push(KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconBlob, + key.as_slice().to_vec(), + blobs.as_ssz_bytes(), + )); } pub fn data_columns_as_kv_store_ops( @@ -883,12 +866,9 @@ impl, Cold: ItemStore> HotColdDB ops: &mut Vec, ) { for data_column in data_columns { - let db_key = get_key_for_col( - DBColumn::BeaconDataColumn.into(), - &get_data_column_key(block_root, &data_column.index), - ); ops.push(KeyValueStoreOp::PutKeyValue( - db_key, + 
DBColumn::BeaconDataColumn, + get_data_column_key(block_root, &data_column.index), data_column.as_ssz_bytes(), )); } @@ -1202,63 +1182,68 @@ impl, Cold: ItemStore> HotColdDB } StoreOp::DeleteStateTemporaryFlag(state_root) => { - let db_key = - get_key_for_col(TemporaryFlag::db_column().into(), state_root.as_slice()); - key_value_batch.push(KeyValueStoreOp::DeleteKey(db_key)); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + TemporaryFlag::db_column(), + state_root.as_slice().to_vec(), + )); } StoreOp::DeleteBlock(block_root) => { - let key = get_key_for_col(DBColumn::BeaconBlock.into(), block_root.as_slice()); - key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconBlock, + block_root.as_slice().to_vec(), + )); } StoreOp::DeleteBlobs(block_root) => { - let key = get_key_for_col(DBColumn::BeaconBlob.into(), block_root.as_slice()); - key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconBlob, + block_root.as_slice().to_vec(), + )); } StoreOp::DeleteDataColumns(block_root, column_indices) => { for index in column_indices { - let key = get_key_for_col( - DBColumn::BeaconDataColumn.into(), - &get_data_column_key(&block_root, &index), - ); - key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + let key = get_data_column_key(&block_root, &index); + key_value_batch + .push(KeyValueStoreOp::DeleteKey(DBColumn::BeaconDataColumn, key)); } } StoreOp::DeleteState(state_root, slot) => { // Delete the hot state summary. - let state_summary_key = - get_key_for_col(DBColumn::BeaconStateSummary.into(), state_root.as_slice()); - key_value_batch.push(KeyValueStoreOp::DeleteKey(state_summary_key)); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconStateSummary, + state_root.as_slice().to_vec(), + )); // Delete the state temporary flag (if any). Temporary flags are commonly // created by the state advance routine. 
- let state_temp_key = get_key_for_col( - DBColumn::BeaconStateTemporary.into(), - state_root.as_slice(), - ); - key_value_batch.push(KeyValueStoreOp::DeleteKey(state_temp_key)); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconStateTemporary, + state_root.as_slice().to_vec(), + )); if slot.map_or(true, |slot| slot % E::slots_per_epoch() == 0) { - let state_key = - get_key_for_col(DBColumn::BeaconState.into(), state_root.as_slice()); - key_value_batch.push(KeyValueStoreOp::DeleteKey(state_key)); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconState, + state_root.as_slice().to_vec(), + )); } } StoreOp::DeleteExecutionPayload(block_root) => { - let key = get_key_for_col(DBColumn::ExecPayload.into(), block_root.as_slice()); - key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::ExecPayload, + block_root.as_slice().to_vec(), + )); } StoreOp::DeleteSyncCommitteeBranch(block_root) => { - let key = get_key_for_col( - DBColumn::SyncCommitteeBranch.into(), - block_root.as_slice(), - ); - key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::SyncCommitteeBranch, + block_root.as_slice().to_vec(), + )); } StoreOp::KeyValueOp(kv_op) => { @@ -1269,6 +1254,19 @@ impl, Cold: ItemStore> HotColdDB Ok(key_value_batch) } + pub fn delete_batch(&self, col: DBColumn, ops: Vec) -> Result<(), Error> { + let new_ops: HashSet<&[u8]> = ops.iter().map(|v| v.as_slice()).collect(); + self.hot_db.delete_batch(col, new_ops) + } + + pub fn delete_if( + &self, + column: DBColumn, + f: impl Fn(&[u8]) -> Result, + ) -> Result<(), Error> { + self.hot_db.delete_if(column, f) + } + pub fn do_atomically_with_block_and_blobs_cache( &self, batch: Vec>, @@ -1608,10 +1606,8 @@ impl, Cold: ItemStore> HotColdDB ) -> Result<(), Error> { ops.push(ColdStateSummary { slot }.as_kv_store_op(*state_root)); ops.push(KeyValueStoreOp::PutKeyValue( - 
get_key_for_col( - DBColumn::BeaconStateRoots.into(), - &slot.as_u64().to_be_bytes(), - ), + DBColumn::BeaconStateRoots, + slot.as_u64().to_be_bytes().to_vec(), state_root.as_slice().to_vec(), )); Ok(()) @@ -1678,19 +1674,19 @@ impl, Cold: ItemStore> HotColdDB out }; - let key = get_key_for_col( - DBColumn::BeaconStateSnapshot.into(), - &state.slot().as_u64().to_be_bytes(), - ); - ops.push(KeyValueStoreOp::PutKeyValue(key, compressed_value)); + ops.push(KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconStateSnapshot, + state.slot().as_u64().to_be_bytes().to_vec(), + compressed_value, + )); Ok(()) } fn load_cold_state_bytes_as_snapshot(&self, slot: Slot) -> Result>, Error> { - match self.cold_db.get_bytes( - DBColumn::BeaconStateSnapshot.into(), - &slot.as_u64().to_be_bytes(), - )? { + match self + .cold_db + .get_bytes(DBColumn::BeaconStateSnapshot, &slot.as_u64().to_be_bytes())? + { Some(bytes) => { let _timer = metrics::start_timer(&metrics::STORE_BEACON_STATE_FREEZER_DECOMPRESS_TIME); @@ -1731,11 +1727,11 @@ impl, Cold: ItemStore> HotColdDB }; let diff_bytes = diff.as_ssz_bytes(); - let key = get_key_for_col( - DBColumn::BeaconStateDiff.into(), - &state.slot().as_u64().to_be_bytes(), - ); - ops.push(KeyValueStoreOp::PutKeyValue(key, diff_bytes)); + ops.push(KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconStateDiff, + state.slot().as_u64().to_be_bytes().to_vec(), + diff_bytes, + )); Ok(()) } @@ -1858,10 +1854,7 @@ impl, Cold: ItemStore> HotColdDB let bytes = { let _t = metrics::start_timer(&metrics::BEACON_HDIFF_READ_TIMES); self.cold_db - .get_bytes( - DBColumn::BeaconStateDiff.into(), - &slot.as_u64().to_be_bytes(), - )? + .get_bytes(DBColumn::BeaconStateDiff, &slot.as_u64().to_be_bytes())? .ok_or(HotColdDBError::MissingHDiff(slot))? }; let hdiff = { @@ -2054,7 +2047,7 @@ impl, Cold: ItemStore> HotColdDB match self .blobs_db - .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_slice())? + .get_bytes(DBColumn::BeaconBlob, block_root.as_slice())? 
{ Some(ref blobs_bytes) => { // We insert a VariableList of BlobSidecars into the db, but retrieve @@ -2084,8 +2077,17 @@ impl, Cold: ItemStore> HotColdDB /// Fetch all keys in the data_column column with prefix `block_root` pub fn get_data_column_keys(&self, block_root: Hash256) -> Result, Error> { self.blobs_db - .iter_raw_keys(DBColumn::BeaconDataColumn, block_root.as_slice()) - .map(|key| key.and_then(|key| parse_data_column_key(key).map(|key| key.1))) + .iter_column_from::>(DBColumn::BeaconDataColumn, block_root.as_slice()) + .take_while(|res| { + let Ok((key, _)) = res else { return false }; + + if !key.starts_with(block_root.as_slice()) { + return false; + } + + true + }) + .map(|key| key.and_then(|(key, _)| parse_data_column_key(key).map(|key| key.1))) .collect() } @@ -2106,7 +2108,7 @@ impl, Cold: ItemStore> HotColdDB } match self.blobs_db.get_bytes( - DBColumn::BeaconDataColumn.into(), + DBColumn::BeaconDataColumn, &get_data_column_key(block_root, column_index), )? { Some(ref data_column_bytes) => { @@ -2164,10 +2166,12 @@ impl, Cold: ItemStore> HotColdDB schema_version: SchemaVersion, mut ops: Vec, ) -> Result<(), Error> { - let column = SchemaVersion::db_column().into(); let key = SCHEMA_VERSION_KEY.as_slice(); - let db_key = get_key_for_col(column, key); - let op = KeyValueStoreOp::PutKeyValue(db_key, schema_version.as_store_bytes()); + let op = KeyValueStoreOp::PutKeyValue( + SchemaVersion::db_column(), + key.to_vec(), + schema_version.as_store_bytes(), + ); ops.push(op); self.hot_db.do_atomically(ops) @@ -2589,7 +2593,8 @@ impl, Cold: ItemStore> HotColdDB let mut ops = vec![]; for slot in start_slot.as_u64()..end_slot.as_u64() { ops.push(KeyValueStoreOp::PutKeyValue( - get_key_for_col(DBColumn::BeaconBlockRoots.into(), &slot.to_be_bytes()), + DBColumn::BeaconBlockRoots, + slot.to_be_bytes().to_vec(), block_root.as_slice().to_vec(), )); } @@ -2811,77 +2816,62 @@ impl, Cold: ItemStore> HotColdDB "data_availability_boundary" => 
data_availability_boundary, ); - let mut ops = vec![]; - let mut last_pruned_block_root = None; + // We collect block roots of deleted blobs in memory. Even for 10y of blob history this + // vec won't go beyond 1GB. We can probably optimise this out eventually. + let mut removed_block_roots = vec![]; - for res in self.forwards_block_roots_iterator_until(oldest_blob_slot, end_slot, || { - let (_, split_state) = self - .get_advanced_hot_state(split.block_root, split.slot, split.state_root)? - .ok_or(HotColdDBError::MissingSplitState( - split.state_root, - split.slot, - ))?; - - Ok((split_state, split.block_root)) - })? { - let (block_root, slot) = match res { - Ok(tuple) => tuple, - Err(e) => { - warn!( - self.log, - "Stopping blob pruning early"; - "error" => ?e, - ); - break; - } + let remove_blob_if = |blobs_bytes: &[u8]| { + let blobs = Vec::from_ssz_bytes(blobs_bytes)?; + let Some(blob): Option<&Arc>> = blobs.first() else { + return Ok(false); }; - if Some(block_root) != last_pruned_block_root { - if self - .spec - .is_peer_das_enabled_for_epoch(slot.epoch(E::slots_per_epoch())) - { - // data columns - let indices = self.get_data_column_keys(block_root)?; - if !indices.is_empty() { - trace!( - self.log, - "Pruning data columns of block"; - "slot" => slot, - "block_root" => ?block_root, - ); - last_pruned_block_root = Some(block_root); - ops.push(StoreOp::DeleteDataColumns(block_root, indices)); - } - } else if self.blobs_exist(&block_root)? 
{ - trace!( - self.log, - "Pruning blobs of block"; - "slot" => slot, - "block_root" => ?block_root, - ); - last_pruned_block_root = Some(block_root); - ops.push(StoreOp::DeleteBlobs(block_root)); - } - } + if blob.slot() <= end_slot { + // Store the block root so we can delete from the blob cache + removed_block_roots.push(blob.block_root()); + // Delete from the on-disk db + return Ok(true); + }; + Ok(false) + }; - if slot >= end_slot { - break; - } + self.blobs_db + .delete_if(DBColumn::BeaconBlob, remove_blob_if)?; + + if self.spec.is_peer_das_enabled_for_epoch(start_epoch) { + let remove_data_column_if = |blobs_bytes: &[u8]| { + let data_column: DataColumnSidecar = + DataColumnSidecar::from_ssz_bytes(blobs_bytes)?; + + if data_column.slot() <= end_slot { + return Ok(true); + }; + + Ok(false) + }; + + self.blobs_db + .delete_if(DBColumn::BeaconDataColumn, remove_data_column_if)?; } - let blob_lists_pruned = ops.len(); + + // Remove deleted blobs from the cache. + let mut block_cache = self.block_cache.lock(); + for block_root in removed_block_roots { + block_cache.delete_blobs(&block_root); + } + drop(block_cache); + let new_blob_info = BlobInfo { oldest_blob_slot: Some(end_slot + 1), blobs_db: blob_info.blobs_db, }; - let update_blob_info = self.compare_and_set_blob_info(blob_info, new_blob_info)?; - ops.push(StoreOp::KeyValueOp(update_blob_info)); - self.do_atomically_with_block_and_blobs_cache(ops)?; + let op = self.compare_and_set_blob_info(blob_info, new_blob_info)?; + self.do_atomically_with_block_and_blobs_cache(vec![StoreOp::KeyValueOp(op)])?; + debug!( self.log, "Blob pruning complete"; - "blob_lists_pruned" => blob_lists_pruned, ); Ok(()) @@ -2944,10 +2934,7 @@ impl, Cold: ItemStore> HotColdDB for column in columns { for res in self.cold_db.iter_column_keys::>(column) { let key = res?; - cold_ops.push(KeyValueStoreOp::DeleteKey(get_key_for_col( - column.as_str(), - &key, - ))); + cold_ops.push(KeyValueStoreOp::DeleteKey(column, key)); } } let 
delete_ops = cold_ops.len(); @@ -3085,10 +3072,8 @@ pub fn migrate_database, Cold: ItemStore>( // Store the slot to block root mapping. cold_db_block_ops.push(KeyValueStoreOp::PutKeyValue( - get_key_for_col( - DBColumn::BeaconBlockRoots.into(), - &slot.as_u64().to_be_bytes(), - ), + DBColumn::BeaconBlockRoots, + slot.as_u64().to_be_bytes().to_vec(), block_root.as_slice().to_vec(), )); @@ -3339,3 +3324,57 @@ impl StoreItem for TemporaryFlag { Ok(TemporaryFlag) } } + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub struct BytesKey { + pub key: Vec, +} + +impl db_key::Key for BytesKey { + fn from_u8(key: &[u8]) -> Self { + Self { key: key.to_vec() } + } + + fn as_slice T>(&self, f: F) -> T { + f(self.key.as_slice()) + } +} + +impl BytesKey { + pub fn starts_with(&self, prefix: &Self) -> bool { + self.key.starts_with(&prefix.key) + } + + /// Return `true` iff this `BytesKey` was created with the given `column`. + pub fn matches_column(&self, column: DBColumn) -> bool { + self.key.starts_with(column.as_bytes()) + } + + /// Remove the column from a key, returning its `Hash256` portion. + pub fn remove_column(&self, column: DBColumn) -> Option { + if self.matches_column(column) { + let subkey = &self.key[column.as_bytes().len()..]; + if subkey.len() == 32 { + return Some(Hash256::from_slice(subkey)); + } + } + None + } + + /// Remove the column from a key. + /// + /// Will return `None` if the value doesn't match the column or has the wrong length. 
+ pub fn remove_column_variable(&self, column: DBColumn) -> Option<&[u8]> { + if self.matches_column(column) { + let subkey = &self.key[column.as_bytes().len()..]; + if subkey.len() == column.key_size() { + return Some(subkey); + } + } + None + } + + pub fn from_vec(key: Vec) -> Self { + Self { key } + } +} diff --git a/beacon_node/store/src/impls/beacon_state.rs b/beacon_node/store/src/impls/beacon_state.rs index 48c289f2b2..fd08e547f1 100644 --- a/beacon_node/store/src/impls/beacon_state.rs +++ b/beacon_node/store/src/impls/beacon_state.rs @@ -13,8 +13,11 @@ pub fn store_full_state( }; metrics::inc_counter_by(&metrics::BEACON_STATE_WRITE_BYTES, bytes.len() as u64); metrics::inc_counter(&metrics::BEACON_STATE_WRITE_COUNT); - let key = get_key_for_col(DBColumn::BeaconState.into(), state_root.as_slice()); - ops.push(KeyValueStoreOp::PutKeyValue(key, bytes)); + ops.push(KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconState, + state_root.as_slice().to_vec(), + bytes, + )); Ok(()) } @@ -25,7 +28,7 @@ pub fn get_full_state, E: EthSpec>( ) -> Result>, Error> { let total_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES); - match db.get_bytes(DBColumn::BeaconState.into(), state_root.as_slice())? { + match db.get_bytes(DBColumn::BeaconState, state_root.as_slice())? 
{ Some(bytes) => { let overhead_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_OVERHEAD_TIMES); let container = StorageContainer::from_ssz_bytes(&bytes, spec)?; diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs deleted file mode 100644 index 720afd0f3f..0000000000 --- a/beacon_node/store/src/leveldb_store.rs +++ /dev/null @@ -1,310 +0,0 @@ -use super::*; -use crate::hot_cold_store::HotColdDBError; -use leveldb::compaction::Compaction; -use leveldb::database::batch::{Batch, Writebatch}; -use leveldb::database::kv::KV; -use leveldb::database::Database; -use leveldb::error::Error as LevelDBError; -use leveldb::iterator::{Iterable, KeyIterator, LevelDBIterator}; -use leveldb::options::{Options, ReadOptions, WriteOptions}; -use parking_lot::Mutex; -use std::marker::PhantomData; -use std::path::Path; - -/// A wrapped leveldb database. -pub struct LevelDB { - db: Database, - /// A mutex to synchronise sensitive read-write transactions. - transaction_mutex: Mutex<()>, - _phantom: PhantomData, -} - -impl LevelDB { - /// Open a database at `path`, creating a new database if one does not already exist. 
- pub fn open(path: &Path) -> Result { - let mut options = Options::new(); - - options.create_if_missing = true; - - let db = Database::open(path, options)?; - let transaction_mutex = Mutex::new(()); - - Ok(Self { - db, - transaction_mutex, - _phantom: PhantomData, - }) - } - - fn read_options(&self) -> ReadOptions { - ReadOptions::new() - } - - fn write_options(&self) -> WriteOptions { - WriteOptions::new() - } - - fn write_options_sync(&self) -> WriteOptions { - let mut opts = WriteOptions::new(); - opts.sync = true; - opts - } - - fn put_bytes_with_options( - &self, - col: &str, - key: &[u8], - val: &[u8], - opts: WriteOptions, - ) -> Result<(), Error> { - let column_key = get_key_for_col(col, key); - - metrics::inc_counter_vec(&metrics::DISK_DB_WRITE_COUNT, &[col]); - metrics::inc_counter_vec_by(&metrics::DISK_DB_WRITE_BYTES, &[col], val.len() as u64); - let _timer = metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); - - self.db - .put(opts, BytesKey::from_vec(column_key), val) - .map_err(Into::into) - } - - pub fn keys_iter(&self) -> KeyIterator { - self.db.keys_iter(self.read_options()) - } -} - -impl KeyValueStore for LevelDB { - /// Store some `value` in `column`, indexed with `key`. - fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { - self.put_bytes_with_options(col, key, val, self.write_options()) - } - - fn put_bytes_sync(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { - self.put_bytes_with_options(col, key, val, self.write_options_sync()) - } - - fn sync(&self) -> Result<(), Error> { - self.put_bytes_sync("sync", b"sync", b"sync") - } - - /// Retrieve some bytes in `column` with `key`. 
- fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error> { - let column_key = get_key_for_col(col, key); - - metrics::inc_counter_vec(&metrics::DISK_DB_READ_COUNT, &[col]); - let timer = metrics::start_timer(&metrics::DISK_DB_READ_TIMES); - - self.db - .get(self.read_options(), BytesKey::from_vec(column_key)) - .map_err(Into::into) - .map(|opt| { - opt.inspect(|bytes| { - metrics::inc_counter_vec_by( - &metrics::DISK_DB_READ_BYTES, - &[col], - bytes.len() as u64, - ); - metrics::stop_timer(timer); - }) - }) - } - - /// Return `true` if `key` exists in `column`. - fn key_exists(&self, col: &str, key: &[u8]) -> Result { - let column_key = get_key_for_col(col, key); - - metrics::inc_counter_vec(&metrics::DISK_DB_EXISTS_COUNT, &[col]); - - self.db - .get(self.read_options(), BytesKey::from_vec(column_key)) - .map_err(Into::into) - .map(|val| val.is_some()) - } - - /// Removes `key` from `column`. - fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { - let column_key = get_key_for_col(col, key); - - metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[col]); - - self.db - .delete(self.write_options(), BytesKey::from_vec(column_key)) - .map_err(Into::into) - } - - fn do_atomically(&self, ops_batch: Vec) -> Result<(), Error> { - let mut leveldb_batch = Writebatch::new(); - for op in ops_batch { - match op { - KeyValueStoreOp::PutKeyValue(key, value) => { - let col = get_col_from_key(&key).unwrap_or("unknown".to_owned()); - metrics::inc_counter_vec(&metrics::DISK_DB_WRITE_COUNT, &[&col]); - metrics::inc_counter_vec_by( - &metrics::DISK_DB_WRITE_BYTES, - &[&col], - value.len() as u64, - ); - - leveldb_batch.put(BytesKey::from_vec(key), &value); - } - - KeyValueStoreOp::DeleteKey(key) => { - let col = get_col_from_key(&key).unwrap_or("unknown".to_owned()); - metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[&col]); - - leveldb_batch.delete(BytesKey::from_vec(key)); - } - } - } - - let _timer = 
metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); - - self.db.write(self.write_options(), &leveldb_batch)?; - Ok(()) - } - - fn begin_rw_transaction(&self) -> MutexGuard<()> { - self.transaction_mutex.lock() - } - - fn compact_column(&self, column: DBColumn) -> Result<(), Error> { - // Use key-size-agnostic keys [] and 0xff..ff with a minimum of 32 bytes to account for - // columns that may change size between sub-databases or schema versions. - let start_key = BytesKey::from_vec(get_key_for_col(column.as_str(), &[])); - let end_key = BytesKey::from_vec(get_key_for_col( - column.as_str(), - &vec![0xff; std::cmp::max(column.key_size(), 32)], - )); - self.db.compact(&start_key, &end_key); - Ok(()) - } - - fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { - let start_key = BytesKey::from_vec(get_key_for_col(column.into(), from)); - let iter = self.db.iter(self.read_options()); - iter.seek(&start_key); - - Box::new( - iter.take_while(move |(key, _)| key.matches_column(column)) - .map(move |(bytes_key, value)| { - let key = bytes_key.remove_column_variable(column).ok_or_else(|| { - HotColdDBError::IterationError { - unexpected_key: bytes_key.clone(), - } - })?; - Ok((K::from_bytes(key)?, value)) - }), - ) - } - - fn iter_raw_entries(&self, column: DBColumn, prefix: &[u8]) -> RawEntryIter { - let start_key = BytesKey::from_vec(get_key_for_col(column.into(), prefix)); - - let iter = self.db.iter(self.read_options()); - iter.seek(&start_key); - - Box::new( - iter.take_while(move |(key, _)| key.key.starts_with(start_key.key.as_slice())) - .map(move |(bytes_key, value)| { - let subkey = &bytes_key.key[column.as_bytes().len()..]; - Ok((Vec::from(subkey), value)) - }), - ) - } - - fn iter_raw_keys(&self, column: DBColumn, prefix: &[u8]) -> RawKeyIter { - let start_key = BytesKey::from_vec(get_key_for_col(column.into(), prefix)); - - let iter = self.db.keys_iter(self.read_options()); - iter.seek(&start_key); - - Box::new( - iter.take_while(move |key| 
key.key.starts_with(start_key.key.as_slice())) - .map(move |bytes_key| { - let subkey = &bytes_key.key[column.as_bytes().len()..]; - Ok(Vec::from(subkey)) - }), - ) - } - - /// Iterate through all keys and values in a particular column. - fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { - let start_key = - BytesKey::from_vec(get_key_for_col(column.into(), &vec![0; column.key_size()])); - - let iter = self.db.keys_iter(self.read_options()); - iter.seek(&start_key); - - Box::new( - iter.take_while(move |key| key.matches_column(column)) - .map(move |bytes_key| { - let key = bytes_key.remove_column_variable(column).ok_or_else(|| { - HotColdDBError::IterationError { - unexpected_key: bytes_key.clone(), - } - })?; - K::from_bytes(key) - }), - ) - } -} - -impl ItemStore for LevelDB {} - -/// Used for keying leveldb. -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] -pub struct BytesKey { - key: Vec, -} - -impl db_key::Key for BytesKey { - fn from_u8(key: &[u8]) -> Self { - Self { key: key.to_vec() } - } - - fn as_slice T>(&self, f: F) -> T { - f(self.key.as_slice()) - } -} - -impl BytesKey { - pub fn starts_with(&self, prefix: &Self) -> bool { - self.key.starts_with(&prefix.key) - } - - /// Return `true` iff this `BytesKey` was created with the given `column`. - pub fn matches_column(&self, column: DBColumn) -> bool { - self.key.starts_with(column.as_bytes()) - } - - /// Remove the column from a 32 byte key, yielding the `Hash256` key. - pub fn remove_column(&self, column: DBColumn) -> Option { - let key = self.remove_column_variable(column)?; - (column.key_size() == 32).then(|| Hash256::from_slice(key)) - } - - /// Remove the column from a key. - /// - /// Will return `None` if the value doesn't match the column or has the wrong length. 
- pub fn remove_column_variable(&self, column: DBColumn) -> Option<&[u8]> { - if self.matches_column(column) { - let subkey = &self.key[column.as_bytes().len()..]; - if subkey.len() == column.key_size() { - return Some(subkey); - } - } - None - } - - pub fn from_vec(key: Vec) -> Self { - Self { key } - } -} - -impl From for Error { - fn from(e: LevelDBError) -> Error { - Error::DBError { - message: format!("{:?}", e), - } - } -} diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 1458fa846c..0cfc42ab15 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -19,7 +19,6 @@ pub mod hdiff; pub mod historic_state_cache; pub mod hot_cold_store; mod impls; -mod leveldb_store; mod memory_store; pub mod metadata; pub mod metrics; @@ -27,13 +26,13 @@ pub mod partial_beacon_state; pub mod reconstruct; pub mod state_cache; +pub mod database; pub mod iter; pub use self::blob_sidecar_list_from_root::BlobSidecarListFromRoot; pub use self::config::StoreConfig; pub use self::consensus_context::OnDiskConsensusContext; pub use self::hot_cold_store::{HotColdDB, HotStateSummary, Split}; -pub use self::leveldb_store::LevelDB; pub use self::memory_store::MemoryStore; pub use crate::metadata::BlobInfo; pub use errors::Error; @@ -41,8 +40,9 @@ pub use impls::beacon_state::StorageContainer as BeaconStateStorageContainer; pub use metadata::AnchorInfo; pub use metrics::scrape_for_metrics; use parking_lot::MutexGuard; +use std::collections::HashSet; use std::sync::Arc; -use strum::{EnumString, IntoStaticStr}; +use strum::{EnumIter, EnumString, IntoStaticStr}; pub use types::*; const DATA_COLUMN_DB_KEY_SIZE: usize = 32 + 8; @@ -50,18 +50,18 @@ const DATA_COLUMN_DB_KEY_SIZE: usize = 32 + 8; pub type ColumnIter<'a, K> = Box), Error>> + 'a>; pub type ColumnKeyIter<'a, K> = Box> + 'a>; -pub type RawEntryIter<'a> = Box, Vec), Error>> + 'a>; -pub type RawKeyIter<'a> = Box, Error>> + 'a>; +pub type RawEntryIter<'a> = + Result, Vec), Error>> + 'a>, 
Error>; pub trait KeyValueStore: Sync + Send + Sized + 'static { /// Retrieve some bytes in `column` with `key`. - fn get_bytes(&self, column: &str, key: &[u8]) -> Result>, Error>; + fn get_bytes(&self, column: DBColumn, key: &[u8]) -> Result>, Error>; /// Store some `value` in `column`, indexed with `key`. - fn put_bytes(&self, column: &str, key: &[u8], value: &[u8]) -> Result<(), Error>; + fn put_bytes(&self, column: DBColumn, key: &[u8], value: &[u8]) -> Result<(), Error>; /// Same as put_bytes() but also force a flush to disk - fn put_bytes_sync(&self, column: &str, key: &[u8], value: &[u8]) -> Result<(), Error>; + fn put_bytes_sync(&self, column: DBColumn, key: &[u8], value: &[u8]) -> Result<(), Error>; /// Flush to disk. See /// https://chromium.googlesource.com/external/leveldb/+/HEAD/doc/index.md#synchronous-writes @@ -69,10 +69,10 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { fn sync(&self) -> Result<(), Error>; /// Return `true` if `key` exists in `column`. - fn key_exists(&self, column: &str, key: &[u8]) -> Result; + fn key_exists(&self, column: DBColumn, key: &[u8]) -> Result; /// Removes `key` from `column`. - fn key_delete(&self, column: &str, key: &[u8]) -> Result<(), Error>; + fn key_delete(&self, column: DBColumn, key: &[u8]) -> Result<(), Error>; /// Execute either all of the operations in `batch` or none at all, returning an error. fn do_atomically(&self, batch: Vec) -> Result<(), Error>; @@ -105,17 +105,21 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { self.iter_column_from(column, &vec![0; column.key_size()]) } - /// Iterate through all keys and values in a column from a given starting point. + /// Iterate through all keys and values in a column from a given starting point that fulfill the given predicate. 
fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter; - fn iter_raw_entries(&self, _column: DBColumn, _prefix: &[u8]) -> RawEntryIter { - Box::new(std::iter::empty()) - } - - fn iter_raw_keys(&self, column: DBColumn, prefix: &[u8]) -> RawKeyIter; + fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter; /// Iterate through all keys in a particular column. - fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter; + fn iter_column_keys_from(&self, column: DBColumn, from: &[u8]) -> ColumnKeyIter; + + fn delete_batch(&self, column: DBColumn, ops: HashSet<&[u8]>) -> Result<(), Error>; + + fn delete_if( + &self, + column: DBColumn, + f: impl FnMut(&[u8]) -> Result, + ) -> Result<(), Error>; } pub trait Key: Sized + 'static { @@ -138,7 +142,7 @@ impl Key for Vec { } } -pub fn get_key_for_col(column: &str, key: &[u8]) -> Vec { +pub fn get_key_for_col(column: DBColumn, key: &[u8]) -> Vec { let mut result = column.as_bytes().to_vec(); result.extend_from_slice(key); result @@ -176,14 +180,18 @@ pub fn parse_data_column_key(data: Vec) -> Result<(Hash256, ColumnIndex), Er #[must_use] #[derive(Clone)] pub enum KeyValueStoreOp { - PutKeyValue(Vec, Vec), - DeleteKey(Vec), + // Indicate that a PUT operation should be made + // to the db store for a (Column, Key, Value) + PutKeyValue(DBColumn, Vec, Vec), + // Indicate that a DELETE operation should be made + // to the db store for a (Column, Key) + DeleteKey(DBColumn, Vec), } pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'static { /// Store an item in `Self`. 
fn put(&self, key: &Hash256, item: &I) -> Result<(), Error> { - let column = I::db_column().into(); + let column = I::db_column(); let key = key.as_slice(); self.put_bytes(column, key, &item.as_store_bytes()) @@ -191,7 +199,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati } fn put_sync(&self, key: &Hash256, item: &I) -> Result<(), Error> { - let column = I::db_column().into(); + let column = I::db_column(); let key = key.as_slice(); self.put_bytes_sync(column, key, &item.as_store_bytes()) @@ -200,7 +208,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati /// Retrieve an item from `Self`. fn get(&self, key: &Hash256) -> Result, Error> { - let column = I::db_column().into(); + let column = I::db_column(); let key = key.as_slice(); match self.get_bytes(column, key)? { @@ -211,7 +219,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati /// Returns `true` if the given key represents an item in `Self`. fn exists(&self, key: &Hash256) -> Result { - let column = I::db_column().into(); + let column = I::db_column(); let key = key.as_slice(); self.key_exists(column, key) @@ -219,7 +227,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati /// Remove an item from `Self`. fn delete(&self, key: &Hash256) -> Result<(), Error> { - let column = I::db_column().into(); + let column = I::db_column(); let key = key.as_slice(); self.key_delete(column, key) @@ -247,7 +255,7 @@ pub enum StoreOp<'a, E: EthSpec> { } /// A unique column identifier. -#[derive(Debug, Clone, Copy, PartialEq, IntoStaticStr, EnumString)] +#[derive(Debug, Clone, Copy, PartialEq, IntoStaticStr, EnumString, EnumIter)] pub enum DBColumn { /// For data related to the database itself. 
#[strum(serialize = "bma")] @@ -351,6 +359,9 @@ pub enum DBColumn { /// For helping persist eagerly computed light client bootstrap data #[strum(serialize = "scm")] SyncCommittee, + /// The dummy table is used to force the db to sync + #[strum(serialize = "dmy")] + Dummy, } /// A block from the database, which might have an execution payload or not. @@ -401,7 +412,8 @@ impl DBColumn { | Self::BeaconStateDiff | Self::SyncCommittee | Self::SyncCommitteeBranch - | Self::LightClientUpdate => 8, + | Self::LightClientUpdate + | Self::Dummy => 8, Self::BeaconDataColumn => DATA_COLUMN_DB_KEY_SIZE, } } @@ -421,13 +433,18 @@ pub trait StoreItem: Sized { fn from_store_bytes(bytes: &[u8]) -> Result; fn as_kv_store_op(&self, key: Hash256) -> KeyValueStoreOp { - let db_key = get_key_for_col(Self::db_column().into(), key.as_slice()); - KeyValueStoreOp::PutKeyValue(db_key, self.as_store_bytes()) + KeyValueStoreOp::PutKeyValue( + Self::db_column(), + key.as_slice().to_vec(), + self.as_store_bytes(), + ) } } #[cfg(test)] mod tests { + use crate::database::interface::BeaconNodeBackend; + use super::*; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -477,7 +494,7 @@ mod tests { fn simplediskdb() { let dir = tempdir().unwrap(); let path = dir.path(); - let store = LevelDB::open(path).unwrap(); + let store = BeaconNodeBackend::open(&StoreConfig::default(), path).unwrap(); test_impl(store); } @@ -508,7 +525,7 @@ mod tests { #[test] fn test_get_col_from_key() { - let key = get_key_for_col(DBColumn::BeaconBlock.into(), &[1u8; 32]); + let key = get_key_for_col(DBColumn::BeaconBlock, &[1u8; 32]); let col = get_col_from_key(&key).unwrap(); assert_eq!(col, "blk"); } diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index 4c7bfdf10f..6070a2d3f0 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -1,9 +1,9 @@ use crate::{ - get_key_for_col, leveldb_store::BytesKey, ColumnIter, ColumnKeyIter, 
DBColumn, Error, - ItemStore, Key, KeyValueStore, KeyValueStoreOp, RawKeyIter, + errors::Error as DBError, get_key_for_col, hot_cold_store::BytesKey, ColumnIter, ColumnKeyIter, + DBColumn, Error, ItemStore, Key, KeyValueStore, KeyValueStoreOp, }; use parking_lot::{Mutex, MutexGuard, RwLock}; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashSet}; use std::marker::PhantomData; use types::*; @@ -29,19 +29,19 @@ impl MemoryStore { impl KeyValueStore for MemoryStore { /// Get the value of some key from the database. Returns `None` if the key does not exist. - fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error> { + fn get_bytes(&self, col: DBColumn, key: &[u8]) -> Result>, Error> { let column_key = BytesKey::from_vec(get_key_for_col(col, key)); Ok(self.db.read().get(&column_key).cloned()) } /// Puts a key in the database. - fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { + fn put_bytes(&self, col: DBColumn, key: &[u8], val: &[u8]) -> Result<(), Error> { let column_key = BytesKey::from_vec(get_key_for_col(col, key)); self.db.write().insert(column_key, val.to_vec()); Ok(()) } - fn put_bytes_sync(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { + fn put_bytes_sync(&self, col: DBColumn, key: &[u8], val: &[u8]) -> Result<(), Error> { self.put_bytes(col, key, val) } @@ -51,13 +51,13 @@ impl KeyValueStore for MemoryStore { } /// Return true if some key exists in some column. - fn key_exists(&self, col: &str, key: &[u8]) -> Result { + fn key_exists(&self, col: DBColumn, key: &[u8]) -> Result { let column_key = BytesKey::from_vec(get_key_for_col(col, key)); Ok(self.db.read().contains_key(&column_key)) } /// Delete some key from the database. 
- fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { + fn key_delete(&self, col: DBColumn, key: &[u8]) -> Result<(), Error> { let column_key = BytesKey::from_vec(get_key_for_col(col, key)); self.db.write().remove(&column_key); Ok(()) @@ -66,12 +66,16 @@ impl KeyValueStore for MemoryStore { fn do_atomically(&self, batch: Vec) -> Result<(), Error> { for op in batch { match op { - KeyValueStoreOp::PutKeyValue(key, value) => { - self.db.write().insert(BytesKey::from_vec(key), value); + KeyValueStoreOp::PutKeyValue(col, key, value) => { + let column_key = get_key_for_col(col, &key); + self.db + .write() + .insert(BytesKey::from_vec(column_key), value); } - KeyValueStoreOp::DeleteKey(key) => { - self.db.write().remove(&BytesKey::from_vec(key)); + KeyValueStoreOp::DeleteKey(col, key) => { + let column_key = get_key_for_col(col, &key); + self.db.write().remove(&BytesKey::from_vec(column_key)); } } } @@ -82,8 +86,7 @@ impl KeyValueStore for MemoryStore { // We use this awkward pattern because we can't lock the `self.db` field *and* maintain a // reference to the lock guard across calls to `.next()`. This would be require a // struct with a field (the iterator) which references another field (the lock guard). 
- let start_key = BytesKey::from_vec(get_key_for_col(column.as_str(), from)); - let col = column.as_str(); + let start_key = BytesKey::from_vec(get_key_for_col(column, from)); let keys = self .db .read() @@ -92,7 +95,7 @@ impl KeyValueStore for MemoryStore { .filter_map(|(k, _)| k.remove_column_variable(column).map(|k| k.to_vec())) .collect::>(); Box::new(keys.into_iter().filter_map(move |key| { - self.get_bytes(col, &key).transpose().map(|res| { + self.get_bytes(column, &key).transpose().map(|res| { let k = K::from_bytes(&key)?; let v = res?; Ok((k, v)) @@ -100,18 +103,6 @@ impl KeyValueStore for MemoryStore { })) } - fn iter_raw_keys(&self, column: DBColumn, prefix: &[u8]) -> RawKeyIter { - let start_key = BytesKey::from_vec(get_key_for_col(column.as_str(), prefix)); - let keys = self - .db - .read() - .range(start_key.clone()..) - .take_while(|(k, _)| k.starts_with(&start_key)) - .filter_map(|(k, _)| k.remove_column_variable(column).map(|k| k.to_vec())) - .collect::>(); - Box::new(keys.into_iter().map(Ok)) - } - fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { Box::new(self.iter_column(column).map(|res| res.map(|(k, _)| k))) } @@ -123,6 +114,44 @@ impl KeyValueStore for MemoryStore { fn compact_column(&self, _column: DBColumn) -> Result<(), Error> { Ok(()) } + + fn iter_column_keys_from(&self, column: DBColumn, from: &[u8]) -> ColumnKeyIter { + // We use this awkward pattern because we can't lock the `self.db` field *and* maintain a + // reference to the lock guard across calls to `.next()`. This would be require a + // struct with a field (the iterator) which references another field (the lock guard). + let start_key = BytesKey::from_vec(get_key_for_col(column, from)); + let keys = self + .db + .read() + .range(start_key..) 
+ .take_while(|(k, _)| k.remove_column_variable(column).is_some()) + .filter_map(|(k, _)| k.remove_column_variable(column).map(|k| k.to_vec())) + .collect::>(); + Box::new(keys.into_iter().map(move |key| K::from_bytes(&key))) + } + + fn delete_batch(&self, col: DBColumn, ops: HashSet<&[u8]>) -> Result<(), DBError> { + for op in ops { + let column_key = get_key_for_col(col, op); + self.db.write().remove(&BytesKey::from_vec(column_key)); + } + Ok(()) + } + + fn delete_if( + &self, + column: DBColumn, + mut f: impl FnMut(&[u8]) -> Result, + ) -> Result<(), Error> { + self.db.write().retain(|key, value| { + if key.remove_column_variable(column).is_some() { + !f(value).unwrap_or(false) + } else { + true + } + }); + Ok(()) + } } impl ItemStore for MemoryStore {} diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index f0dd061790..6f9f667917 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -33,6 +33,13 @@ pub static DISK_DB_READ_BYTES: LazyLock> = LazyLock::new(| &["col"], ) }); +pub static DISK_DB_KEY_READ_BYTES: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( + "store_disk_db_key_read_bytes_total", + "Number of key bytes read from the hot on-disk DB", + &["col"], + ) +}); pub static DISK_DB_READ_COUNT: LazyLock> = LazyLock::new(|| { try_create_int_counter_vec( "store_disk_db_read_count_total", @@ -40,6 +47,13 @@ pub static DISK_DB_READ_COUNT: LazyLock> = LazyLock::new(| &["col"], ) }); +pub static DISK_DB_KEY_READ_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( + "store_disk_db_read_count_total", + "Total number of key reads to the hot on-disk DB", + &["col"], + ) +}); pub static DISK_DB_WRITE_COUNT: LazyLock> = LazyLock::new(|| { try_create_int_counter_vec( "store_disk_db_write_count_total", @@ -66,6 +80,12 @@ pub static DISK_DB_EXISTS_COUNT: LazyLock> = LazyLock::new &["col"], ) }); +pub static DISK_DB_DELETE_TIMES: LazyLock> = LazyLock::new(|| { + 
try_create_histogram( + "store_disk_db_delete_seconds", + "Time taken to delete bytes from the store.", + ) +}); pub static DISK_DB_DELETE_COUNT: LazyLock> = LazyLock::new(|| { try_create_int_counter_vec( "store_disk_db_delete_count_total", @@ -73,6 +93,19 @@ pub static DISK_DB_DELETE_COUNT: LazyLock> = LazyLock::new &["col"], ) }); +pub static DISK_DB_COMPACT_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "store_disk_db_compact_seconds", + "Time taken to run compaction on the DB.", + ) +}); +pub static DISK_DB_TYPE: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( + "store_disk_db_type", + "The on-disk database type being used", + &["db_type"], + ) +}); /* * Anchor Info */ diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 0b8bc2e0d4..d209512159 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -2,8 +2,8 @@ use crate::chunked_vector::{ load_variable_list_from_db, load_vector_from_db, BlockRootsChunked, HistoricalRoots, HistoricalSummaries, RandaoMixes, StateRootsChunked, }; -use crate::{Error, KeyValueStore}; -use ssz::{Decode, DecodeError}; +use crate::{DBColumn, Error, KeyValueStore, KeyValueStoreOp}; +use ssz::{Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::sync::Arc; use types::historical_summary::HistoricalSummary; @@ -172,6 +172,15 @@ impl PartialBeaconState { )) } + /// Prepare the partial state for storage in the KV database. 
+ pub fn as_kv_store_op(&self, state_root: Hash256) -> KeyValueStoreOp { + KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconState, + state_root.as_slice().to_vec(), + self.as_ssz_bytes(), + ) + } + pub fn load_block_roots>( &mut self, store: &S, diff --git a/book/src/help_bn.md b/book/src/help_bn.md index a4ab44748c..2d12010094 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -11,6 +11,9 @@ Options: --auto-compact-db Enable or disable automatic compaction of the database on finalization. [default: true] + --beacon-node-backend + Set the database backend to be used by the beacon node. [possible + values: leveldb] --blob-prune-margin-epochs The margin for blob pruning in epochs. The oldest blobs are pruned up until data_availability_boundary - blob_prune_margin_epochs. [default: diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 3c9f27d236..19098a5bc8 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -154,7 +154,7 @@ You can customise the features that Lighthouse is built with using the `FEATURES variable. E.g. ``` -FEATURES=gnosis,slasher-lmdb make +FEATURES=gnosis,slasher-lmdb,beacon-node-leveldb make ``` Commonly used features include: @@ -163,11 +163,12 @@ Commonly used features include: - `portable`: the default feature as Lighthouse now uses runtime detection of hardware CPU features. - `slasher-lmdb`: support for the LMDB slasher backend. Enabled by default. - `slasher-mdbx`: support for the MDBX slasher backend. +- `beacon-node-leveldb`: support for the leveldb backend. Enabled by default. - `jemalloc`: use [`jemalloc`][jemalloc] to allocate memory. Enabled by default on Linux and macOS. Not supported on Windows. - `spec-minimal`: support for the minimal preset (useful for testing). -Default features (e.g. `slasher-lmdb`) may be opted out of using the `--no-default-features` +Default features (e.g. 
`slasher-lmdb`, `beacon-node-leveldb`) may be opted out of using the `--no-default-features` argument for `cargo`, which can be plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable. E.g. diff --git a/database_manager/src/cli.rs b/database_manager/src/cli.rs index 4246a51f89..9db807df2c 100644 --- a/database_manager/src/cli.rs +++ b/database_manager/src/cli.rs @@ -57,6 +57,15 @@ pub struct DatabaseManager { )] pub blobs_dir: Option, + #[clap( + long, + value_name = "DATABASE", + help = "Set the database backend to be used by the beacon node.", + display_order = 0, + default_value_t = store::config::DatabaseBackend::LevelDb + )] + pub backend: store::config::DatabaseBackend, + #[clap( long, global = true, diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index fc15e98616..bed90df9df 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -16,10 +16,12 @@ use slog::{info, warn, Logger}; use std::fs; use std::io::Write; use std::path::PathBuf; +use store::KeyValueStore; use store::{ + database::interface::BeaconNodeBackend, errors::Error, metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}, - DBColumn, HotColdDB, KeyValueStore, LevelDB, + DBColumn, HotColdDB, }; use strum::{EnumString, EnumVariantNames}; use types::{BeaconState, EthSpec, Slot}; @@ -40,7 +42,7 @@ fn parse_client_config( .clone_from(&database_manager_config.blobs_dir); client_config.store.blob_prune_margin_epochs = database_manager_config.blob_prune_margin_epochs; client_config.store.hierarchy_config = database_manager_config.hierarchy_exponents.clone(); - + client_config.store.backend = database_manager_config.backend; Ok(client_config) } @@ -55,7 +57,7 @@ pub fn display_db_version( let blobs_path = client_config.get_blobs_db_path(); let mut version = CURRENT_SCHEMA_VERSION; - HotColdDB::, LevelDB>::open( + HotColdDB::, BeaconNodeBackend>::open( &hot_path, &cold_path, &blobs_path, @@ -145,11 +147,14 @@ pub fn inspect_db( let mut num_keys = 0; let 
sub_db = if inspect_config.freezer { - LevelDB::::open(&cold_path).map_err(|e| format!("Unable to open freezer DB: {e:?}"))? + BeaconNodeBackend::::open(&client_config.store, &cold_path) + .map_err(|e| format!("Unable to open freezer DB: {e:?}"))? } else if inspect_config.blobs_db { - LevelDB::::open(&blobs_path).map_err(|e| format!("Unable to open blobs DB: {e:?}"))? + BeaconNodeBackend::::open(&client_config.store, &blobs_path) + .map_err(|e| format!("Unable to open blobs DB: {e:?}"))? } else { - LevelDB::::open(&hot_path).map_err(|e| format!("Unable to open hot DB: {e:?}"))? + BeaconNodeBackend::::open(&client_config.store, &hot_path) + .map_err(|e| format!("Unable to open hot DB: {e:?}"))? }; let skip = inspect_config.skip.unwrap_or(0); @@ -263,11 +268,20 @@ pub fn compact_db( let column = compact_config.column; let (sub_db, db_name) = if compact_config.freezer { - (LevelDB::::open(&cold_path)?, "freezer_db") + ( + BeaconNodeBackend::::open(&client_config.store, &cold_path)?, + "freezer_db", + ) } else if compact_config.blobs_db { - (LevelDB::::open(&blobs_path)?, "blobs_db") + ( + BeaconNodeBackend::::open(&client_config.store, &blobs_path)?, + "blobs_db", + ) } else { - (LevelDB::::open(&hot_path)?, "hot_db") + ( + BeaconNodeBackend::::open(&client_config.store, &hot_path)?, + "hot_db", + ) }; info!( log, @@ -303,7 +317,7 @@ pub fn migrate_db( let mut from = CURRENT_SCHEMA_VERSION; let to = migrate_config.to; - let db = HotColdDB::, LevelDB>::open( + let db = HotColdDB::, BeaconNodeBackend>::open( &hot_path, &cold_path, &blobs_path, @@ -343,7 +357,7 @@ pub fn prune_payloads( let cold_path = client_config.get_freezer_db_path(); let blobs_path = client_config.get_blobs_db_path(); - let db = HotColdDB::, LevelDB>::open( + let db = HotColdDB::, BeaconNodeBackend>::open( &hot_path, &cold_path, &blobs_path, @@ -369,7 +383,7 @@ pub fn prune_blobs( let cold_path = client_config.get_freezer_db_path(); let blobs_path = client_config.get_blobs_db_path(); - let db = 
HotColdDB::, LevelDB>::open( + let db = HotColdDB::, BeaconNodeBackend>::open( &hot_path, &cold_path, &blobs_path, @@ -406,7 +420,7 @@ pub fn prune_states( let cold_path = client_config.get_freezer_db_path(); let blobs_path = client_config.get_blobs_db_path(); - let db = HotColdDB::, LevelDB>::open( + let db = HotColdDB::, BeaconNodeBackend>::open( &hot_path, &cold_path, &blobs_path, diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index eda9a2ebf2..c303511338 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -7,7 +7,7 @@ autotests = false rust-version = "1.80.0" [features] -default = ["slasher-lmdb"] +default = ["slasher-lmdb", "beacon-node-leveldb"] # Writes debugging .ssz files to /tmp during block processing. write_ssz_files = ["beacon_node/write_ssz_files"] # Compiles the BLS crypto code so that the binary is portable across machines. @@ -24,6 +24,11 @@ slasher-mdbx = ["slasher/mdbx"] slasher-lmdb = ["slasher/lmdb"] # Support slasher redb backend. slasher-redb = ["slasher/redb"] +# Supports beacon node leveldb backend. +beacon-node-leveldb = ["store/leveldb"] +# Supports beacon node redb backend. +beacon-node-redb = ["store/redb"] + # Deprecated. This is now enabled by default on non windows targets. 
jemalloc = [] @@ -56,6 +61,7 @@ serde_json = { workspace = true } serde_yaml = { workspace = true } slasher = { workspace = true } slog = { workspace = true } +store = { workspace = true } task_executor = { workspace = true } types = { workspace = true } unused_port = { workspace = true } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 88e05dfa12..1063a80ff4 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1,11 +1,12 @@ -use beacon_node::ClientConfig as Config; - use crate::exec::{CommandLineTestExec, CompletedTest}; use beacon_node::beacon_chain::chain_config::{ DisallowedReOrgOffsets, DEFAULT_RE_ORG_CUTOFF_DENOMINATOR, DEFAULT_RE_ORG_HEAD_THRESHOLD, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, }; -use beacon_node::beacon_chain::graffiti_calculator::GraffitiOrigin; +use beacon_node::{ + beacon_chain::graffiti_calculator::GraffitiOrigin, + beacon_chain::store::config::DatabaseBackend as BeaconNodeBackend, ClientConfig as Config, +}; use beacon_processor::BeaconProcessorConfig; use eth1::Eth1Endpoint; use lighthouse_network::PeerId; @@ -2691,3 +2692,13 @@ fn genesis_state_url_value() { assert_eq!(config.genesis_state_url_timeout, Duration::from_secs(42)); }); } + +#[test] +fn beacon_node_backend_override() { + CommandLineTest::new() + .flag("beacon-node-backend", Some("leveldb")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.store.backend, BeaconNodeBackend::LevelDb); + }); +} diff --git a/wordlist.txt b/wordlist.txt index 6287366cbc..bb8b46b525 100644 --- a/wordlist.txt +++ b/wordlist.txt @@ -162,6 +162,7 @@ keypair keypairs keystore keystores +leveldb linter linux localhost @@ -191,6 +192,7 @@ pre pubkey pubkeys rc +redb reimport resync roadmap From 1781c5a75539e499dc5288246b22d06853f6b54f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 28 Jan 2025 12:30:53 +1100 Subject: [PATCH 07/52] Update to EF tests v1.5.0-beta.1 (#6871) No substantial changes in 
v1.5.0-beta.1, this PR just updates the tests. The optimisation described in this PR is already implemented in our single-pass epoch processing: - https://github.com/ethereum/consensus-specs/pull/4081 --- testing/ef_tests/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 7108e3e8f6..7b507f8c50 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.5.0-beta.0 +TESTS_TAG := v1.5.0-beta.1 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) From 33b8555d2c3ae00c48ab845b678384d643c9fbaa Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 28 Jan 2025 00:48:36 -0800 Subject: [PATCH 08/52] Add tests for ExecutionRequests decoding errors (#6832) N/A Cover all error cases for decoding JsonExecutionRequests --- .../src/engine_api/json_structures.rs | 151 ++++++++++++++++++ 1 file changed, 151 insertions(+) diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 95b4b50925..96615297d8 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -991,3 +991,154 @@ impl TryFrom for ClientVersionV1 { }) } } + +#[cfg(test)] +mod tests { + use ssz::Encode; + use types::{ + ConsolidationRequest, DepositRequest, MainnetEthSpec, PublicKeyBytes, RequestType, + SignatureBytes, WithdrawalRequest, + }; + + use super::*; + + fn create_request_string(prefix: u8, request_bytes: &T) -> String { + format!( + "0x{:02x}{}", + prefix, + hex::encode(request_bytes.as_ssz_bytes()) + ) + } + + /// Tests all error conditions except ssz decoding errors + /// + /// *** + /// Elements of the list MUST be ordered by request_type in ascending order. + /// Elements with empty request_data MUST be excluded from the list. 
+ /// If any element is out of order, has a length of 1-byte or shorter, + /// or more than one element has the same type byte, client software MUST return -32602: Invalid params error. + /// *** + #[test] + fn test_invalid_execution_requests() { + let deposit_request = DepositRequest { + pubkey: PublicKeyBytes::empty(), + withdrawal_credentials: Hash256::random(), + amount: 32, + signature: SignatureBytes::empty(), + index: 0, + }; + + let consolidation_request = ConsolidationRequest { + source_address: Address::random(), + source_pubkey: PublicKeyBytes::empty(), + target_pubkey: PublicKeyBytes::empty(), + }; + + let withdrawal_request = WithdrawalRequest { + amount: 32, + source_address: Address::random(), + validator_pubkey: PublicKeyBytes::empty(), + }; + + // First check a valid request with all requests + assert!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Deposit.to_u8(), &deposit_request), + create_request_string(RequestType::Withdrawal.to_u8(), &withdrawal_request), + create_request_string(RequestType::Consolidation.to_u8(), &consolidation_request), + ])) + .is_ok() + ); + + // Single requests + assert!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Deposit.to_u8(), &deposit_request), + ])) + .is_ok() + ); + + assert!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Withdrawal.to_u8(), &withdrawal_request), + ])) + .is_ok() + ); + + assert!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Consolidation.to_u8(), &consolidation_request), + ])) + .is_ok() + ); + + // Out of order + assert!(matches!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Withdrawal.to_u8(), &withdrawal_request), + create_request_string(RequestType::Deposit.to_u8(), &deposit_request), + ])) + .unwrap_err(), + 
RequestsError::InvalidOrdering + )); + + assert!(matches!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Consolidation.to_u8(), &consolidation_request), + create_request_string(RequestType::Withdrawal.to_u8(), &withdrawal_request), + ])) + .unwrap_err(), + RequestsError::InvalidOrdering + )); + + assert!(matches!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Consolidation.to_u8(), &consolidation_request), + create_request_string(RequestType::Deposit.to_u8(), &deposit_request), + ])) + .unwrap_err(), + RequestsError::InvalidOrdering + )); + + // Multiple requests of same type + assert!(matches!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Deposit.to_u8(), &deposit_request), + create_request_string(RequestType::Deposit.to_u8(), &deposit_request), + ])) + .unwrap_err(), + RequestsError::InvalidOrdering + )); + + // Invalid prefix + assert!(matches!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(42, &deposit_request), + ])) + .unwrap_err(), + RequestsError::InvalidPrefix(42) + )); + + // Prefix followed by no data + assert!(matches!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Deposit.to_u8(), &deposit_request), + create_request_string( + RequestType::Consolidation.to_u8(), + &Vec::::new() + ), + ])) + .unwrap_err(), + RequestsError::EmptyRequest(1) + )); + // Empty request + assert!(matches!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Deposit.to_u8(), &deposit_request), + "0x".to_string() + ])) + .unwrap_err(), + RequestsError::EmptyRequest(1) + )); + } +} From c6ebaba8927086c0199b3b536f08c9146efb2606 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 28 Jan 2025 16:01:26 -0300 Subject: [PATCH 09/52] Detect invalid 
proposer signature on RPC block processing (#6519) Complements - https://github.com/sigp/lighthouse/pull/6321 by detecting if the proposer signature is valid or not during RPC block processing. In lookup sync, if the invalid signature signature is the proposer signature, it's not deterministic on the block root. So we should only penalize the sending peer and retry. Otherwise, if it's on the body we should drop the lookup and penalize all peers that claim to have imported the block --- .../beacon_chain/src/block_verification.rs | 58 +++++++++++++----- beacon_node/beacon_chain/src/lib.rs | 2 +- .../beacon_chain/tests/block_verification.rs | 59 +++++++++++-------- .../gossip_methods.rs | 3 +- beacon_node/network/src/sync/tests/lookups.rs | 6 +- 5 files changed, 84 insertions(+), 44 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 315105ac2b..1265276376 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -208,24 +208,18 @@ pub enum BlockError { /// /// The block is invalid and the peer is faulty. IncorrectBlockProposer { block: u64, local_shuffling: u64 }, - /// The proposal signature in invalid. - /// - /// ## Peer scoring - /// - /// The block is invalid and the peer is faulty. - ProposalSignatureInvalid, /// The `block.proposal_index` is not known. /// /// ## Peer scoring /// /// The block is invalid and the peer is faulty. UnknownValidator(u64), - /// A signature in the block is invalid (exactly which is unknown). + /// A signature in the block is invalid /// /// ## Peer scoring /// /// The block is invalid and the peer is faulty. - InvalidSignature, + InvalidSignature(InvalidSignature), /// The provided block is not from a later slot than its parent. 
/// /// ## Peer scoring @@ -329,6 +323,17 @@ pub enum BlockError { InternalError(String), } +/// Which specific signature(s) are invalid in a SignedBeaconBlock +#[derive(Debug)] +pub enum InvalidSignature { + // The outer signature in a SignedBeaconBlock + ProposerSignature, + // One or more signatures in BeaconBlockBody + BlockBodySignatures, + // One or more signatures in SignedBeaconBlock + Unknown, +} + impl From for BlockError { fn from(e: AvailabilityCheckError) -> Self { Self::AvailabilityCheck(e) @@ -523,7 +528,9 @@ pub enum BlockSlashInfo { impl BlockSlashInfo { pub fn from_early_error_block(header: SignedBeaconBlockHeader, e: BlockError) -> Self { match e { - BlockError::ProposalSignatureInvalid => BlockSlashInfo::SignatureInvalid(e), + BlockError::InvalidSignature(InvalidSignature::ProposerSignature) => { + BlockSlashInfo::SignatureInvalid(e) + } // `InvalidSignature` could indicate any signature in the block, so we want // to recheck the proposer signature alone. _ => BlockSlashInfo::SignatureNotChecked(header, e), @@ -652,7 +659,7 @@ pub fn signature_verify_chain_segment( } if signature_verifier.verify().is_err() { - return Err(BlockError::InvalidSignature); + return Err(BlockError::InvalidSignature(InvalidSignature::Unknown)); } drop(pubkey_cache); @@ -964,7 +971,9 @@ impl GossipVerifiedBlock { }; if !signature_is_valid { - return Err(BlockError::ProposalSignatureInvalid); + return Err(BlockError::InvalidSignature( + InvalidSignature::ProposerSignature, + )); } chain @@ -1098,7 +1107,26 @@ impl SignatureVerifiedBlock { parent: Some(parent), }) } else { - Err(BlockError::InvalidSignature) + // Re-verify the proposer signature in isolation to attribute fault + let pubkey = pubkey_cache + .get(block.message().proposer_index() as usize) + .ok_or_else(|| BlockError::UnknownValidator(block.message().proposer_index()))?; + if block.as_block().verify_signature( + Some(block_root), + pubkey, + &state.fork(), + chain.genesis_validators_root, + &chain.spec, + ) 
{ + // Proposer signature is valid, the invalid signature must be in the body + Err(BlockError::InvalidSignature( + InvalidSignature::BlockBodySignatures, + )) + } else { + Err(BlockError::InvalidSignature( + InvalidSignature::ProposerSignature, + )) + } } } @@ -1153,7 +1181,9 @@ impl SignatureVerifiedBlock { consensus_context, }) } else { - Err(BlockError::InvalidSignature) + Err(BlockError::InvalidSignature( + InvalidSignature::BlockBodySignatures, + )) } } @@ -1981,7 +2011,7 @@ impl BlockBlobError for BlockError { } fn proposer_signature_invalid() -> Self { - BlockError::ProposalSignatureInvalid + BlockError::InvalidSignature(InvalidSignature::ProposerSignature) } } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 4783945eb1..456b3c0dd8 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -78,7 +78,7 @@ pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceSto pub use block_verification::{ build_blob_data_column_sidecars, get_block_root, BlockError, ExecutionPayloadError, ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock, IntoGossipVerifiedBlock, - PayloadVerificationOutcome, PayloadVerificationStatus, + InvalidSignature, PayloadVerificationOutcome, PayloadVerificationStatus, }; pub use block_verification_types::AvailabilityPendingExecutedBlock; pub use block_verification_types::ExecutedBlock; diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 1a651332ad..46f5befbba 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -9,7 +9,7 @@ use beacon_chain::{ }; use beacon_chain::{ BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock, - NotifyExecutionLayer, + InvalidSignature, NotifyExecutionLayer, }; use logging::test_logger; use slasher::{Config as 
SlasherConfig, Slasher}; @@ -438,7 +438,7 @@ async fn assert_invalid_signature( .process_chain_segment(blocks, NotifyExecutionLayer::Yes) .await .into_block_error(), - Err(BlockError::InvalidSignature) + Err(BlockError::InvalidSignature(InvalidSignature::Unknown)) ), "should not import chain segment with an invalid {} signature", item @@ -480,7 +480,12 @@ async fn assert_invalid_signature( ) .await; assert!( - matches!(process_res, Err(BlockError::InvalidSignature)), + matches!( + process_res, + Err(BlockError::InvalidSignature( + InvalidSignature::BlockBodySignatures + )) + ), "should not import individual block with an invalid {} signature, got: {:?}", item, process_res @@ -536,21 +541,25 @@ async fn invalid_signature_gossip_block() { .into_block_error() .expect("should import all blocks prior to the one being tested"); let signed_block = SignedBeaconBlock::from_block(block, junk_signature()); + let process_res = harness + .chain + .process_block( + signed_block.canonical_root(), + Arc::new(signed_block), + NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, + || Ok(()), + ) + .await; assert!( matches!( - harness - .chain - .process_block( - signed_block.canonical_root(), - Arc::new(signed_block), - NotifyExecutionLayer::Yes, - BlockImportSource::Lookup, - || Ok(()), - ) - .await, - Err(BlockError::InvalidSignature) + process_res, + Err(BlockError::InvalidSignature( + InvalidSignature::ProposerSignature + )) ), - "should not import individual block with an invalid gossip signature", + "should not import individual block with an invalid gossip signature, got: {:?}", + process_res ); } } @@ -578,16 +587,18 @@ async fn invalid_signature_block_proposal() { }) .collect::>(); // Ensure the block will be rejected if imported in a chain segment. 
+ let process_res = harness + .chain + .process_chain_segment(blocks, NotifyExecutionLayer::Yes) + .await + .into_block_error(); assert!( matches!( - harness - .chain - .process_chain_segment(blocks, NotifyExecutionLayer::Yes) - .await - .into_block_error(), - Err(BlockError::InvalidSignature) + process_res, + Err(BlockError::InvalidSignature(InvalidSignature::Unknown)) ), - "should not import chain segment with an invalid block signature", + "should not import chain segment with an invalid block signature, got: {:?}", + process_res ); } } @@ -890,7 +901,7 @@ async fn invalid_signature_deposit() { .process_chain_segment(blocks, NotifyExecutionLayer::Yes) .await .into_block_error(), - Err(BlockError::InvalidSignature) + Err(BlockError::InvalidSignature(InvalidSignature::Unknown)) ), "should not throw an invalid signature error for a bad deposit signature" ); @@ -1086,7 +1097,7 @@ async fn block_gossip_verification() { ))) .await ), - BlockError::ProposalSignatureInvalid + BlockError::InvalidSignature(InvalidSignature::ProposerSignature) ), "should not import a block with an invalid proposal signature" ); diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 6b5753e96a..dc8d32800e 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1290,13 +1290,12 @@ impl NetworkBeaconProcessor { Err(e @ BlockError::StateRootMismatch { .. }) | Err(e @ BlockError::IncorrectBlockProposer { .. }) | Err(e @ BlockError::BlockSlotLimitReached) - | Err(e @ BlockError::ProposalSignatureInvalid) | Err(e @ BlockError::NonLinearSlots) | Err(e @ BlockError::UnknownValidator(_)) | Err(e @ BlockError::PerBlockProcessingError(_)) | Err(e @ BlockError::NonLinearParentRoots) | Err(e @ BlockError::BlockIsNotLaterThanParent { .. 
}) - | Err(e @ BlockError::InvalidSignature) + | Err(e @ BlockError::InvalidSignature(_)) | Err(e @ BlockError::WeakSubjectivityConflict) | Err(e @ BlockError::InconsistentFork(_)) | Err(e @ BlockError::ExecutionPayloadError(_)) diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index f623aa2c12..f772010500 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -1677,7 +1677,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { rig.assert_not_failed_chain(block_root); // send the right parent but fail processing rig.parent_lookup_block_response(id, peer_id, Some(parent.clone().into())); - rig.parent_block_processed(block_root, BlockError::InvalidSignature.into()); + rig.parent_block_processed(block_root, BlockError::BlockSlotLimitReached.into()); rig.parent_lookup_block_response(id, peer_id, None); rig.expect_penalty(peer_id, "lookup_block_processing_failure"); } @@ -2575,7 +2575,7 @@ mod deneb_only { fn invalid_parent_processed(mut self) -> Self { self.rig.parent_block_processed( self.block_root, - BlockProcessingResult::Err(BlockError::ProposalSignatureInvalid), + BlockProcessingResult::Err(BlockError::BlockSlotLimitReached), ); assert_eq!(self.rig.active_parent_lookups_count(), 1); self @@ -2584,7 +2584,7 @@ mod deneb_only { fn invalid_block_processed(mut self) -> Self { self.rig.single_block_component_processed( self.block_req_id.expect("block request id").lookup_id, - BlockProcessingResult::Err(BlockError::ProposalSignatureInvalid), + BlockProcessingResult::Err(BlockError::BlockSlotLimitReached), ); self.rig.assert_single_lookups_count(1); self From 6973184b06017f19894ab0925898a205a2cfdace Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Wed, 29 Jan 2025 12:22:21 +0300 Subject: [PATCH 10/52] Fix Redb implementation and add CI checks (#6856) --- Makefile | 2 +- beacon_node/store/src/database/redb_impl.rs | 33 
++++++++++++--------- beacon_node/store/src/forwards_iter.rs | 1 + beacon_node/store/src/hot_cold_store.rs | 6 ++-- 4 files changed, 24 insertions(+), 18 deletions(-) diff --git a/Makefile b/Makefile index e8b44cb780..0f08afd168 100644 --- a/Makefile +++ b/Makefile @@ -222,7 +222,7 @@ lint-fix: # Also run the lints on the optimized-only tests lint-full: - RUSTFLAGS="-C debug-assertions=no $(RUSTFLAGS)" $(MAKE) lint + TEST_FEATURES="beacon-node-leveldb,beacon-node-redb,${TEST_FEATURES}" RUSTFLAGS="-C debug-assertions=no $(RUSTFLAGS)" $(MAKE) lint # Runs the makefile in the `ef_tests` repo. # diff --git a/beacon_node/store/src/database/redb_impl.rs b/beacon_node/store/src/database/redb_impl.rs index 6a776da7b1..cbe575d184 100644 --- a/beacon_node/store/src/database/redb_impl.rs +++ b/beacon_node/store/src/database/redb_impl.rs @@ -215,11 +215,12 @@ impl Redb { let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(column.into()); - let iter = { + let result = (|| { let open_db = self.db.read(); let read_txn = open_db.begin_read()?; let table = read_txn.open_table(table_definition)?; - table.range(from..)?.map(move |res| { + let range = table.range(from..)?; + Ok(range.map(move |res| { let (key, _) = res?; metrics::inc_counter_vec(&metrics::DISK_DB_KEY_READ_COUNT, &[column.into()]); metrics::inc_counter_vec_by( @@ -228,10 +229,13 @@ impl Redb { key.value().len() as u64, ); K::from_bytes(key.value()) - }) - }; + })) + })(); - Box::new(iter) + match result { + Ok(iter) => Box::new(iter), + Err(err) => Box::new(std::iter::once(Err(err))), + } } /// Iterate through all keys and values in a particular column. 
@@ -243,15 +247,13 @@ impl Redb { let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(column.into()); - let prefix = from.to_vec(); - - let iter = { + let result = (|| { let open_db = self.db.read(); let read_txn = open_db.begin_read()?; let table = read_txn.open_table(table_definition)?; + let range = table.range(from..)?; - table - .range(from..)? + Ok(range .take_while(move |res| match res.as_ref() { Ok((_, _)) => true, Err(_) => false, @@ -265,14 +267,17 @@ impl Redb { value.value().len() as u64, ); Ok((K::from_bytes(key.value())?, value.value().to_vec())) - }) - }; + })) + })(); - Ok(Box::new(iter)) + match result { + Ok(iter) => Box::new(iter), + Err(err) => Box::new(std::iter::once(Err(err))), + } } pub fn iter_column(&self, column: DBColumn) -> ColumnIter { - self.iter_column_from(column, &vec![0; column.key_size()], |_, _| true) + self.iter_column_from(column, &vec![0; column.key_size()]) } pub fn delete_batch(&self, col: DBColumn, ops: HashSet<&[u8]>) -> Result<(), Error> { diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs index 5300a74c06..255b7d8eac 100644 --- a/beacon_node/store/src/forwards_iter.rs +++ b/beacon_node/store/src/forwards_iter.rs @@ -158,6 +158,7 @@ impl, Cold: ItemStore> Iterator return None; } self.inner + .as_mut() .next()? 
.and_then(|(slot_bytes, root_bytes)| { let slot = slot_bytes diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 75251cb5fb..45b1983492 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -14,8 +14,8 @@ use crate::metadata::{ }; use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ - get_data_column_key, metrics, parse_data_column_key, BlobSidecarListFromRoot, DBColumn, - DatabaseBlock, Error, ItemStore, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, + get_data_column_key, metrics, parse_data_column_key, BlobSidecarListFromRoot, ColumnKeyIter, + DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; use itertools::{process_results, Itertools}; use lru::LruCache; @@ -405,7 +405,7 @@ impl HotColdDB, BeaconNodeBackend> { } /// Return an iterator over the state roots of all temporary states. - pub fn iter_temporary_state_roots(&self) -> impl Iterator> + '_ { + pub fn iter_temporary_state_roots(&self) -> ColumnKeyIter { self.hot_db .iter_column_keys::(DBColumn::BeaconStateTemporary) } From e7ea69647a4cb686d0c7a80f24b3f34d329a7e01 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 29 Jan 2025 11:42:10 -0800 Subject: [PATCH 11/52] More gossipsub metrics (#6873) N/A Add metrics that tell us if a duplicate message that we received was from a mesh peer or from a non mesh peer that we requested with iwant message. 
--- .../gossipsub/src/behaviour.rs | 24 ++++++++++++++ .../gossipsub/src/gossip_promises.rs | 7 ++++ .../gossipsub/src/metrics.rs | 32 +++++++++++++++++++ 3 files changed, 63 insertions(+) diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index 6528e737a3..7eb35cc49b 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ -1841,6 +1841,30 @@ where peer_score.duplicated_message(propagation_source, &msg_id, &message.topic); } self.mcache.observe_duplicate(&msg_id, propagation_source); + // track metrics for the source of the duplicates + if let Some(metrics) = self.metrics.as_mut() { + if self + .mesh + .get(&message.topic) + .is_some_and(|peers| peers.contains(propagation_source)) + { + // duplicate was received from a mesh peer + metrics.mesh_duplicates(&message.topic); + } else if self + .gossip_promises + .contains_peer(&msg_id, propagation_source) + { + // duplicate was received from an iwant request + metrics.iwant_duplicates(&message.topic); + } else { + tracing::warn!( + messsage=%msg_id, + peer=%propagation_source, + topic=%message.topic, + "Peer should not have sent message" + ); + } + } return; } diff --git a/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs b/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs index 3f72709245..ce1dee2a72 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs @@ -41,6 +41,13 @@ impl GossipPromises { self.promises.contains_key(message) } + /// Returns true if the message id exists in the promises and contains the given peer. + pub(crate) fn contains_peer(&self, message: &MessageId, peer: &PeerId) -> bool { + self.promises + .get(message) + .is_some_and(|peers| peers.contains_key(peer)) + } + ///Get the peers we sent IWANT the input message id. 
pub(crate) fn peers_for_message(&self, message_id: &MessageId) -> Vec { self.promises diff --git a/beacon_node/lighthouse_network/gossipsub/src/metrics.rs b/beacon_node/lighthouse_network/gossipsub/src/metrics.rs index d3ca6c299e..2989f95a26 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/metrics.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/metrics.rs @@ -194,6 +194,12 @@ pub(crate) struct Metrics { /// Number of full messages we received that we previously sent a IDONTWANT for. idontwant_messages_ignored_per_topic: Family, + /// Count of duplicate messages we have received from mesh peers for a given topic. + mesh_duplicates: Family, + + /// Count of duplicate messages we have received from by requesting them over iwant for a given topic. + iwant_duplicates: Family, + /// The size of the priority queue. priority_queue_size: Histogram, /// The size of the non-priority queue. @@ -359,6 +365,16 @@ impl Metrics { "IDONTWANT messages that were sent but we received the full message regardless" ); + let mesh_duplicates = register_family!( + "mesh_duplicates_per_topic", + "Count of duplicate messages received from mesh peers per topic" + ); + + let iwant_duplicates = register_family!( + "iwant_duplicates_per_topic", + "Count of duplicate messages received from non-mesh peers that we sent iwants for" + ); + let idontwant_bytes = { let metric = Counter::default(); registry.register( @@ -425,6 +441,8 @@ impl Metrics { idontwant_msgs_ids, idontwant_messages_sent_per_topic, idontwant_messages_ignored_per_topic, + mesh_duplicates, + iwant_duplicates, priority_queue_size, non_priority_queue_size, } @@ -597,6 +615,20 @@ impl Metrics { } } + /// Register a duplicate message received from a mesh peer. + pub(crate) fn mesh_duplicates(&mut self, topic: &TopicHash) { + if self.register_topic(topic).is_ok() { + self.mesh_duplicates.get_or_create(topic).inc(); + } + } + + /// Register a duplicate message received from a non-mesh peer on an iwant request. 
+ pub(crate) fn iwant_duplicates(&mut self, topic: &TopicHash) { + if self.register_topic(topic).is_ok() { + self.iwant_duplicates.get_or_create(topic).inc(); + } + } + pub(crate) fn register_msg_validation( &mut self, topic: &TopicHash, From 4a07c08c4f515f7094828f62c856f6268dafaa58 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 29 Jan 2025 11:42:13 -0800 Subject: [PATCH 12/52] Fork aware max values in rpc (#6847) N/A In https://github.com/sigp/lighthouse/pull/6329 we changed `max_blobs_per_block` from a preset to a config value. We weren't using the right value based on fork in that PR. This is a follow up PR to use the fork dependent values. In the proces, I also updated other places where we weren't using fork dependent values from the ChainSpec. Note to reviewer: easier to go through by commit --- .../lighthouse_network/src/rpc/codec.rs | 61 +++++++++---------- .../lighthouse_network/src/rpc/handler.rs | 39 ++++++++++++ .../lighthouse_network/src/rpc/methods.rs | 26 +++++--- .../lighthouse_network/src/service/mod.rs | 2 +- .../lighthouse_network/src/service/utils.rs | 6 +- .../lighthouse_network/src/types/topics.rs | 4 +- .../lighthouse_network/tests/rpc_tests.rs | 60 ++++++++++-------- .../network_beacon_processor/rpc_methods.rs | 18 ------ beacon_node/network/src/router.rs | 4 +- beacon_node/network/src/service.rs | 1 + beacon_node/network/src/sync/manager.rs | 10 ++- .../network/src/sync/network_context.rs | 13 ++-- .../network_context/requests/blobs_by_root.rs | 4 +- .../requests/blocks_by_root.rs | 6 +- beacon_node/network/src/sync/tests/lookups.rs | 7 +++ consensus/types/src/chain_spec.rs | 56 ++++++++++++++--- 16 files changed, 203 insertions(+), 114 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 8981a75aed..6a70eef9bd 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -576,7 +576,7 @@ fn 
handle_rpc_request( BlocksByRootRequest::V2(BlocksByRootRequestV2 { block_roots: RuntimeVariableList::from_ssz_bytes( decoded_buffer, - spec.max_request_blocks as usize, + spec.max_request_blocks(current_fork), )?, }), ))), @@ -584,32 +584,18 @@ fn handle_rpc_request( BlocksByRootRequest::V1(BlocksByRootRequestV1 { block_roots: RuntimeVariableList::from_ssz_bytes( decoded_buffer, - spec.max_request_blocks as usize, + spec.max_request_blocks(current_fork), )?, }), ))), - SupportedProtocol::BlobsByRangeV1 => { - let req = BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?; - let max_requested_blobs = req - .count - .saturating_mul(spec.max_blobs_per_block_by_fork(current_fork)); - // TODO(pawan): change this to max_blobs_per_rpc_request in the alpha10 PR - if max_requested_blobs > spec.max_request_blob_sidecars { - return Err(RPCError::ErrorResponse( - RpcErrorResponse::InvalidRequest, - format!( - "requested exceeded limit. allowed: {}, requested: {}", - spec.max_request_blob_sidecars, max_requested_blobs - ), - )); - } - Ok(Some(RequestType::BlobsByRange(req))) - } + SupportedProtocol::BlobsByRangeV1 => Ok(Some(RequestType::BlobsByRange( + BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))), SupportedProtocol::BlobsByRootV1 => { Ok(Some(RequestType::BlobsByRoot(BlobsByRootRequest { blob_ids: RuntimeVariableList::from_ssz_bytes( decoded_buffer, - spec.max_request_blob_sidecars as usize, + spec.max_request_blob_sidecars(current_fork), )?, }))) } @@ -1097,21 +1083,21 @@ mod tests { } } - fn bbroot_request_v1(spec: &ChainSpec) -> BlocksByRootRequest { - BlocksByRootRequest::new_v1(vec![Hash256::zero()], spec) + fn bbroot_request_v1(fork_name: ForkName) -> BlocksByRootRequest { + BlocksByRootRequest::new_v1(vec![Hash256::zero()], &fork_context(fork_name)) } - fn bbroot_request_v2(spec: &ChainSpec) -> BlocksByRootRequest { - BlocksByRootRequest::new(vec![Hash256::zero()], spec) + fn bbroot_request_v2(fork_name: ForkName) -> BlocksByRootRequest { + 
BlocksByRootRequest::new(vec![Hash256::zero()], &fork_context(fork_name)) } - fn blbroot_request(spec: &ChainSpec) -> BlobsByRootRequest { + fn blbroot_request(fork_name: ForkName) -> BlobsByRootRequest { BlobsByRootRequest::new( vec![BlobIdentifier { block_root: Hash256::zero(), index: 0, }], - spec, + &fork_context(fork_name), ) } @@ -1909,7 +1895,8 @@ mod tests { #[test] fn test_encode_then_decode_request() { - let chain_spec = Spec::default_spec(); + let fork_context = fork_context(ForkName::Electra); + let chain_spec = fork_context.spec.clone(); let requests: &[RequestType] = &[ RequestType::Ping(ping_message()), @@ -1917,21 +1904,33 @@ mod tests { RequestType::Goodbye(GoodbyeReason::Fault), RequestType::BlocksByRange(bbrange_request_v1()), RequestType::BlocksByRange(bbrange_request_v2()), - RequestType::BlocksByRoot(bbroot_request_v1(&chain_spec)), - RequestType::BlocksByRoot(bbroot_request_v2(&chain_spec)), RequestType::MetaData(MetadataRequest::new_v1()), RequestType::BlobsByRange(blbrange_request()), - RequestType::BlobsByRoot(blbroot_request(&chain_spec)), RequestType::DataColumnsByRange(dcbrange_request()), RequestType::DataColumnsByRoot(dcbroot_request(&chain_spec)), RequestType::MetaData(MetadataRequest::new_v2()), ]; - for req in requests.iter() { for fork_name in ForkName::list_all() { encode_then_decode_request(req.clone(), fork_name, &chain_spec); } } + + // Request types that have different length limits depending on the fork + // Handled separately to have consistent `ForkName` across request and responses + let fork_dependent_requests = |fork_name| { + [ + RequestType::BlobsByRoot(blbroot_request(fork_name)), + RequestType::BlocksByRoot(bbroot_request_v1(fork_name)), + RequestType::BlocksByRoot(bbroot_request_v2(fork_name)), + ] + }; + for fork_name in ForkName::list_all() { + let requests = fork_dependent_requests(fork_name); + for req in requests { + encode_then_decode_request(req.clone(), fork_name, &chain_spec); + } + } } /// Test a 
malicious snappy encoding for a V1 `Status` message where the attacker diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 3a008df023..cb57a640bc 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -855,6 +855,45 @@ where } let (req, substream) = substream; + let current_fork = self.fork_context.current_fork(); + let spec = &self.fork_context.spec; + + match &req { + RequestType::BlocksByRange(request) => { + let max_allowed = spec.max_request_blocks(current_fork) as u64; + if *request.count() > max_allowed { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { + id: self.current_inbound_substream_id, + proto: Protocol::BlocksByRange, + error: RPCError::InvalidData(format!( + "requested exceeded limit. allowed: {}, requested: {}", + max_allowed, + request.count() + )), + })); + return self.shutdown(None); + } + } + RequestType::BlobsByRange(request) => { + let max_requested_blobs = request + .count + .saturating_mul(spec.max_blobs_per_block_by_fork(current_fork)); + let max_allowed = spec.max_request_blob_sidecars(current_fork) as u64; + if max_requested_blobs > max_allowed { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { + id: self.current_inbound_substream_id, + proto: Protocol::BlobsByRange, + error: RPCError::InvalidData(format!( + "requested exceeded limit. 
allowed: {}, requested: {}", + max_allowed, max_requested_blobs + )), + })); + return self.shutdown(None); + } + } + _ => {} + }; + let max_responses = req.max_responses(self.fork_context.current_fork(), &self.fork_context.spec); diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 958041c53f..ad6bea455e 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -15,12 +15,12 @@ use strum::IntoStaticStr; use superstruct::superstruct; use types::blob_sidecar::BlobIdentifier; use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; -use types::ForkName; use types::{ blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, Hash256, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, }; +use types::{ForkContext, ForkName}; /// Maximum length of error message. 
pub type MaxErrorLen = U256; @@ -420,15 +420,19 @@ pub struct BlocksByRootRequest { } impl BlocksByRootRequest { - pub fn new(block_roots: Vec, spec: &ChainSpec) -> Self { - let block_roots = - RuntimeVariableList::from_vec(block_roots, spec.max_request_blocks as usize); + pub fn new(block_roots: Vec, fork_context: &ForkContext) -> Self { + let max_request_blocks = fork_context + .spec + .max_request_blocks(fork_context.current_fork()); + let block_roots = RuntimeVariableList::from_vec(block_roots, max_request_blocks); Self::V2(BlocksByRootRequestV2 { block_roots }) } - pub fn new_v1(block_roots: Vec, spec: &ChainSpec) -> Self { - let block_roots = - RuntimeVariableList::from_vec(block_roots, spec.max_request_blocks as usize); + pub fn new_v1(block_roots: Vec, fork_context: &ForkContext) -> Self { + let max_request_blocks = fork_context + .spec + .max_request_blocks(fork_context.current_fork()); + let block_roots = RuntimeVariableList::from_vec(block_roots, max_request_blocks); Self::V1(BlocksByRootRequestV1 { block_roots }) } } @@ -441,9 +445,11 @@ pub struct BlobsByRootRequest { } impl BlobsByRootRequest { - pub fn new(blob_ids: Vec, spec: &ChainSpec) -> Self { - let blob_ids = - RuntimeVariableList::from_vec(blob_ids, spec.max_request_blob_sidecars as usize); + pub fn new(blob_ids: Vec, fork_context: &ForkContext) -> Self { + let max_request_blob_sidecars = fork_context + .spec + .max_request_blob_sidecars(fork_context.current_fork()); + let blob_ids = RuntimeVariableList::from_vec(blob_ids, max_request_blob_sidecars); Self { blob_ids } } } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 4738c76d0c..a18daa5791 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -282,7 +282,7 @@ impl Network { let max_topics = ctx.chain_spec.attestation_subnet_count as usize + SYNC_COMMITTEE_SUBNET_COUNT as usize - + 
ctx.chain_spec.blob_sidecar_subnet_count_electra as usize + + ctx.chain_spec.blob_sidecar_subnet_count_max() as usize + ctx.chain_spec.data_column_sidecar_subnet_count as usize + BASE_CORE_TOPICS.len() + ALTAIR_CORE_TOPICS.len() diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 5746c13c58..72c2b29102 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -263,11 +263,7 @@ pub(crate) fn create_whitelist_filter( for id in 0..sync_committee_subnet_count { add(SyncCommitteeMessage(SyncSubnetId::new(id))); } - let blob_subnet_count = if spec.electra_fork_epoch.is_some() { - spec.blob_sidecar_subnet_count_electra - } else { - spec.blob_sidecar_subnet_count - }; + let blob_subnet_count = spec.blob_sidecar_subnet_count_max(); for id in 0..blob_subnet_count { add(BlobSidecar(id)); } diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 475b459ccb..2c79f93423 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -51,7 +51,7 @@ pub fn fork_core_topics(fork_name: &ForkName, spec: &ChainSpec) -> V ForkName::Deneb => { // All of deneb blob topics are core topics let mut deneb_blob_topics = Vec::new(); - for i in 0..spec.blob_sidecar_subnet_count { + for i in 0..spec.blob_sidecar_subnet_count(ForkName::Deneb) { deneb_blob_topics.push(GossipKind::BlobSidecar(i)); } deneb_blob_topics @@ -59,7 +59,7 @@ pub fn fork_core_topics(fork_name: &ForkName, spec: &ChainSpec) -> V ForkName::Electra => { // All of electra blob topics are core topics let mut electra_blob_topics = Vec::new(); - for i in 0..spec.blob_sidecar_subnet_count_electra { + for i in 0..spec.blob_sidecar_subnet_count(ForkName::Electra) { electra_blob_topics.push(GossipKind::BlobSidecar(i)); } electra_blob_topics diff --git 
a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index f721c8477c..4b54a24ddc 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -16,7 +16,7 @@ use tokio::time::sleep; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BlobSidecar, ChainSpec, EmptyBlock, Epoch, EthSpec, FixedBytesExtended, ForkContext, ForkName, Hash256, MinimalEthSpec, - Signature, SignedBeaconBlock, Slot, + RuntimeVariableList, Signature, SignedBeaconBlock, Slot, }; type E = MinimalEthSpec; @@ -810,17 +810,20 @@ fn test_tcp_blocks_by_root_chunked_rpc() { .await; // BlocksByRoot Request - let rpc_request = RequestType::BlocksByRoot(BlocksByRootRequest::new( - vec![ - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - ], - &spec, - )); + let rpc_request = + RequestType::BlocksByRoot(BlocksByRootRequest::V2(BlocksByRootRequestV2 { + block_roots: RuntimeVariableList::from_vec( + vec![ + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + ], + spec.max_request_blocks_upper_bound(), + ), + })); // BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); @@ -953,21 +956,24 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { .await; // BlocksByRoot Request - let rpc_request = RequestType::BlocksByRoot(BlocksByRootRequest::new( - vec![ - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - ], - &spec, - )); + let rpc_request = + RequestType::BlocksByRoot(BlocksByRootRequest::V2(BlocksByRootRequestV2 { + block_roots: RuntimeVariableList::from_vec( + vec![ + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + 
Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + ], + spec.max_request_blocks_upper_bound(), + ), + })); // BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index b4f19f668d..67a1570275 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -659,24 +659,6 @@ impl NetworkBeaconProcessor { "start_slot" => req.start_slot(), ); - // Should not send more than max request blocks - let max_request_size = - self.chain - .epoch() - .map_or(self.chain.spec.max_request_blocks, |epoch| { - if self.chain.spec.fork_name_at_epoch(epoch).deneb_enabled() { - self.chain.spec.max_request_blocks_deneb - } else { - self.chain.spec.max_request_blocks - } - }); - if *req.count() > max_request_size { - return Err(( - RpcErrorResponse::InvalidRequest, - "Request exceeded max size", - )); - } - let forwards_block_root_iter = match self .chain .forwards_iter_block_roots(Slot::from(*req.start_slot())) diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index d3da341e1c..41b9f2c91e 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -28,7 +28,7 @@ use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; -use types::{BlobSidecar, DataColumnSidecar, EthSpec, SignedBeaconBlock}; +use types::{BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, SignedBeaconBlock}; /// Handles messages from the network and routes them to the appropriate service to be handled. 
pub struct Router { @@ -90,6 +90,7 @@ impl Router { invalid_block_storage: InvalidBlockStorage, beacon_processor_send: BeaconProcessorSend, beacon_processor_reprocess_tx: mpsc::Sender, + fork_context: Arc, log: slog::Logger, ) -> Result>, String> { let message_handler_log = log.new(o!("service"=> "router")); @@ -122,6 +123,7 @@ impl Router { network_send.clone(), network_beacon_processor.clone(), sync_recv, + fork_context, sync_logger, ); diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index f89241b4ae..ab654ddf77 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -312,6 +312,7 @@ impl NetworkService { invalid_block_storage, beacon_processor_send, beacon_processor_reprocess_tx, + fork_context.clone(), network_log.clone(), )?; diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 2df8b5f94c..fd91dc78b1 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -69,7 +69,9 @@ use std::ops::Sub; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; -use types::{BlobSidecar, DataColumnSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{ + BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, Hash256, SignedBeaconBlock, Slot, +}; #[cfg(test)] use types::ColumnIndex; @@ -258,10 +260,11 @@ pub fn spawn( network_send: mpsc::UnboundedSender>, beacon_processor: Arc>, sync_recv: mpsc::UnboundedReceiver>, + fork_context: Arc, log: slog::Logger, ) { assert!( - beacon_chain.spec.max_request_blocks >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH, + beacon_chain.spec.max_request_blocks(fork_context.current_fork()) as u64 >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH, "Max blocks that can be requested in a single batch greater than max allowed blocks in a single request" ); @@ -272,6 +275,7 @@ pub fn spawn( beacon_processor, sync_recv, SamplingConfig::Default, + fork_context, 
log.clone(), ); @@ -287,6 +291,7 @@ impl SyncManager { beacon_processor: Arc>, sync_recv: mpsc::UnboundedReceiver>, sampling_config: SamplingConfig, + fork_context: Arc, log: slog::Logger, ) -> Self { let network_globals = beacon_processor.network_globals.clone(); @@ -297,6 +302,7 @@ impl SyncManager { network_send, beacon_processor.clone(), beacon_chain.clone(), + fork_context.clone(), log.clone(), ), range_sync: RangeSync::new( diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index f899936128..e21041192d 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -43,8 +43,8 @@ use std::time::Duration; use tokio::sync::mpsc; use types::blob_sidecar::FixedBlobSidecarList; use types::{ - BlobSidecar, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, Hash256, - SignedBeaconBlock, Slot, + BlobSidecar, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, ForkContext, + Hash256, SignedBeaconBlock, Slot, }; pub mod custody; @@ -216,6 +216,8 @@ pub struct SyncNetworkContext { pub chain: Arc>, + fork_context: Arc, + /// Logger for the `SyncNetworkContext`. 
pub log: slog::Logger, } @@ -244,6 +246,7 @@ impl SyncNetworkContext { network_send: mpsc::UnboundedSender>, network_beacon_processor: Arc>, chain: Arc>, + fork_context: Arc, log: slog::Logger, ) -> Self { SyncNetworkContext { @@ -257,6 +260,7 @@ impl SyncNetworkContext { range_block_components_requests: FnvHashMap::default(), network_beacon_processor, chain, + fork_context, log, } } @@ -455,7 +459,6 @@ impl SyncNetworkContext { (None, None) }; - // TODO(pawan): this would break if a batch contains multiple epochs let max_blobs_len = self.chain.spec.max_blobs_per_block(epoch); let info = RangeBlockComponentsRequest::new( expected_blobs, @@ -624,7 +627,7 @@ impl SyncNetworkContext { self.network_send .send(NetworkMessage::SendRequest { peer_id, - request: RequestType::BlocksByRoot(request.into_request(&self.chain.spec)), + request: RequestType::BlocksByRoot(request.into_request(&self.fork_context)), request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -706,7 +709,7 @@ impl SyncNetworkContext { self.network_send .send(NetworkMessage::SendRequest { peer_id, - request: RequestType::BlobsByRoot(request.clone().into_request(&self.chain.spec)), + request: RequestType::BlobsByRoot(request.clone().into_request(&self.fork_context)), request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; diff --git a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs index fefb27a5ef..a670229884 100644 --- a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs @@ -1,6 +1,6 @@ use lighthouse_network::rpc::methods::BlobsByRootRequest; use std::sync::Arc; -use types::{blob_sidecar::BlobIdentifier, BlobSidecar, ChainSpec, EthSpec, Hash256}; +use 
types::{blob_sidecar::BlobIdentifier, BlobSidecar, EthSpec, ForkContext, Hash256}; use super::{ActiveRequestItems, LookupVerifyError}; @@ -11,7 +11,7 @@ pub struct BlobsByRootSingleBlockRequest { } impl BlobsByRootSingleBlockRequest { - pub fn into_request(self, spec: &ChainSpec) -> BlobsByRootRequest { + pub fn into_request(self, spec: &ForkContext) -> BlobsByRootRequest { BlobsByRootRequest::new( self.indices .into_iter() diff --git a/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs index f3cdcbe714..6d7eabf909 100644 --- a/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs @@ -1,7 +1,7 @@ use beacon_chain::get_block_root; use lighthouse_network::rpc::BlocksByRootRequest; use std::sync::Arc; -use types::{ChainSpec, EthSpec, Hash256, SignedBeaconBlock}; +use types::{EthSpec, ForkContext, Hash256, SignedBeaconBlock}; use super::{ActiveRequestItems, LookupVerifyError}; @@ -9,8 +9,8 @@ use super::{ActiveRequestItems, LookupVerifyError}; pub struct BlocksByRootSingleRequest(pub Hash256); impl BlocksByRootSingleRequest { - pub fn into_request(self, spec: &ChainSpec) -> BlocksByRootRequest { - BlocksByRootRequest::new(vec![self.0], spec) + pub fn into_request(self, fork_context: &ForkContext) -> BlocksByRootRequest { + BlocksByRootRequest::new(vec![self.0], fork_context) } } diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index f772010500..341fe8667c 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -39,6 +39,7 @@ use lighthouse_network::{ use slog::info; use slot_clock::{SlotClock, TestingSlotClock}; use tokio::sync::mpsc; +use types::ForkContext; use types::{ data_column_sidecar::ColumnIndex, test_utils::{SeedableRng, TestRandom, XorShiftRng}, @@ -92,6 
+93,11 @@ impl TestRig { .build(); let chain = harness.chain.clone(); + let fork_context = Arc::new(ForkContext::new::( + Slot::new(0), + chain.genesis_validators_root, + &chain.spec, + )); let (network_tx, network_rx) = mpsc::unbounded_channel(); let (sync_tx, sync_rx) = mpsc::unbounded_channel::>(); @@ -139,6 +145,7 @@ impl TestRig { SamplingConfig::Custom { required_successes: vec![SAMPLING_REQUIRED_SUCCESSES], }, + fork_context, log.clone(), ), harness, diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 9177f66b94..91d64f5c8e 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -217,7 +217,7 @@ pub struct ChainSpec { pub network_id: u8, pub target_aggregators_per_committee: u64, pub gossip_max_size: u64, - pub max_request_blocks: u64, + max_request_blocks: u64, pub min_epochs_for_block_requests: u64, pub max_chunk_size: u64, pub ttfb_timeout: u64, @@ -233,19 +233,19 @@ pub struct ChainSpec { /* * Networking Deneb */ - pub max_request_blocks_deneb: u64, - pub max_request_blob_sidecars: u64, + max_request_blocks_deneb: u64, + max_request_blob_sidecars: u64, pub max_request_data_column_sidecars: u64, pub min_epochs_for_blob_sidecars_requests: u64, - pub blob_sidecar_subnet_count: u64, - pub max_blobs_per_block: u64, + blob_sidecar_subnet_count: u64, + max_blobs_per_block: u64, /* * Networking Electra */ max_blobs_per_block_electra: u64, - pub blob_sidecar_subnet_count_electra: u64, - pub max_request_blob_sidecars_electra: u64, + blob_sidecar_subnet_count_electra: u64, + max_request_blob_sidecars_electra: u64, /* * Networking Derived @@ -625,6 +625,17 @@ impl ChainSpec { } } + /// Returns the highest possible value for max_request_blocks based on enabled forks. + /// + /// This is useful for upper bounds in testing. 
+ pub fn max_request_blocks_upper_bound(&self) -> usize { + if self.deneb_fork_epoch.is_some() { + self.max_request_blocks_deneb as usize + } else { + self.max_request_blocks as usize + } + } + pub fn max_request_blob_sidecars(&self, fork_name: ForkName) -> usize { if fork_name.electra_enabled() { self.max_request_blob_sidecars_electra as usize @@ -633,6 +644,17 @@ impl ChainSpec { } } + /// Returns the highest possible value for max_request_blobs based on enabled forks. + /// + /// This is useful for upper bounds in testing. + pub fn max_request_blobs_upper_bound(&self) -> usize { + if self.electra_fork_epoch.is_some() { + self.max_request_blob_sidecars_electra as usize + } else { + self.max_request_blob_sidecars as usize + } + } + /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for the fork at `epoch`. pub fn max_blobs_per_block(&self, epoch: Epoch) -> u64 { self.max_blobs_per_block_by_fork(self.fork_name_at_epoch(epoch)) @@ -647,6 +669,26 @@ impl ChainSpec { } } + /// Returns the `BLOB_SIDECAR_SUBNET_COUNT` at the given fork_name. + pub fn blob_sidecar_subnet_count(&self, fork_name: ForkName) -> u64 { + if fork_name.electra_enabled() { + self.blob_sidecar_subnet_count_electra + } else { + self.blob_sidecar_subnet_count + } + } + + /// Returns the highest possible value of blob sidecar subnet count based on enabled forks. + /// + /// This is useful for upper bounds for the subnet count during a given run of lighthouse. + pub fn blob_sidecar_subnet_count_max(&self) -> u64 { + if self.electra_fork_epoch.is_some() { + self.blob_sidecar_subnet_count_electra + } else { + self.blob_sidecar_subnet_count + } + } + /// Returns the number of data columns per custody group. 
pub fn data_columns_per_group(&self) -> u64 { self.number_of_columns From 66c6552e8cd5f20466b2717489ba91a333684361 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 30 Jan 2025 00:09:48 -0300 Subject: [PATCH 13/52] Some sync/backfill format nits (#6861) When working on unrelated changes I noted: - An unnecessary closure left by a commit of some guy named @dapplion that can be removed - match statements that can be simplified with the new let else syntax - instead of mapping a result to ignore the Ok value, return --- .../network/src/sync/backfill_sync/mod.rs | 140 ++++++++---------- 1 file changed, 63 insertions(+), 77 deletions(-) diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 5703ed3504..a3d2c82642 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -388,67 +388,59 @@ impl BackFillSync { blocks: Vec>, ) -> Result { // check if we have this batch - let batch = match self.batches.get_mut(&batch_id) { - None => { - if !matches!(self.state(), BackFillState::Failed) { - // A batch might get removed when the chain advances, so this is non fatal. - debug!(self.log, "Received a block for unknown batch"; "epoch" => batch_id); - } - return Ok(ProcessResult::Successful); - } - Some(batch) => { - // A batch could be retried without the peer failing the request (disconnecting/ - // sending an error /timeout) if the peer is removed from the chain for other - // reasons. Check that this block belongs to the expected peer, and that the - // request_id matches - // TODO(das): removed peer_id matching as the node may request a different peer for data - // columns. 
- if !batch.is_expecting_block(&request_id) { - return Ok(ProcessResult::Successful); - } - batch + let Some(batch) = self.batches.get_mut(&batch_id) else { + if !matches!(self.state(), BackFillState::Failed) { + // A batch might get removed when the chain advances, so this is non fatal. + debug!(self.log, "Received a block for unknown batch"; "epoch" => batch_id); } + return Ok(ProcessResult::Successful); }; - { - // A stream termination has been sent. This batch has ended. Process a completed batch. - // Remove the request from the peer's active batches - self.active_requests - .get_mut(peer_id) - .map(|active_requests| active_requests.remove(&batch_id)); + // A batch could be retried without the peer failing the request (disconnecting/ + // sending an error /timeout) if the peer is removed from the chain for other + // reasons. Check that this block belongs to the expected peer, and that the + // request_id matches + // TODO(das): removed peer_id matching as the node may request a different peer for data + // columns. + if !batch.is_expecting_block(&request_id) { + return Ok(ProcessResult::Successful); + } - match batch.download_completed(blocks) { - Ok(received) => { - let awaiting_batches = - self.processing_target.saturating_sub(batch_id) / BACKFILL_EPOCHS_PER_BATCH; - debug!(self.log, "Completed batch received"; "epoch" => batch_id, "blocks" => received, "awaiting_batches" => awaiting_batches); + // A stream termination has been sent. This batch has ended. Process a completed batch. 
+ // Remove the request from the peer's active batches + self.active_requests + .get_mut(peer_id) + .map(|active_requests| active_requests.remove(&batch_id)); - // pre-emptively request more blocks from peers whilst we process current blocks, - self.request_batches(network)?; - self.process_completed_batches(network) - } - Err(result) => { - let (expected_boundary, received_boundary, outcome) = match result { - Err(e) => { - return self - .fail_sync(BackFillError::BatchInvalidState(batch_id, e.0)) - .map(|_| ProcessResult::Successful); - } - Ok(v) => v, - }; - warn!(self.log, "Batch received out of range blocks"; "expected_boundary" => expected_boundary, "received_boundary" => received_boundary, + match batch.download_completed(blocks) { + Ok(received) => { + let awaiting_batches = + self.processing_target.saturating_sub(batch_id) / BACKFILL_EPOCHS_PER_BATCH; + debug!(self.log, "Completed batch received"; "epoch" => batch_id, "blocks" => received, "awaiting_batches" => awaiting_batches); + + // pre-emptively request more blocks from peers whilst we process current blocks, + self.request_batches(network)?; + self.process_completed_batches(network) + } + Err(result) => { + let (expected_boundary, received_boundary, outcome) = match result { + Err(e) => { + self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; + return Ok(ProcessResult::Successful); + } + Ok(v) => v, + }; + warn!(self.log, "Batch received out of range blocks"; "expected_boundary" => expected_boundary, "received_boundary" => received_boundary, "peer_id" => %peer_id, batch); - if let BatchOperationOutcome::Failed { blacklist: _ } = outcome { - error!(self.log, "Backfill failed"; "epoch" => batch_id, "received_boundary" => received_boundary, "expected_boundary" => expected_boundary); - return self - .fail_sync(BackFillError::BatchDownloadFailed(batch_id)) - .map(|_| ProcessResult::Successful); - } - // this batch can't be used, so we need to request it again. 
- self.retry_batch_download(network, batch_id) - .map(|_| ProcessResult::Successful) + if let BatchOperationOutcome::Failed { blacklist: _ } = outcome { + error!(self.log, "Backfill failed"; "epoch" => batch_id, "received_boundary" => received_boundary, "expected_boundary" => expected_boundary); + self.fail_sync(BackFillError::BatchDownloadFailed(batch_id))?; + return Ok(ProcessResult::Successful); } + // this batch can't be used, so we need to request it again. + self.retry_batch_download(network, batch_id)?; + Ok(ProcessResult::Successful) } } } @@ -582,20 +574,16 @@ impl BackFillSync { } }; - let peer = match batch.current_peer() { - Some(v) => *v, - None => { - return self - .fail_sync(BackFillError::BatchInvalidState( - batch_id, - String::from("Peer does not exist"), - )) - .map(|_| ProcessResult::Successful) - } + let Some(peer) = batch.current_peer() else { + self.fail_sync(BackFillError::BatchInvalidState( + batch_id, + String::from("Peer does not exist"), + ))?; + return Ok(ProcessResult::Successful); }; debug!(self.log, "Backfill batch processed"; "result" => ?result, &batch, - "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(&peer)); + "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(peer)); match result { BatchProcessResult::Success { @@ -679,8 +667,8 @@ impl BackFillSync { { self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; } - self.retry_batch_download(network, batch_id) - .map(|_| ProcessResult::Successful) + self.retry_batch_download(network, batch_id)?; + Ok(ProcessResult::Successful) } } } @@ -712,11 +700,10 @@ impl BackFillSync { // - AwaitingDownload -> A recoverable failed batch should have been // re-requested. 
// - Processing -> `self.current_processing_batch` is None - return self - .fail_sync(BackFillError::InvalidSyncState(String::from( - "Invalid expected batch state", - ))) - .map(|_| ProcessResult::Successful); + self.fail_sync(BackFillError::InvalidSyncState(String::from( + "Invalid expected batch state", + )))?; + return Ok(ProcessResult::Successful); } BatchState::AwaitingValidation(_) => { // TODO: I don't think this state is possible, log a CRIT just in case. @@ -731,12 +718,11 @@ impl BackFillSync { } } } else { - return self - .fail_sync(BackFillError::InvalidSyncState(format!( - "Batch not found for current processing target {}", - self.processing_target - ))) - .map(|_| ProcessResult::Successful); + self.fail_sync(BackFillError::InvalidSyncState(format!( + "Batch not found for current processing target {}", + self.processing_target + )))?; + return Ok(ProcessResult::Successful); } Ok(ProcessResult::Successful) } From d297d08c6b536cc1f33a7d6d5a5107eef0a9514f Mon Sep 17 00:00:00 2001 From: Janick Martinez Esturo Date: Thu, 30 Jan 2025 06:14:57 +0100 Subject: [PATCH 14/52] Increase jemalloc aarch64 page size limit (#5244) (#6831) #5244 Pass `JEMALLOC_SYS_WITH_LG_PAGE=16` env to aarch64 cross-compilation to support systems with up to 64-KiB page sizes. This is backwards-compatible for the current (most usual) 4-KiB systems. --- .cargo/config.toml | 1 - Cross.toml | 11 +++++++++++ Makefile | 10 ++++++++-- common/malloc_utils/src/jemalloc.rs | 17 ++++++++++++++++- common/malloc_utils/src/lib.rs | 4 ++-- lighthouse/src/main.rs | 14 +++++++++----- 6 files changed, 46 insertions(+), 11 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index dac0163003..a408305c4d 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,4 +1,3 @@ [env] # Set the number of arenas to 16 when using jemalloc. 
JEMALLOC_SYS_WITH_MALLOC_CONF = "abort_conf:true,narenas:16" - diff --git a/Cross.toml b/Cross.toml index 871391253d..8181967f32 100644 --- a/Cross.toml +++ b/Cross.toml @@ -3,3 +3,14 @@ pre-build = ["apt-get install -y cmake clang-5.0"] [target.aarch64-unknown-linux-gnu] pre-build = ["apt-get install -y cmake clang-5.0"] + +# Allow setting page size limits for jemalloc at build time: +# For certain architectures (like aarch64), we must compile +# jemalloc with support for large page sizes, otherwise the host's +# system page size will be used, which may not work on the target systems. +# JEMALLOC_SYS_WITH_LG_PAGE=16 tells jemalloc to support up to 64-KiB +# pages. See: https://github.com/sigp/lighthouse/issues/5244 +[build.env] +passthrough = [ + "JEMALLOC_SYS_WITH_LG_PAGE", +] diff --git a/Makefile b/Makefile index 0f08afd168..81477634fe 100644 --- a/Makefile +++ b/Makefile @@ -63,12 +63,18 @@ install-lcli: build-x86_64: cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-aarch64: - cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked + # JEMALLOC_SYS_WITH_LG_PAGE=16 tells jemalloc to support up to 64-KiB + # pages, which are commonly used by aarch64 systems. + # See: https://github.com/sigp/lighthouse/issues/5244 + JEMALLOC_SYS_WITH_LG_PAGE=16 cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-lcli-x86_64: cross build --bin lcli --target x86_64-unknown-linux-gnu --features "portable" --profile "$(CROSS_PROFILE)" --locked build-lcli-aarch64: - cross build --bin lcli --target aarch64-unknown-linux-gnu --features "portable" --profile "$(CROSS_PROFILE)" --locked + # JEMALLOC_SYS_WITH_LG_PAGE=16 tells jemalloc to support up to 64-KiB + # pages, which are commonly used by aarch64 systems. 
+ # See: https://github.com/sigp/lighthouse/issues/5244 + JEMALLOC_SYS_WITH_LG_PAGE=16 cross build --bin lcli --target aarch64-unknown-linux-gnu --features "portable" --profile "$(CROSS_PROFILE)" --locked # Create a `.tar.gz` containing a binary for a specific target. define tarball_release_binary diff --git a/common/malloc_utils/src/jemalloc.rs b/common/malloc_utils/src/jemalloc.rs index 0e2e00cb0e..f3a35fc41c 100644 --- a/common/malloc_utils/src/jemalloc.rs +++ b/common/malloc_utils/src/jemalloc.rs @@ -9,7 +9,7 @@ //! B) `_RJEM_MALLOC_CONF` at runtime. use metrics::{set_gauge, try_create_int_gauge, IntGauge}; use std::sync::LazyLock; -use tikv_jemalloc_ctl::{arenas, epoch, stats, Error}; +use tikv_jemalloc_ctl::{arenas, epoch, stats, Access, AsName, Error}; #[global_allocator] static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; @@ -52,3 +52,18 @@ pub fn scrape_jemalloc_metrics_fallible() -> Result<(), Error> { Ok(()) } + +pub fn page_size() -> Result { + // Full list of keys: https://jemalloc.net/jemalloc.3.html + "arenas.page\0".name().read() +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn page_size_ok() { + assert!(page_size().is_ok()); + } +} diff --git a/common/malloc_utils/src/lib.rs b/common/malloc_utils/src/lib.rs index 3bb242369f..50d2785a74 100644 --- a/common/malloc_utils/src/lib.rs +++ b/common/malloc_utils/src/lib.rs @@ -29,10 +29,10 @@ not(target_env = "musl"), not(feature = "jemalloc") ))] -mod glibc; +pub mod glibc; #[cfg(feature = "jemalloc")] -mod jemalloc; +pub mod jemalloc; pub use interface::*; diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 43c5e1107c..dd7401d49e 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -66,11 +66,15 @@ fn bls_hardware_acceleration() -> bool { return std::arch::is_aarch64_feature_detected!("neon"); } -fn allocator_name() -> &'static str { - if cfg!(target_os = "windows") { - "system" - } else { - "jemalloc" +fn allocator_name() -> String { + 
#[cfg(target_os = "windows")] + { + "system".to_string() + } + #[cfg(not(target_os = "windows"))] + match malloc_utils::jemalloc::page_size() { + Ok(page_size) => format!("jemalloc ({}K)", page_size / 1024), + Err(e) => format!("jemalloc (error: {e:?})"), } } From 1fe0ac72be6e099a4e28994c81e62bd9ccd64dae Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 30 Jan 2025 17:22:59 +1100 Subject: [PATCH 15/52] Underflow and Typo (#6885) I was looking at sync and noticed a potential underflow and a typo, so just fixed those whilst I was in there. --- beacon_node/network/src/sync/peer_sync_info.rs | 4 ++-- beacon_node/network/src/sync/range_sync/chain_collection.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/network/src/sync/peer_sync_info.rs b/beacon_node/network/src/sync/peer_sync_info.rs index c01366f1be..5ea1533d35 100644 --- a/beacon_node/network/src/sync/peer_sync_info.rs +++ b/beacon_node/network/src/sync/peer_sync_info.rs @@ -30,8 +30,8 @@ pub fn remote_sync_type( ) -> PeerSyncType { // auxiliary variables for clarity: Inclusive boundaries of the range in which we consider a peer's // head "near" ours. 
- let near_range_start = local.head_slot - SLOT_IMPORT_TOLERANCE as u64; - let near_range_end = local.head_slot + SLOT_IMPORT_TOLERANCE as u64; + let near_range_start = local.head_slot.saturating_sub(SLOT_IMPORT_TOLERANCE); + let near_range_end = local.head_slot.saturating_add(SLOT_IMPORT_TOLERANCE); match remote.finalized_epoch.cmp(&local.finalized_epoch) { Ordering::Less => { diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index c030d0a19e..16dadb3660 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -86,7 +86,7 @@ impl ChainCollection { RangeSyncState::Head(syncing_head_ids) }; } else { - // we removed a head chain, or an stoped finalized chain + // we removed a head chain, or a stopped finalized chain debug_assert!(!was_syncing || sync_type != RangeSyncType::Finalized); } } From 7d54a43243905b62e1ced8f56cd5ad0575b8638b Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 30 Jan 2025 04:01:32 -0300 Subject: [PATCH 16/52] Make range sync chain Id sequential (#6868) Currently, we set the `chain_id` of range sync chains to `u64(hash(target_root, target_slot))`, which results in a long integer. ``` Jan 27 00:43:27.246 DEBG Batch downloaded, chain: 4223372036854775807, awaiting_batches: 0, batch_state: [p,E,E,E,E], blocks: 0, epoch: 0, service: range_sync ``` Instead, we can use `network_context.next_id()` as we do for all other sync items and get a unique sequential (not too big) integer as id. ``` Jan 27 00:43:27.246 DEBG Batch downloaded, chain: 4, awaiting_batches: 0, batch_state: [p,E,E,E,E], blocks: 0, epoch: 0, service: range_sync ``` Also, if a specific chain for the same target is retried later, it won't get the same ID so we can more clearly differentiate the logs associated with each attempt. 
--- .../network/src/sync/range_sync/chain.rs | 19 +++++------- .../src/sync/range_sync/chain_collection.rs | 31 +++++++++++-------- 2 files changed, 26 insertions(+), 24 deletions(-) diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 51d9d9da37..4eb73f5483 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -15,7 +15,6 @@ use rand::seq::SliceRandom; use rand::Rng; use slog::{crit, debug, o, warn}; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; -use std::hash::{Hash, Hasher}; use strum::IntoStaticStr; use types::{Epoch, EthSpec, Hash256, Slot}; @@ -56,7 +55,7 @@ pub enum RemoveChain { pub struct KeepChain; /// A chain identifier -pub type ChainId = u64; +pub type ChainId = Id; pub type BatchId = Epoch; #[derive(Debug, Copy, Clone, IntoStaticStr)] @@ -127,14 +126,9 @@ pub enum ChainSyncingState { } impl SyncingChain { - pub fn id(target_root: &Hash256, target_slot: &Slot) -> u64 { - let mut hasher = std::collections::hash_map::DefaultHasher::new(); - (target_root, target_slot).hash(&mut hasher); - hasher.finish() - } - #[allow(clippy::too_many_arguments)] pub fn new( + id: Id, start_epoch: Epoch, target_head_slot: Slot, target_head_root: Hash256, @@ -145,8 +139,6 @@ impl SyncingChain { let mut peers = FnvHashMap::default(); peers.insert(peer_id, Default::default()); - let id = SyncingChain::::id(&target_head_root, &target_head_slot); - SyncingChain { id, chain_type, @@ -165,6 +157,11 @@ impl SyncingChain { } } + /// Returns true if this chain has the same target + pub fn has_same_target(&self, target_head_slot: Slot, target_head_root: Hash256) -> bool { + self.target_head_slot == target_head_slot && self.target_head_root == target_head_root + } + /// Check if the chain has peers from which to process batches. 
pub fn available_peers(&self) -> usize { self.peers.len() @@ -1258,7 +1255,7 @@ impl slog::KV for SyncingChain { serializer: &mut dyn slog::Serializer, ) -> slog::Result { use slog::Value; - serializer.emit_u64("id", self.id)?; + serializer.emit_u32("id", self.id)?; Value::serialize(&self.start_epoch, record, "from", serializer)?; Value::serialize( &self.target_head_slot.epoch(T::EthSpec::slots_per_epoch()), diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 16dadb3660..15bdf85e20 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -9,6 +9,7 @@ use crate::metrics; use crate::sync::network_context::SyncNetworkContext; use beacon_chain::{BeaconChain, BeaconChainTypes}; use fnv::FnvHashMap; +use lighthouse_network::service::api_types::Id; use lighthouse_network::PeerId; use lighthouse_network::SyncInfo; use slog::{crit, debug, error}; @@ -29,9 +30,9 @@ const MIN_FINALIZED_CHAIN_PROCESSED_EPOCHS: u64 = 10; #[derive(Clone)] pub enum RangeSyncState { /// A finalized chain is being synced. - Finalized(u64), + Finalized(Id), /// There are no finalized chains and we are syncing one more head chains. - Head(SmallVec<[u64; PARALLEL_HEAD_CHAINS]>), + Head(SmallVec<[Id; PARALLEL_HEAD_CHAINS]>), /// There are no head or finalized chains and no long range sync is in progress. 
Idle, } @@ -74,7 +75,7 @@ impl ChainCollection { if syncing_id == id { // the finalized chain that was syncing was removed debug_assert!(was_syncing && sync_type == RangeSyncType::Finalized); - let syncing_head_ids: SmallVec<[u64; PARALLEL_HEAD_CHAINS]> = self + let syncing_head_ids: SmallVec<[Id; PARALLEL_HEAD_CHAINS]> = self .head_chains .iter() .filter(|(_id, chain)| chain.is_syncing()) @@ -355,7 +356,7 @@ impl ChainCollection { .collect::>(); preferred_ids.sort_unstable(); - let mut syncing_chains = SmallVec::<[u64; PARALLEL_HEAD_CHAINS]>::new(); + let mut syncing_chains = SmallVec::<[Id; PARALLEL_HEAD_CHAINS]>::new(); for (_, _, id) in preferred_ids { let chain = self.head_chains.get_mut(&id).expect("known chain"); if syncing_chains.len() < PARALLEL_HEAD_CHAINS { @@ -465,15 +466,17 @@ impl ChainCollection { sync_type: RangeSyncType, network: &mut SyncNetworkContext, ) { - let id = SyncingChain::::id(&target_head_root, &target_head_slot); let collection = if let RangeSyncType::Finalized = sync_type { &mut self.finalized_chains } else { &mut self.head_chains }; - match collection.entry(id) { - Entry::Occupied(mut entry) => { - let chain = entry.get_mut(); + + match collection + .iter_mut() + .find(|(_, chain)| chain.has_same_target(target_head_slot, target_head_root)) + { + Some((&id, chain)) => { debug!(self.log, "Adding peer to known chain"; "peer_id" => %peer, "sync_type" => ?sync_type, &chain); debug_assert_eq!(chain.target_head_root, target_head_root); debug_assert_eq!(chain.target_head_slot, target_head_slot); @@ -483,13 +486,16 @@ impl ChainCollection { } else { error!(self.log, "Chain removed after adding peer"; "chain" => id, "reason" => ?remove_reason); } - let chain = entry.remove(); - self.on_chain_removed(&id, chain.is_syncing(), sync_type); + let is_syncing = chain.is_syncing(); + collection.remove(&id); + self.on_chain_removed(&id, is_syncing, sync_type); } } - Entry::Vacant(entry) => { + None => { let peer_rpr = peer.to_string(); + let id = 
network.next_id(); let new_chain = SyncingChain::new( + id, start_epoch, target_head_slot, target_head_root, @@ -497,9 +503,8 @@ impl ChainCollection { sync_type.into(), &self.log, ); - debug_assert_eq!(new_chain.get_id(), id); debug!(self.log, "New chain added to sync"; "peer_id" => peer_rpr, "sync_type" => ?sync_type, &new_chain); - entry.insert(new_chain); + collection.insert(id, new_chain); metrics::inc_counter_vec(&metrics::SYNCING_CHAINS_ADDED, &[sync_type.as_str()]); self.update_metrics(); } From 70194dfc6a3f4d10c9059610f889ff5a4e863a6a Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 30 Jan 2025 18:01:34 +1100 Subject: [PATCH 17/52] Implement PeerDAS Fulu fork activation (#6795) Addresses #6706 This PR activates PeerDAS at the Fulu fork epoch instead of `EIP_7594_FORK_EPOCH`. This means we no longer support testing PeerDAS with Deneb / Electrs, as it's now part of a hard fork. --- beacon_node/beacon_chain/src/beacon_chain.rs | 51 +++ .../src/data_column_verification.rs | 2 +- beacon_node/beacon_chain/src/errors.rs | 4 + .../beacon_chain/src/fulu_readiness.rs | 11 +- beacon_node/beacon_chain/src/kzg_utils.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 221 +++++++++++-- .../fixtures/test_data_column_sidecars.ssz | Bin 0 -> 320512 bytes .../tests/attestation_production.rs | 15 +- .../beacon_chain/tests/block_verification.rs | 292 +++++++++++------- beacon_node/beacon_chain/tests/store_tests.rs | 63 +++- beacon_node/beacon_processor/src/lib.rs | 2 +- .../execution_layer/src/engine_api/http.rs | 30 +- .../src/test_utils/handle_rpc.rs | 43 ++- beacon_node/http_api/src/test_utils.rs | 21 +- .../tests/broadcast_validation_tests.rs | 126 ++++---- .../lighthouse_network/src/discovery/enr.rs | 8 +- .../lighthouse_network/src/rpc/codec.rs | 40 +-- .../lighthouse_network/src/rpc/protocol.rs | 15 +- .../lighthouse_network/src/types/globals.rs | 4 +- .../lighthouse_network/src/types/pubsub.rs | 22 +- .../src/network_beacon_processor/mod.rs | 5 + 
.../src/network_beacon_processor/tests.rs | 25 +- beacon_node/network/src/service.rs | 53 ++-- .../src/sync/block_sidecar_coupling.rs | 4 +- .../network/src/sync/network_context.rs | 2 + beacon_node/network/src/sync/tests/lookups.rs | 64 ++-- beacon_node/network/src/sync/tests/range.rs | 243 ++++++++++++--- beacon_node/store/src/hot_cold_store.rs | 61 +++- beacon_node/store/src/metadata.rs | 4 +- .../mainnet/config.yaml | 2 - consensus/fork_choice/tests/tests.rs | 2 +- consensus/types/presets/gnosis/deneb.yaml | 2 - consensus/types/presets/gnosis/eip7594.yaml | 10 - consensus/types/presets/gnosis/fulu.yaml | 9 +- consensus/types/presets/mainnet/eip7594.yaml | 10 - consensus/types/presets/mainnet/fulu.yaml | 9 +- consensus/types/presets/minimal/eip7594.yaml | 10 - consensus/types/presets/minimal/fulu.yaml | 9 +- consensus/types/src/chain_spec.rs | 51 +-- consensus/types/src/data_column_sidecar.rs | 14 - consensus/types/src/fork_name.rs | 7 + consensus/types/src/preset.rs | 20 +- crypto/kzg/src/lib.rs | 3 + scripts/local_testnet/network_params_das.yaml | 8 +- testing/ef_tests/check_all_files_accessed.py | 7 +- testing/ef_tests/src/cases.rs | 3 + .../compute_columns_for_custody_groups.rs | 8 +- .../ef_tests/src/cases/get_custody_groups.rs | 8 +- .../cases/kzg_compute_cells_and_kzg_proofs.rs | 8 +- .../cases/kzg_recover_cells_and_kzg_proofs.rs | 8 +- .../cases/kzg_verify_cell_kzg_proof_batch.rs | 8 +- testing/ef_tests/src/handler.rs | 31 +- testing/ef_tests/src/type_name.rs | 11 + testing/ef_tests/tests/tests.rs | 75 +++-- 54 files changed, 1126 insertions(+), 640 deletions(-) create mode 100644 beacon_node/beacon_chain/src/test_utils/fixtures/test_data_column_sidecars.ssz delete mode 100644 consensus/types/presets/gnosis/eip7594.yaml delete mode 100644 consensus/types/presets/mainnet/eip7594.yaml delete mode 100644 consensus/types/presets/minimal/eip7594.yaml diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 
d0c294b44f..ca21b519f1 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -34,6 +34,7 @@ use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, Prep use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::graffiti_calculator::GraffitiCalculator; use crate::head_tracker::{HeadTracker, HeadTrackerReader, SszHeadTracker}; +use crate::kzg_utils::reconstruct_blobs; use crate::light_client_finality_update_verification::{ Error as LightClientFinalityUpdateError, VerifiedLightClientFinalityUpdate, }; @@ -1249,6 +1250,55 @@ impl BeaconChain { self.store.get_blobs(block_root).map_err(Error::from) } + /// Returns the data columns at the given root, if any. + /// + /// ## Errors + /// May return a database error. + pub fn get_data_columns( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + self.store.get_data_columns(block_root).map_err(Error::from) + } + + /// Returns the blobs at the given root, if any. + /// + /// Uses the `block.epoch()` to determine whether to retrieve blobs or columns from the store. + /// + /// If at least 50% of columns are retrieved, blobs will be reconstructed and returned, + /// otherwise an error `InsufficientColumnsToReconstructBlobs` is returned. + /// + /// ## Errors + /// May return a database error. + pub fn get_or_reconstruct_blobs( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + let Some(block) = self.store.get_blinded_block(block_root)? else { + return Ok(None); + }; + + if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + if let Some(columns) = self.store.get_data_columns(block_root)? 
{ + let num_required_columns = self.spec.number_of_columns / 2; + let reconstruction_possible = columns.len() >= num_required_columns as usize; + if reconstruction_possible { + reconstruct_blobs(&self.kzg, &columns, None, &block, &self.spec) + .map(Some) + .map_err(Error::FailedToReconstructBlobs) + } else { + Err(Error::InsufficientColumnsToReconstructBlobs { + columns_found: columns.len(), + }) + } + } else { + Ok(None) + } + } else { + self.get_blobs(block_root).map(|b| b.blobs()) + } + } + /// Returns the data columns at the given root, if any. /// /// ## Errors @@ -5850,6 +5900,7 @@ impl BeaconChain { let kzg = self.kzg.as_ref(); + // TODO(fulu): we no longer need blob proofs from PeerDAS and could avoid computing. kzg_utils::validate_blobs::( kzg, expected_kzg_commitments, diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 1bd17485ab..565e76704e 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -699,7 +699,7 @@ mod test { #[tokio::test] async fn empty_data_column_sidecars_fails_validation() { - let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); let harness = BeaconChainHarness::builder(E::default()) .spec(spec.into()) .deterministic_keypairs(64) diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 2a8fd4cd01..2e13ab4090 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -226,6 +226,10 @@ pub enum BeaconChainError { EmptyRpcCustodyColumns, AttestationError(AttestationError), AttestationCommitteeIndexNotSet, + InsufficientColumnsToReconstructBlobs { + columns_found: usize, + }, + FailedToReconstructBlobs(String), } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git 
a/beacon_node/beacon_chain/src/fulu_readiness.rs b/beacon_node/beacon_chain/src/fulu_readiness.rs index 71494623f8..872fe58f2b 100644 --- a/beacon_node/beacon_chain/src/fulu_readiness.rs +++ b/beacon_node/beacon_chain/src/fulu_readiness.rs @@ -1,7 +1,7 @@ //! Provides tools for checking if a node is ready for the Fulu upgrade. use crate::{BeaconChain, BeaconChainTypes}; -use execution_layer::http::{ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V5}; +use execution_layer::http::{ENGINE_GET_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V4}; use serde::{Deserialize, Serialize}; use std::fmt; use std::time::Duration; @@ -87,14 +87,15 @@ impl BeaconChain { Ok(capabilities) => { let mut missing_methods = String::from("Required Methods Unsupported:"); let mut all_good = true; - if !capabilities.get_payload_v5 { + // TODO(fulu) switch to v5 when the EL is ready + if !capabilities.get_payload_v4 { missing_methods.push(' '); - missing_methods.push_str(ENGINE_GET_PAYLOAD_V5); + missing_methods.push_str(ENGINE_GET_PAYLOAD_V4); all_good = false; } - if !capabilities.new_payload_v5 { + if !capabilities.new_payload_v4 { missing_methods.push(' '); - missing_methods.push_str(ENGINE_NEW_PAYLOAD_V5); + missing_methods.push_str(ENGINE_NEW_PAYLOAD_V4); all_good = false; } diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index dcb3864f78..06cce14144 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -186,7 +186,7 @@ pub fn blobs_to_data_column_sidecars( .map_err(DataColumnSidecarError::BuildSidecarFailed) } -fn build_data_column_sidecars( +pub(crate) fn build_data_column_sidecars( kzg_commitments: KzgCommitments, kzg_commitments_inclusion_proof: FixedVector, signed_block_header: SignedBeaconBlockHeader, diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index ba0a2159da..e88ce71a7b 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ 
b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,8 +1,9 @@ +use crate::blob_verification::GossipVerifiedBlob; use crate::block_verification_types::{AsBlock, RpcBlock}; -use crate::kzg_utils::blobs_to_data_column_sidecars; +use crate::data_column_verification::CustodyDataColumn; +use crate::kzg_utils::build_data_column_sidecars; use crate::observed_operations::ObservationOutcome; pub use crate::persisted_beacon_chain::PersistedBeaconChain; -use crate::BeaconBlockResponseWrapper; pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, @@ -16,6 +17,7 @@ use crate::{ BeaconChain, BeaconChainTypes, BlockError, ChainConfig, ServerSentEventHandler, StateSkipConfig, }; +use crate::{get_block_root, BeaconBlockResponseWrapper}; use bls::get_withdrawal_credentials; use eth2::types::SignedBlockContentsTuple; use execution_layer::test_utils::generate_genesis_header; @@ -74,6 +76,11 @@ pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; // Environment variable to read if `ci_logger` feature is enabled. pub const CI_LOGGER_DIR_ENV_VAR: &str = "CI_LOGGER_DIR"; +// Pre-computed data column sidecar using a single static blob from: +// `beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz` +const TEST_DATA_COLUMN_SIDECARS_SSZ: &[u8] = + include_bytes!("test_utils/fixtures/test_data_column_sidecars.ssz"); + // Default target aggregators to set during testing, this ensures an aggregator at each slot. 
// // You should mutate the `ChainSpec` prior to initialising the harness if you would like to use @@ -105,7 +112,7 @@ static KZG_NO_PRECOMP: LazyLock> = LazyLock::new(|| { }); pub fn get_kzg(spec: &ChainSpec) -> Arc { - if spec.eip7594_fork_epoch.is_some() { + if spec.fulu_fork_epoch.is_some() { KZG_PEERDAS.clone() } else if spec.deneb_fork_epoch.is_some() { KZG.clone() @@ -224,6 +231,7 @@ pub struct Builder { mock_execution_layer: Option>, testing_slot_clock: Option, validator_monitor_config: Option, + import_all_data_columns: bool, runtime: TestRuntime, log: Logger, } @@ -366,6 +374,7 @@ where mock_execution_layer: None, testing_slot_clock: None, validator_monitor_config: None, + import_all_data_columns: false, runtime, log, } @@ -458,6 +467,11 @@ where self } + pub fn import_all_data_columns(mut self, import_all_data_columns: bool) -> Self { + self.import_all_data_columns = import_all_data_columns; + self + } + pub fn execution_layer_from_url(mut self, url: &str) -> Self { assert!( self.execution_layer.is_none(), @@ -575,6 +589,7 @@ where .expect("should build dummy backend") .shutdown_sender(shutdown_tx) .chain_config(chain_config) + .import_all_data_columns(self.import_all_data_columns) .event_handler(Some(ServerSentEventHandler::new_with_capacity( log.clone(), 5, @@ -762,15 +777,13 @@ where pub fn get_head_block(&self) -> RpcBlock { let block = self.chain.head_beacon_block(); let block_root = block.canonical_root(); - let blobs = self.chain.get_blobs(&block_root).unwrap().blobs(); - RpcBlock::new(Some(block_root), block, blobs).unwrap() + self.build_rpc_block_from_store_blobs(Some(block_root), block) } pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock { let block = self.chain.get_blinded_block(block_root).unwrap().unwrap(); let full_block = self.chain.store.make_full_block(block_root, block).unwrap(); - let blobs = self.chain.get_blobs(block_root).unwrap().blobs(); - RpcBlock::new(Some(*block_root), Arc::new(full_block), blobs).unwrap() + 
self.build_rpc_block_from_store_blobs(Some(*block_root), Arc::new(full_block)) } pub fn get_all_validators(&self) -> Vec { @@ -2271,22 +2284,19 @@ where self.set_current_slot(slot); let (block, blob_items) = block_contents; - let sidecars = blob_items - .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec)) - .transpose() - .unwrap(); + let rpc_block = self.build_rpc_block_from_blobs(block_root, block, blob_items)?; let block_hash: SignedBeaconBlockHash = self .chain .process_block( block_root, - RpcBlock::new(Some(block_root), block, sidecars).unwrap(), + rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::RangeSync, || Ok(()), ) .await? .try_into() - .unwrap(); + .expect("block blobs are available"); self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } @@ -2297,16 +2307,13 @@ where ) -> Result { let (block, blob_items) = block_contents; - let sidecars = blob_items - .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec)) - .transpose() - .unwrap(); let block_root = block.canonical_root(); + let rpc_block = self.build_rpc_block_from_blobs(block_root, block, blob_items)?; let block_hash: SignedBeaconBlockHash = self .chain .process_block( block_root, - RpcBlock::new(Some(block_root), block, sidecars).unwrap(), + rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::RangeSync, || Ok(()), @@ -2318,6 +2325,75 @@ where Ok(block_hash) } + /// Builds an `Rpc` block from a `SignedBeaconBlock` and blobs or data columns retrieved from + /// the database. 
+ pub fn build_rpc_block_from_store_blobs( + &self, + block_root: Option, + block: Arc>, + ) -> RpcBlock { + let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); + let has_blobs = block + .message() + .body() + .blob_kzg_commitments() + .is_ok_and(|c| !c.is_empty()); + if !has_blobs { + return RpcBlock::new_without_blobs(Some(block_root), block); + } + + // Blobs are stored as data columns from Fulu (PeerDAS) + if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + let columns = self.chain.get_data_columns(&block_root).unwrap().unwrap(); + let custody_columns = columns + .into_iter() + .map(CustodyDataColumn::from_asserted_custody) + .collect::>(); + RpcBlock::new_with_custody_columns(Some(block_root), block, custody_columns, &self.spec) + .unwrap() + } else { + let blobs = self.chain.get_blobs(&block_root).unwrap().blobs(); + RpcBlock::new(Some(block_root), block, blobs).unwrap() + } + } + + /// Builds an `RpcBlock` from a `SignedBeaconBlock` and `BlobsList`. + fn build_rpc_block_from_blobs( + &self, + block_root: Hash256, + block: Arc>>, + blob_items: Option<(KzgProofs, BlobsList)>, + ) -> Result, BlockError> { + Ok(if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + let sampling_column_count = self + .chain + .data_availability_checker + .get_sampling_column_count(); + + if blob_items.is_some_and(|(_, blobs)| !blobs.is_empty()) { + // Note: this method ignores the actual custody columns and just take the first + // `sampling_column_count` for testing purpose only, because the chain does not + // currently have any knowledge of the columns being custodied. + let columns = generate_data_column_sidecars_from_block(&block, &self.spec) + .into_iter() + .take(sampling_column_count) + .map(CustodyDataColumn::from_asserted_custody) + .collect::>(); + RpcBlock::new_with_custody_columns(Some(block_root), block, columns, &self.spec)? 
+ } else { + RpcBlock::new_without_blobs(Some(block_root), block) + } + } else { + let blobs = blob_items + .map(|(proofs, blobs)| { + BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec) + }) + .transpose() + .unwrap(); + RpcBlock::new(Some(block_root), block, blobs)? + }) + } + pub fn process_attestations(&self, attestations: HarnessAttestations) { let num_validators = self.validator_keypairs.len(); let mut unaggregated = Vec::with_capacity(num_validators); @@ -2991,6 +3067,56 @@ where Ok(()) } + + /// Simulate some of the blobs / data columns being seen on gossip. + /// Converts the blobs to data columns if the slot is Fulu or later. + pub async fn process_gossip_blobs_or_columns<'a>( + &self, + block: &SignedBeaconBlock, + blobs: impl Iterator>, + proofs: impl Iterator, + custody_columns_opt: Option>, + ) { + let is_peerdas_enabled = self.chain.spec.is_peer_das_enabled_for_epoch(block.epoch()); + if is_peerdas_enabled { + let custody_columns = custody_columns_opt.unwrap_or_else(|| { + let sampling_column_count = self + .chain + .data_availability_checker + .get_sampling_column_count() as u64; + (0..sampling_column_count).collect() + }); + + let verified_columns = generate_data_column_sidecars_from_block(block, &self.spec) + .into_iter() + .filter(|c| custody_columns.contains(&c.index)) + .map(|sidecar| { + let column_index = sidecar.index; + self.chain + .verify_data_column_sidecar_for_gossip(sidecar, column_index) + }) + .collect::, _>>() + .unwrap(); + + if !verified_columns.is_empty() { + self.chain + .process_gossip_data_columns(verified_columns, || Ok(())) + .await + .unwrap(); + } + } else { + for (i, (kzg_proof, blob)) in proofs.into_iter().zip(blobs).enumerate() { + let sidecar = + Arc::new(BlobSidecar::new(i, blob.clone(), block, *kzg_proof).unwrap()); + let gossip_blob = GossipVerifiedBlob::new(sidecar, i as u64, &self.chain) + .expect("should obtain gossip verified blob"); + self.chain + .process_gossip_blob(gossip_blob) + .await + 
.expect("should import valid gossip verified blob"); + } + } + } } // Junk `Debug` impl to satistfy certain trait bounds during testing. @@ -3176,10 +3302,59 @@ pub fn generate_rand_block_and_data_columns( SignedBeaconBlock>, DataColumnSidecarList, ) { - let kzg = get_kzg(spec); - let (block, blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng, spec); - let blob_refs = blobs.iter().map(|b| &b.blob).collect::>(); - let data_columns = blobs_to_data_column_sidecars(&blob_refs, &block, &kzg, spec).unwrap(); - + let (block, _blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng, spec); + let data_columns = generate_data_column_sidecars_from_block(&block, spec); (block, data_columns) } + +/// Generate data column sidecars from pre-computed cells and proofs. +fn generate_data_column_sidecars_from_block( + block: &SignedBeaconBlock, + spec: &ChainSpec, +) -> DataColumnSidecarList { + let kzg_commitments = block.message().body().blob_kzg_commitments().unwrap(); + if kzg_commitments.is_empty() { + return vec![]; + } + + let kzg_commitments_inclusion_proof = block + .message() + .body() + .kzg_commitments_merkle_proof() + .unwrap(); + let signed_block_header = block.signed_block_header(); + + // load the precomputed column sidecar to avoid computing them for every block in the tests. + let template_data_columns = RuntimeVariableList::>::from_ssz_bytes( + TEST_DATA_COLUMN_SIDECARS_SSZ, + spec.number_of_columns as usize, + ) + .unwrap(); + + let (cells, proofs) = template_data_columns + .into_iter() + .map(|sidecar| { + let DataColumnSidecar { + column, kzg_proofs, .. 
+ } = sidecar; + // There's only one cell per column for a single blob + let cell_bytes: Vec = column.into_iter().next().unwrap().into(); + let kzg_cell = cell_bytes.try_into().unwrap(); + let kzg_proof = kzg_proofs.into_iter().next().unwrap(); + (kzg_cell, kzg_proof) + }) + .collect::<(Vec<_>, Vec<_>)>(); + + // Repeat the cells and proofs for every blob + let blob_cells_and_proofs_vec = + vec![(cells.try_into().unwrap(), proofs.try_into().unwrap()); kzg_commitments.len()]; + + build_data_column_sidecars( + kzg_commitments.clone(), + kzg_commitments_inclusion_proof, + signed_block_header, + blob_cells_and_proofs_vec, + spec, + ) + .unwrap() +} diff --git a/beacon_node/beacon_chain/src/test_utils/fixtures/test_data_column_sidecars.ssz b/beacon_node/beacon_chain/src/test_utils/fixtures/test_data_column_sidecars.ssz new file mode 100644 index 0000000000000000000000000000000000000000..112dd43b0474b1d1263d951429f7865f24bf1033 GIT binary patch literal 320512 zcmeGDMNn8xv<7N7PLSa4?i$?PJ-E9QT!RO9hv04@xVyW%y9Embf)nmnf7PA*vr~1? 
z?5wJu?p?cju%BLUF8~R^AR+(}SO6%H0Dwvdz#=;U83F+4O94Qt0>GgT0Oe)?*gF8g z>IuMIAONjV0QjZ=Ad(9JbU6T{bpXV40H87i0Okw;tE&KH?*m|b0RZJa0H^N&RDFN| zP8bk?g9rjVQbT}FRtONt2LU7`AOO4)1enx<00|}#K-~@k;JQPAO@9cG9{~Z(lOO|VoNU%!@ z35uB^fi({#U=)J{*Yc2{Q3Dcq89@R8oBuxE6%q{kLV}1eNT84a2~aa3!D104$oL5f z^jjbSX&)pw9ESwui;%#68xpXdLV~*+NYMHM34CFofCvf{fX0IYqvTK^h7k&=a6th~ z5h$=K3k9-2Ljhw0C_rfi1x}ryK$Q;^a0-C}9C1+KF&zqY7C?c(A5cJ|2@1gXK!M3o zD3CA@1=Kg80PZmq*!&Fz@}Ho9ITSRYMTQ0!*wCPs6dJhELjxWTXz(fs4SJ=aL5M0e zkkNw%NEXmw_6syf@qz~0!O(y(1{&<9LWANwXkc9d4Hz4s!F4AzXdH$HUbE0ZU>zF# zzhw>qfcXaiQvO^2UsvjB)S7n4^Ob8)%4dfN7gM$PMiF{W^|jbqbmjEAE#CjH-}nFj zJa0PIj-~c0@y?NqE7{7R(BB>%{1WY^ZA4+!nz`~#oTSaXDsTjNw*ez!Vw*mP-4-I} zZ8M6JRV3O^cYir>>NYgdl)&3!-=lsc!>#UEc~WHMMcw&qh5fd%ty{E#^JFunu?@rS zMG(Jcclv!)@Ql`vKFIyAcey>H3Gd`rTY!>=+{|+_%un^FFb2YP)EtNEwzKi?!g^MZ zH!r4NRv-`Uai?}1zB~vew_QeZc=X70NAF7nTGeaZ?f&2L_@pK6Db>WdFoMu$Y*Ppsmfrm$-31oqD=h$!DfCa9| zSDqK@cHtu(neS!Dp{A~fPvp(y}ZWb2aH|7fO6(DL2q~v zl`w*>@VBRrU}74_Y;{9NjYmn5L@e25DPe}9aaw<3P?<87Z5y8hd^!u6s#&&N_yI@% z?!a{fe z;SXK8udT^5pMY+*5v=$!jxJFs~0l>n@-?;%cq< zX!{1|TWfTaU)}BJu!X_8MftHPNCX1L8%W1D z)%|)TMY82=(vnwqP?v)2Xv{SZ!ZCP zM@vAG3s74k+}K9BYKY319O|$^7>BtRrIMcYb$wA?oPuuc15m|r96b-J;!3VQdoQ(G zAE(%4dG!%Y`Ri!1-ZEZW!S&ESq_%L)AO*Jx^muTx3-fE1@2noCW2`z0^p&DH2uo-& zgwJih$D+FDXk5H{@+}uNYxTiKp3VvE1QOwtxt;L>oU22lCXC(k9;i92HyKqxJ=UpLGf7Ff*myk{ zyCAEt_w!KT!}kkT$(b{K(c&(Euw0Vm;vgX`d|ojO%TErM;t*cPf3c$1J5fDs%}WDx z;p@nD@~w|x9$4lWMlImJkGfm5cBH*vwtQht0{eglj^jB%ffD*TAV~@HDi-qkSq-=Q zlQ*}a(E#PH-Y|F|?tEr)kDAh^VaL%-Jmga0g(+WsB7(2{(|_cwxdKop@Nw0I8HG_T zLRgOT{|3y*9><;vy!m()q@e|mk%4pH8$Bi3rz~!e_JODL;L-V`_IzpRr@9|8I9s`O zJ^=IBOVR5RC+5>S_Y5gfj_;jPw}n5Nj;hH`Vjl{ zO$cFrk!VWN6e`Q_6S(q7=`hDb!M?%c7_@-Hq$W_l^XOp-CeiPT!PYV-2M2-|-pQBZ zW+XZ{%BY^TZbgYD!qRmrORTvxadb9>AjE_8;LM$&(o>Qsl(VN@R9vJ!lHk;RAM%$< zaczVcK#Aw&U5JiV$<~`|DS1iBvXziFFZ8n6qg)XAr7O4xk|@wii3a^1GI{XFq{5)B z>w1dxV>)v5zK-CSW}MZhbFX>e9t#fAFUC59_U6G#g`Nnh=& zja%u2)+-HIo0MJuLZWw6NX5}_b6n?Vm0ZS+wZ3a?s>U+$$?3Zx?y4NGlzHsFWF&ro#v`Ei 
z8a8y8EZ8Z1VyD|i2yE@KJF|!j#Td`y7U_U&^al_O37OUEjlY^TfLA}wvn~2EA*+H! zICC<(O_Snf8@N{S{nnSv{+S;Q*Og@yhko@K>aSlQU|mGqyJZP(1JID2JebT`kr1Rc zHg28r-hpJ48GCG?b&XC6WaFJzM-u7+Xfb48etg=~g<%vc z%*x{YCmRO)nQmMIAm_xWJQ(<3*lAx?3g#ualM^-sSPOsa$w!@{<>IU zVTH|jP|spyK1@qE7!JOG0gu7I=yD4q__z-Yka)JBUm(~# zq$4$J)rsv}jD-#svciwwz-7IwM@5_dQ%Gap9xI9pWHRks@LdsX3-p+2cV5r$=->pRVy@BXdMO z=iX}*+t`}BW}yqS52n;?QZ=}zhqD_L_ZG&>UG?`*7q~aANf+>-Yi+N*Np2j%AKR{VZB(p@LXHuvrEz$71P2|Y<6%)+YlauCjJ1}d z$I|VBS{{kYc=d@g67mm^IuV!k%IE*+_W!H@Gu;28;lAI8v;11;%NSNHN z@H9gg)c=7meQS`C4TRttuh|!x2-Y>vGymRC?BT4MRCMML?-5gfYZ4wJ181Cg_}Jq9 z>;7Lp-UY1wP>HIXhbkUcBN{TfeZIAB0(dAiL-BtIg(f_Ig?$-qj50~hwrTk5PBT!G z6n1QC463*ge|c7|2l(oV{Astm;fa?WICx%{N-AaygmEf*1G3d6Rp$GAzi5#)CAJ%o z^Pl8eLkaKu{R9x+;nur$06G3h_6HiIK4~@yY0E;=yGZCeb*>~2*ww?3bb6KyAb~#283*Yaz&1-fRYzY>q=G}?&Ck%rgH;j;W+g_U~g@~}(+fNyq26~NnU&C@B< zQQeR#t}^qwbvb}}We~YS%|HZlp+pS667l`r#;qc=s54eJj%iw*M9$Ebgb}Spchf*z ze@2j5lsxMIG)3*^}P0ieGska9Q>$ zJuY@)S>4v-TR{6<+u312Cyq)>)V@jFZy?>La{aeLt9Tq%*#Z{p=KCVVu z#>DLxPy2Z~4hxO~B<|rj8LaUuJennl&D{e`Erk{RLe|)> zFYZ9KGSNeBxsOoya0#Kt7xbMmZ`y~LL7OF{Qf3-us49?W5=LUQ0`a3*Pw~~|i^MRq zOrw@5wUUw)6`Xe$H5b71`n4|yJR3Tc$pxngEI*9j?$Cd$5CRk4ty>rb z3CNO1m=Od2ypnvH?ka~m==*Ix)=hfJ{C9|lE}a&@qes!>uGcaS-K6PMcTjKqYF2@{ zXiZq*dJ7m`)ehZBXx;c9S@we%q=p23eP&?cZB<8nfCJ*DDZ3fZj8OcY`#J`T*wlk^2k7#UBktLN{XfcPN{8S#~siDBv;iB z+an;GK%723*aZ_u0g8wMMw zKs?EtOFx6oOuXizMB{5m88);%E7IqU^^yR~Yhdan7JTC!3Fe+n^frT;##sMa_AVa-8x0gji# zn6Kho$|$oMREpVB;#A^sI@+(KN(t5l|E5F=LFaT_VcSP zA-ppe-`Hn5klQTyd=>MW8uQtPTPvzx5F&2y1F9qLvE$+nZ9g^_z{nF4B`S!WqL8BR zztndz&X1`2eq2u3GrQny-j{|6;s+JDzkm4seN1a%I<_t5vs@q!v05P>S_$BK*6ODR z6dUlCoadG>nhUW+1M!~y?}n|U8frmqF{)U(6Zbr| zx#zy&pUsgQTP$*d2MbzgB=K@m&tE{V_NUbq?JN5it=hB+!Ab4AaThe$IOG77=`GPn zItB2?^Y10JVN8!{;X(A&K|{`1d-BGuK9r=jxQSZpS30P8{%~CNcQW=6>*?0#c8b@j zdK|hb!#B-J!FLB<-5(tM-QoD{VrJb`N0EIM|O!Q&hkjWR{3d)#1$yYw%FKa zu5HYRDkLf>9{T6bf^8-StCutV*#FdUvH68+gDFz{>6w0x zwVF5S>|w%oj^a^67xgb8Uj(BN*g|Q=**dhC${l^~yRmz 
z36fI1jls9-5%Efke&$NnUUXP$M^#$$+pPO<{dvjk2i#>bNFZ+Am}uBz%;x*jZyOco zh<`WwSz)`mNfmWjvqiYfaG)W-YJq1u3K`eda&UGCcRLD4yA1XH2UI>6XYS9bzDk zrO(+})(yvW!e|}DmI(wPU4&=+O2;6lUGpTb)IutCd5@$>hqb*#)gA{O;F$98Z>tSq zcW{l?d4yKGt1umt%Si7>gypfm4O4J8`<%2UDSB&aKgs4G*^>Tr*oH1h|5VH;=$j1h zV+)RV$0EuL!jd|^eCc%EIg#&nIrod2Psx-E%V!c*hQkczImArIXBNB}`fZDD_5`dfi`3n0Jwr|Yr?J2) zEq%cz*SFup@*dS6(-_5a;m1g(2T1L8vzT-Mx+%oAwiMszLKJ52!-13Q41JHlAc*7T zqE0?-;c8I@zRvC!X^k#r`#=lk}nEYIz8|NH{-emFt4Pq#M_qF%kVMKE(&T zl2>$F^d&j%rJn|<%QgKDL@|JTO|$jJX#`<{a5ViFztS14sqn2#%yywi4S@d}jt!_q zUd_m8RlZMf9I1!jg>b6xzWtcGC5AE8DKtCSbptS+3wf3hx}P`ciq3b#ycL>|1VnP(LN=>6TiuHIqG z`!Y2dvrDXszKWVXzbTTp{Ga5H1jMPpc8WZ@=D4ckwBSE@drbE@(<*|!DlZ-H@;tQP zWr7)?Q`YbiYbXKRU~^K(+p#(;&CG=(66VJ5QzNzTZuK%q#(S^DIYiNT4kD=?g}3HK zzV_>}T{{iiKR8aQH_!xB^C9SM!6DYdSqXTCvLJ_%{fE79^2^baucYkl%WsfrHiPF( z7LW1WL|t4n5N=F+{2uR9SyhSal%H(h0BJJ90EjOo{}iKd@d#0Lz@*128+YNy zxW9Yn(V(>f_~P=Y9fGfW1Dd+o!i?DNw2mSpDsxw7PSTi)B07nHdR;Kv5zhRFG4=$! zVZzXYiHkj)()SPue@5@`Xw?Q_$KQ>!`LV1Bc>~2_ff7I6`zZV3x5+hrcuYs~rQ%QE zJ4ik{u$2;Je|};c>NOm>7|pxBgWmMJ!fqS8@0}IYel5RbR_Ca8M8lBNYH?mj`A0t) znJrU%>1IIDtGNk;ZPiOxg#F~ikQ3}?7iESq70UGg$v#Zs9EtR^{dfc>toh}HKUey^ zk+LxOGO71j4|hHtAD&JU=nV^sjHrR{1)ZZ*jjs;z^~od5?1C6E5R>S8E58u)ZpK#J4n+>mQHBJiCbI_s5-!trTMRD3@eWkO_fD*jtn(T zj1peAySQledqLANN)kvNhT+X!(0%6p2Hate5%>jFpDr_5j=OlO);Kj$D^d{j4nAKbHi1@D5IHqJPWM zwO*RW1p_u4+UXEhxf&}3VM^PcT>gFqd|eeI@far&seIzc6|fUTb-Y?>ZvH#w^>W>I z^Wvqwg}JqwsxM9Vr%qhX8gPD}u7!O=gBD$^cq(7VWkigYBbQWpMtbYo=f*(m`JWoD zqVT;MW&>9h11-X@mx4LySH6jIxV2 ztl{tkZi-ReR*b(GSWJ1(BRFAuTt?7KgSkg2hvL}lC=G)x9Le*@MFzv`D=|BGw zr82YCm_{`41rObtSa=AXpD(aGyIH^aiPA zrl~}ccnU^qZjTGP#Z!bNkF+{~z9BPyH*|lEXP52FsD0zpd+Dd7s$nE(W^FYlh~NS{ ziL;C60|hNtSdsU%e5STY)XLG-&X0d9QHI`1sJ(#EMXtv^yf3eAokO&R9|;qi{4Q&x&NH_Zge{#$gbt>VqGZ{e$SB443jub*gmNZ20!U4~ z{1Zu}CZ77S@kJNy9|U~A+PZW3KfrYBgVqeb{`@;aOEfOP^d)zpfm+ooZQ*n95_7Mma5VN&FpKlyK+5x2p&u-8U?&cQ)Q;`rFR znY2I-2CrhxkQdvAYnRroRUWt-4TK=-0N3!8?x?PzTh58mj+J9Jwe=PEE=!+Zhuu5A+lQz-~GZQ@II(Zkcmec)M+3TB= 
zHpWo=dYAzUgpPeAX)@f1E^bCad5h;U>~H?NWK2BLt}rI1&YvLM^)XB@$2l#M&D0%v zQ#NQf+FeILn`fpF0b6l!Z55F3fA9T$!b|q^#KBef`SQ0JpZuw_$v)<2J2t~QDJd|2 z(=q1A!+}%^;&p2_4!);Bb%V2G2;Ma!=QJ6b{R8@(`vQ{}2VR`b6G0($1Io~TGxFb5Xk=))2v}Zp+*m=PjeesR+2eFPKMxx-VgeM8p z7XIV$GEOB1++A$30!N4ds0H$UV?wCqCoeGG6lcF#&G6ij1a27|54&gMU&_F9WGueh0+O5B zUZ?ENVuZ>507>!P&=^UMbY)kSakEPs@g6nvVJ=yz&z3{8biim)$ z{s9nt$!?5Fx^iX5#^1+4njK=c8E7>wfv0z;vN*D!ln3`Jhlt<*YF3iXyRHO437*#G zjZ!8>;Wth% z==J)96fi-@AxX5S$Vd7Kjst??gZLekiiTdyF=w+Si+1zpOK|puVcDoPJ78=ij7}UI z=L%_Ovd3)dxfYiwE#RAI9*FCu~xkih~+bgI>L-|2wYF7&h7#ca(%&2Rh; z#;gdvxhMwH^x5}NQBf66{0%W?;C#$t2gy8Lj12Mt@<^9g) z|2+`ujUDNF&eJV|{F7~SZYdIK?F}H@(tZ$rs7yUKm>Ga-Kr+dtLEbM_seDMPVWECP zjRpZJzOo6q>aP>7P?{m6r|5 zcSpty6K}d?%^;D=B#?l#ue(&V)6F?xQoAUO`BBd=iMX@$PdOF^!MCcc2JoE2g`m#` z<=5lg4XYcFC%bO^k4sHp<_p!F-9(KyGr0aTRn0jWmQe0zh@;l7=`C0EmMBM}x>p&D z{{HpI2oMP{zF>zlhL_h$`U@l{z-1jT$=*1BN{93vT~yR$0}28Rh4u0JTZ&O?bEuUl zPswyYcTddVM}Nb`I`53VrZhbzN*dH8UTm)rUR3Urp)CbT3XzFhjK2h*JL*n zlqE8dTt`l6bM0Y|@LN=+ZCviZx!l1{B?n;4z?>3Ll?=SZRB^aS0)P+LyfbhJZ ztJ~zw_<ElC zjB-`PcsK8<=wD9#BlW&=!|+h5sx-2J5C`NJTImHPFJ7bX#H}5F7{io^C65xvNOr#< zeVptV%_BoBq-^$;r69J9fjsq6u#7e86I6Vl!*&Q1K;`F zG8E^~2IPqLKI=eB^x)-!`tK4dL>O}sx`H_^z3K(!Nm})B|8`>!#-#FQD&|N)Jw+QX zf)3Wr%vCSSau^QX%hbo1QQMGMRmHxitJgFj{~gi>kvo;ZC{VqlpECM-Y{sZK#43mV z>l#H54C_8vjL|cr9C9YoY5p={^+RrC&NKKg$~;Ac(@@dlQ~@*?B|~Q<)3xq_SmLH| zB3gMf@%D`iSuJ%+_58Y65%d-C61m}>Xhgb;Dq$HcPx2m5)BUt$KsE9#=tmOnn}c(f0|nU-K(}QKSRZIGcZH4+FE(LIS|wh_rm_QROwxpXooO96}H?I4-b{q1b;iYux9kPqiQ!`njk7Z!*A`PT?-(4+!^d$}3q zPW)k>6J33+u&UE7#Wd;=vUAg~G=VD>B(go6AF9GcT-flk?CxkMbE{n!1(IE|P@fr? 
z;A>C?kX>PvQIgO<%;RUl`6)Uil_dvjWrXD^JL5rw z;?l3gN~j<;5%W3oqk28iO&2JRFiP@9th4h)HE>$JVp`~Q=<<;O7i!~=x8OV&NwjM& z@0ifOPwEn@yu=bznYZM_(yqgO$zd+%)CmG0!I-xIxd?@|=7-D??F0mKcPbJazY^}X z3Y(Ry;S*48-npAtCznqd&1h_COG&FyAX{$9QaMg1a_M+RlLo#ml|l=5jN@Gb)%uhd zvflSogg$gRx6m!rI;UYs{@^3JjA~Ez+kFfH`B`;v)QIEPXxG1^*`zNGiz@;hJb)l} z6Pi8sTgQdekLvm^12qm9jZaK8XuIoN+x}OM9H7^yn4f@vf9rS5*e*<-mY6>gA=~0- zxa_&Q)Xx<}q+r)dDtbM86FoU#L^sZZ5Tf|Yt5Xx~I*JzzDt}<1F}RTQ-A{Sa>#=^C zAZS-eBiv)6PF<#XTZ)iR+lb(l1djnSH54x04xz=Q86RQpZTL_Xz5K>KJ^Qru|>gJyX zde?r3OxpZXbk`e#;$6KUHpIL6Ia|lUe+`Z)F-)skC$Sy0`01@65vtm+isIe+}u zJ72~T^;-cRK62B1Lov{Ied&yu$Ev-UgML`P*HRxD4i7q>$BrR z8O#9VeSo2nIs48= zvnu1DZoJdb{~LsWVJ!)fhrY2-3;OGmDJ=u7MVLNfXtbW}G)9hhvJHlcAbp*_ei0Dv zAD*I$7W1sb9&62<^HX8TdIX!|$G}D=`@?UR)WVEh3I0j+=DIm5Vl(>wF0$uju0m@? z9Dt&&`TU@lX;M!;iY03Bn3e_Qi?Z!|=VMe}LjF%?3Vb^6twAJ|W*sx6_WPL;fnj{) zBw(q}bmCy+)c8C4zd;M{g?U=20|{tcMw`N&aSflQY@S3k0s$pm)!#-(y};DMme6-H zkAv^0Pw+@@TnODkk$5aqpC3CE#@pSEdNgWIL}(~H?&ZAk zEY|ClJwz>8kP+I69SrW-L#;FXnRuC?Vg9cV_rE{W@aT6i&m*)@2oY_0OWem|*yl|; za*j?q5|%S%xgh)iuxQZjsPZsvBred}F8jVFH*rQ4B(J$V<3#_Bo`Y8eW72bS7IOna z@`#I4e5>0zBc5g7`iNoKZ&7Ibw1r}T@nGu$k6*_8Hjm_U>gG&~7?%M|^nFoTSTrrXJ@$}7wo7V zAFS#Lp2aGwAbga^jQIUuOyGd+QQ-B!gwWs}gjQ>U2A?u`*&CnvNZMA0x4f6zQmBPc^)P&D@*vhsdXi53ghtGvo7-*5N z$Yo8*|Hy%5TX$4j7m7AwU*O6z34wq0v~^U5@V^FYgdb2ECH|m8=iH|1_xze>;0pt4 z)8FMozB3h!>pssO1LlN2y(VD!xSXTg!+1v4YTw{{>+_Heu{oXE(poJ3el2o35ejtw zL8NKZzYI1wgk(aN)wDxl=>8JKff-bE^&9wflmS>Z#g&72%CPyoQHI^!!`T zi8i4(d58~amYU)J?tpG1!l16^3!uRx6mtn!v;TfF@wZ*yV&uYkCODg>u^iVg@$uPW z1Jpe?AnCFS-NbznSpJCvGl3B~>zK{Tl{2}}l}cI005VUt4)-}-@5Y__9(@!l>7cqd zTgJlSBW8|Q?qAVc^nd8a!ZM#ylF%iiJu-j_yY@f`j` z5L?*?@V^L0apc165j)^IkhpcFiuRSWNUw+E8ha0Ur9K6ouef3q!JFO>XX` zbqdFcM$Ci6d7Fe$z6P=&>XCAYPk)*nvU0pvG z^2ruB3qQsvcwibx>+&Pt%OXTd)48d@*SW$i5rW^GvIeY8N9X3jSgL3|+jFOy^(HV) zwv3r4-gRpPV_@#{Yq$B-o5OZ1_T{O@2Zm{re}=y_;dX63Jm1&!T*095UIfpzies{X z5%ZM3<}mJ1I~>?9jc zXB8hYeQ}mr$>N}%!8IC|dZwT_2kO)4WUgkMdNT|$Bql;!83FOOOMN{gpjONs8IVVP 
z!u*TGddg}^=ug<~Xn5gfOg1K|`4C78=3%g;4oQ#h)A-!;dTQ#i*{%%#ma5e%v@BIz zI!i%!Y{Z4o&#S~XMwjvvEh3KeCqE`+Pk&>$G;4v$N794o~S_Q<-U zawqM4(#R(4Bv|?^5MDcs+5lQq?q;PoMlPK2!e)uhAJj5q3FDS_T@N7yeZDDo#DLK$ z5dwkBkc#*-#o+OGZskUweIqajZ0R!9fm-B*DS=y`> zBxZv;-cnAqYOBYp)Ii}%MgNj9K$>C`i&}zD!L_~CkZei*wP~1)2zu&QVhHI2=hn>` zNVrMn4~QD7iB65$@86zYaQl@VRQ~gFip=|i5UXc0m=a+tva?O5^zS4N>=pD-_erLg z^d#015W)!lI%RqY!nA#T-p-K?e;Ab}oEZAPbN7`rrXA)dykL3lZ86w^v?XXwnT@K! zOy^5(=0Cq|vnv`zf3Pz$sgt5ZJi=Llcwu)jLBD`nC9lLAX1$;7z2CaCXFjNN5i)$S zG)*-G$G2rJc(9dX$Qx8!?GPjP4YZpXeuzkAJU@G{!XW@a-*JIPYn?CNvuS+u*z5m^ z2wDX<`EPxjZ_$0#52HY+{wcn4L@&r7w4ra;rLjKmf}_h3_N?Z>TyB}!odeio0SY3S zpF`zgKwa`N@_LP|hM%Z!z^DwJYYPX(A*i(x8{e1TOV)fx^)hCkNEBm$f&Ekji<8qK zVGoX~fR9wO&DcMC8y%={Kd8cxzW&9$D@tf~{-mgDTprxJ|Eb{`=F&vy8j<6TCa%m+ z_h@NnK;AwZcH1G*Ktz12>6jSxJ|L;Smqd2hs45;c z0_ya0MI#0s#q#3*SjtaomCq&NJlmM)B^^}cMSET119D_oiFxISpyAFaYlRUeevH)* z|N5p#S}HfN{|J{*0%L?pPElppI*CGWD?9J|=ZQG6y*^RuIVi>5cqR%zkj`~{eG7dT zQ_Mg&y8f4-b#ZyKZR7Q;76O@}Op1IDK&Rf9ZSXS5m?viu{YRT7A&X#vy+I=Ull8H| zqRTY{JjF?T7!9A)FxaU^+!Q6k^(5Hb@1A8cO@-T+U_jk z5%q{Ur0Xt8ehO3V9bYv8j4Kni_1?dEQZ7x8cst~0U8C3K4t}PxT$EmHFP&9@xh6Vt zqE@w+<-l0*7B>2g4Ldb*&LuCJr=IMOZE_BvN`LPEGB}Q(Wk8U3J4Q8k`XSLSnLU$@ z*S#PZZmkt)Wz>JpzDM>WB%bp6jrrD{P>e+58Dz}T=#uy@2EKuxJSX|Q z(X9yFYpEo*QOc;Q%Xt;uBR62(s!JnJ0mIcZ#MUoUwyW2=ka7-nU>5fdQ$Ic@lLBzf zC?up9i)j2n9DUpxfQ*ElN5cPMm*=GYhdZzEV;KN1=E}T}r?lpQ&J&)R?m5}6tY^Nv z*{ysdc``N4gTU9|Ct9i(C3=wu-)HyoX}l$uc8!NfB7w)Bng8lc^MPH-%Ap${p?7qg z6!olJeN#cRo=g|(P!gE^?V^8941E41TXk|HS^ zuY9g0`jRE1;@N}Rpg5iK+C)ZaTf>%4+VuNHtS;O%wRHLp!5`~QX9*!(x$-Usz!ZD zVGFDtmff^Up}OHfc0OF=7_58}Vz;G^i3%8MSBrchBrdq9L@@g6)*q#txvV5CNQV?a z?`hdQiVIkzF&%pQ?LVl?->+5_*5|@sVZyZ(wQS{|8u2h^F#v@~w(wcCc--T_+Jota z3V3I;qA*N&Qlf}33U=rOS->Oc`BjZwl?Vbm3UUoknQ(a1L8O_Rob*?L zmcDvyQdC^E+gXVVc#U$89H9?A>*klh{X}@2JFOjim2uS1P$7H)?<_dHvCe(`)BOYY z&C3~xFht`@A8iadhKE>8mEsYHi0C4p8|?G=)HPfau-gbIbE!&2%?nS;BsxYYM_4Iy zA{aa#2A!j(t@Y3pOg90gi6K<;rTd2bm=r#H)G(@swGZ*^=NKY*8z8Dw 
zeFIlNUCf+u(ieL9*Hu^b0Ok?0%g+z|Kd<0qHNm}cd@=8C%6JG-*d>l^%)jwbuB{Xe z`34HX|90lToQJA9OmiXGlG28^7x~H933e{v^9-(opS;d6$?f9p5~$hI51MCwB44WU zvQ73r<-~ejgLTuTP{iSZ_dyMT22ieeyF`X%*AK((htR?O=y6n~9n@S|tkiIi#GUvG zz;t*^B+M>_iFC=s(U_LKdIu32Lfuds-x0kW{ zhLM>mcz>x?SKt};d4yrF&K%G`na2wgl!Vi+dG7pz@4vs z!zcyDLUeyeMSjM`x@I1P7fFDKWU{xY71>+mNNzs#X&z(s`er;}4g^F7@E8Sa1I za5tesT>eHLv*&YAlm8{3@j*s!|Ik`pCfNjvZ1(@_b*4wmw+m9tq3-!ME=r zOI;nA@3(qu58wU(0V*dd?{a7HLZ$K=(p$>oH|Pmiy)sw_t(7ys@6Ja%fr6604I!oi z&M%?nKNwsnGTOD7h2?5->(iqdwr48J0Xe16Ra4`c=b(ibZnJZjAXbv2&Pv`-AH2_ z-W9k$x-W3~or#{k!>IOww$Roh&TjITww{7vH?ELaad;_` zT-GQCg}z{`(;rE8Fh-!vl-_0X#}^|dtrS_19Ma5}%p)NKix+}EES}J}nEqZX&ECmw z!b9o_pH~LHT#qCyuMqK?%iy4qLz zR8!gLUsz@vk_Gb{evlnXSX$x<7~qJ~!GhPZAuxxdkGk9!^_^-YQ=!5k>M3s?!7=N6#` z%D#v7D`00-t8wa`DnnK-+$66zWPv;JZFdsSV)+28BJ-FLiM7`d65#w6pW zrINgnz}dJWcK#Icvrl8wHiAZ-72Tr-)%04$+Oe;}*I&VLU<~16nH1WO8eNQi$3pk| zl(XHVCbyEsI&Q9uti}NbEV;xUM7To0WT+S%CWoPCX1B5yS`L*nri z9_~yZl^$_s`Ex2H*~isfwKyi+ko=_uy>YS zL4|AAz3Gr{X=$XSySp1ix>LHl8>G9tLsGh1x)G#Hx{-Pu?-=h7I3Lcpvwy_ib6;bv zxiA%SfXpC@NzYYber9I@_qFw&AK~gXPdQa*^A{}*YVFHTKoEA3*G*wcSVVAiM(>gd zk3lJ6esgEf)iPY|xD^AyUYL+1*P0Y5@`d!veyEda_Lv>CvInc0#0p4`cm$a44^^D+`*%W3`6wY z;yptUM!a2?DMToPJ(f+?Gmf&d00}+In~?BO8W04AORhoM$L~-+Y4q|H9kly#Z=9m& zPue)@WD(!*C4?*mfv)-DejrAs=-tPPZs~3pDY3}Ijg^%eriL*KD8x2Y5Yit89s0Ja zSl9ohZLzJ=*ZH|IQ}D|jry{lD(Gf-oa1y(RJa<>$?!XT`eW+T$)}P{K?Uf5^)VNjH z;TCiNEECb`!sspap>Sq@X+D0LVC43z%>5YED-?XwB6NiU{&*xMWJ`q))<7_QvCU(! zkV}NN%vh%93z7cn|8)orT%xSxBq6OR_wqUOQbc_b9WuOfSbu0SG1`8>{?`%)_#uI1 z=2AuY7u={;Ug4S2+^C7{Xssg;8v=cC@%XV|MDFK0Iji{7_X*#=ukYXF5^*nyA=RO! 
z$JT#+9EQIID|p{uPJXAsz^fIpHdQUrka2HZ?MR5OM;_?zM!b;$j2+yhNmQ;JJP&z= zp#o973HQBJjz%LjK2|Z7?=aC|{+eBKw-?^-{>vaP1%X*bi!lmDj-7qx=?>fX7~B4#$jJrEg9 zE(K=H;qE$-=RoKE9{m3p?!V7)YHf~cKC;?p{E+bsqvqIUf(Vb(>KziYA(^kqkp%jH z2WFLlH_AYnx+J7CSp`-a<7JYu_5k~=or6G{{z^D=p$oF9+4cL_$JSx727U2t9 z@SH2*;BlW#v2X!R_xvvzHJE+9@r+8H?-0mgUNqlPOn++Kz+s6^T*w0iNqp?K7cCuY z*YO&jQBiub9FlN>E@@gnMXKgZHc7yMdlJ!Vr{8kA78kgH7nVwVaVFfx+gA}f`F#-3 z%mUs`rTwKg%G|)+#;7FxMR)$~T3_MykH!i;@7FXYs#Bl>tx=_Un4o5^G_`e>6fG;v zW62VH9ncy1^24=_5i28t3c^E10bl8y$P)~7K z{o&|KB7ScT=+a@oC)KffRt}jC!-Kmb`ZzU$eWIYzryp^a*9(OLX6u4DN->Z$S=(sR zO*iV<;$rn;CYVn`{w7WMClj@TpC3#W<=IkR&xK+PJxmzX^AhZfHgc_`k4hH}&kLi# zLrDcWo6|>eib_$cOKVYu9;6guIUIYn0Ev)gmz+HCY&GRFTtR+X6C;GmMM+0#F*|c5 z_QvvO3fnmP+5Qt4r4cu-V*2PM;LfjVQkyQ|K&ANY6>ET&KuVK%1&18KKY6S%%fgyV zyGxdbb3SESN;j6-`%>Y%X+5^1vkm|iZ9lADbc{ZpjtbWVPnxVlW1DXzSz7P70*iK_ZZ?I#_Z)EyrWP6|}fgz~WDV!_Qz;OT{` zZ9_d(`Tg!Ywo7{w%9L?jyXAfwZp=BALCJCZ8@#VGarI{%$;XXN zjMrdaMRRBVrnvB$GHFZ!AfZ=0_>0R0miQ+;k*+f{ZLDNRXw0+X8BR}8X6V?ErsDmQR(DWq z+)t5E120^jT&txB&qScaRj4nbBpJ=5e8tJTHi7R))z{hb>Q9kT%QcxTy$;^)C*tNc z`BIhWKiNyX5Q{E^WmsNt@<9ew!tZM*vjY}?Nmnheu`E-G7Sp`M*p4pRYUwyzkE=@q0azb8$TN8bEQJc~xI?=di87If+gz?tsMPmUukW5O0LG?F z8qW9R_!kotxG#?Uh=+Pi8kssHgK-9;>uRUcp!%>@woFk~C34|7XHuEYh{y_HNGA<*AyJPrD2?z|NIddNv(rZr>ic7xOk855 zPeTJJ+{dOI?YOrIb21HfP#peDOKlk7gJiTEFI^ZoTb&5%;|mH*sAjEl++zChB!0>a z{oOTDZX^c8J^WDe-hQBmXi2K#+Ey0|$198rr8b60{xq}8T-VSbL5wgkr~uqLefV2{ z$>&HY5|v?S4na(CGu9oXGNOEnjmw%ne)yj<+z+vl&_U)Q-u!E)qINp$#ZUq4d?PLx ziS5~riD~4nFe*Gh)h@!iu7GBIZ(Nzw-~V6f`@c@Zf3efB9@SqvBPX)vL}#-AQ<3wo z0j?Ie2qwf@g^uAnMJ)iEyAD|26DNzwR|A`VF_U+Y(w{P9;2t`WjX*GXed`01M^5ja zZ>XXi!(oZ_Ed0f|-SbBl=|3AFPp7oC)H#7(EmUqTSqX@3U4e_=VqN~7bYynCnPu5H znu(5V19f0N_Sn~qasD^6c0Uu8(Gyya+r8SCnsSJut4dm}d@F!oZRy?f*$|>KHaN4T z0@-7<+SYI0UG_z6QKi=ffdbUKs+&DiEH?ao2}*`=nY0mnQgXfaEXuG{xoIo!SOcML z?1-BE9nAar!^D4+1z?#o(6ZmJJVg0sSD@doT!5(uT{%x1jm4M=p*|DAp?+V2q7kNL zXj*iOvoAh#_ke->djb(3JG4Z7cy_`sSL@HMXYo-GiCbQ-;-e&R5`ZM%c@hIn#VMxw 
z(+36&ou-jlm_85r?2x_Nl5d?nn*dqHHkjtm?``R+_~&%?3BgHHeu+X|*JNosZ3xb% z?thth+Eiq8mYxyrMrU{#ucnWZ)!Fm%5Xg7ol0uNMA>fB9dDSZFk#k*9Yt8piew-nT z?t^X%>SHDD(>ld6Pq6m;$g(Vga|K6FofeZXtkcx~rncOm0p%w=O&@0yB@mfV9Iv;! zWctAgp%Lq(KmbcEPqgqZK$mo3ayC@f8kF^KVo)UpCBV1w*z zQ>nsk?uz@e*v;#s?wU0~k3>io)MM8C;>ODe_TyNt^(eDZw$O4!P^-nBW7A=yus_Ne zFX2kW7^Uz4E$!yA!KPwmYS39$ZgU*)P+F{g9ndMTo{9B_?a>c%|HgMB>K4g4dksI@ zQkLpMSorfle}HRK_ZOPiHr@d_lMeP!_8&VtL*c4Go!CF= z`SknLxM0=0>o*GQ^hRpQn73eTDj@Tnu=ivwySQ2&*+$ zhfureE{?F7vcBUz`gss#XGQw=lMOiCzO7fm5)I34Ey3fOPio4jgcH7EZ^#Rnu3V4* z=nCk5Gxxl|jKfXwvFm7_cnK)%X{omyGh=MHGq>VoOat{R8qfrfm3S0U%gX&1wx}1@ z;XDvjE@cf4#?RWxF92EW2qU%x(s<@quvj9c!%joePC=W{!H%(hQwB185Fr1+bHswf zKQw14Nb$w@LfG_`D1WX`!O`_hEbi;p4^YG-K8KdYm*;PnX{&VnWk4Op)_bsv-o_Nx zfpc{y6g2YZNvonoHdlE-_={PV*=l*;-@*wE5R^LiF3OTH0mipP|2R``gCfC&r@b!9 z@y|5qdsZF@8`Uqch@Xl4!Om8};ezPi5W^^hbr`-i(!r}39N0eQ>h|A~=Y$ynBlB)L z*Zm{k(r40Goe}Vei=$c@mzMJS$_LafMIM*H`_LBrdzbG$5*s7S7wn9c^SP=}1%!Dc zS?J5gOP6PWOHjP)-`iu>=iDn%Aj_U+UvT}?%IAW#lk`^jlj0CiL%EzGN%GqHTxjiZ z*syFR>B2OiYV#wPQ2!!LG9rh8XIj;;Z7>#rARLo*!n1yMOq_I}Ml8z3PlKq}R@dHWeIRkNYZ*fq*rFB`YClKoPcipT7F z4;(u4B8j zQ1rY!Rk!Yi{T-D6qZAdkfY_3o6k01*7_T$i2j2>9MJBpNt^l z9`2>~-+z0Bi?3dk1cEJ(Zn>S{GJll$(>d*#%$~Oz9imAYz&T}jhkUka^OFgZPkpkc z$iV-xe_zJWGUh}k00(~Hef2STOeon1+doP=QC8lo9-+zsw1pI_?`l5YtsXi}*`qfpA;*&YL!xEI z4L@;J2+2EwA?p|_b&ve~rwCUTL*azO4)!roUlOHcS)WOIy4wkW-z;H0qkNV=-jDc@ z$Ew$|+AAvL{NmlwKF<6a35fuj5h)623~LE@4hKhl?L3xq({-{JKP%c5>8ghwbcO*P zR}V%Gvl{2L-&$wJrv+*gEyZknK?D4-YaZ}nzX!lOi=n;KqdLMy{oI!uHGF8sIfRJ- zRGIn|nJCl(Xj8zS=5UX$ezL5fa2u(DOTW;rNss=q{e^=W4$^Ml#O6O`xTu`K!&B8e zs&`DR^Gyn`iT+j85L$?gHDpmty>72}j1{m#zpv@F;ma6C$UW(fXAu8)hx_l};WqxM z*o!-i_Ed~v;l7TW7eVd%W+A>Wc=t^tSa>n)6`WvADfSXV)((0#L+_94l#w-}94Onj z@IIvrxYsns0E>6oihi)V0T<;5+BywRkhokCWR(I+2{}=yy7PxD0KQjiVrmTORD6jI zv!qH_YAcN;0cFWYIRqiQ06&%kP-knh zF)Cw-rPnjwM{<43EY0PRZeH3yrBAFOF15|hDunDn)Ci$>I0O4@wh zQ&QEi-W+aBXUK1zhS>hu3n%4E-w^?<#wO=Pn76(~Ibx!ua9O>M1uJ3Cv_6+*@r;fU zjWPq#51f`eR^=1x&9y;?<6YI#AKoN_zh%dkeck+;ri}p@E24TdPvz8L#(myDh52Q@ 
zkwDK2Vam^v27f`wz{drLw(-|QiIbi&K{6lmBJLwHd_Jz~dTu|>Yh$SUIYxmo9J+V@ z2}fh{=$DL=QHN0y85!^}{8dbHB(Qrcc;CR7vXip)jkFjX@!Er(jpGWpk=m-7Mcp2f z$6CYmuoHM@xmP_w3%iB~s;+w}s!~xGUE%Sti!m4iVZUg?{(xhF?>{w4`K!hlLr&Kd3T0|#8Fgt12A<9 z+;nZBhoq*@hdR)I#S_+9#SPV{@}O=bvNxN31o``Bcg&QZo{^34dxQPUHLnaKH;HQL zDoS%1B{Q(~q0y7C8DHiVV9nM7GvfFtCK|^uw@ejYCLTGUJ z3SJyFd*HX-(P-l`*!S_#e?rh+zzA5Id;Z4ig!v6Z6n_1)Iwk%?n~}5%NMZig7f@y$ zRH#aaS(`Sf&B@Wh$TIx^N3;{Hh=p1X*e1proF?g5W#`{+MqMpfJHrz{SmHz{(IDMF zDqq=x4lH9Dr2_+B(-m^})rSL(p9ur+@j*OX_5s7E};x7@% zaP~L|J9sP^gn&HE`922nMEEF5$_=QXi#flmO@(6mP+?ue>Ml{C76%17sv&@`<>cMn z-?f34*=eyT(;ZIP9_bKPruG7(md1Ql#1i03lX5ldC*n;rw9|mxC}VoX4p``^JK`W# zT-aXk>0Lk$>E-ra=d{ja*7em>Y3sCGg`a34>&4Z=P-kQTYN4EZ&|En z04{ZAaKBHOp^H_}_JZehMAVoGzop}fGOGK&$488t9a0?pr(vb8hU%+--qPk&^z`@MjqE!<2RFM$^JI4(iTQe1J zFUKN;NLu_TubK`C3Ux!v2#8(Qk6eFReX zd1p6(h|#8g{;dV+%-*|LnG8 zoI$}N*SYcZA9bh0m_iEa3K+ahvgI4Ki@Lst*6i;ggelxI5YSmtLEyJ!`2f49wE)oMD2S^|GB9RDUMqUGhq(~p8;SE~3FjTu)05tIi*eE~Y&C%J| z2@>H;mss^$@Nhim1-`Z=xEgOD@#Xvldj>WA(Q0C_(f_wpc7ep!(%;0&4kzR@x?a;9bh7Mn=sRRwibV%) z#8~p5GF+7#zhFi2ORlO4zd+vUTq(>Np~`1AG9dQYq(`xL_34*-It*nQ%V^o(lm$rf zSGa#OYya>0-yQD1c!!H2E?w@w`^q1~Rv}p4duQ2(!$Pt+F#Su?Yjt24e-Kn4&~CxV zHZh4mvCFW3*u{%>o&s$*LCi2g7+*}x>Q#gj-XXX|ezIf9x&HHlK~IlV(u1h2j5 zq7HziW|P|!pZ^>P`J@AaxrH>Cpiwc>tYGJF zuE91jO?cSpKxbyuIN3V7*|a*4e_fx8f0@W~<$aXNU$}7;(H@v&pap4>x&Xf&fd{Le zr5<(`&Bs5V{M3cv9YFibsnn(uBwOWKR~+60EeHA^+ules9~bnx0KF8vNGeVWK1~Qf z$m||_`Z=!qtE-Om>|eNR(29aYSkML!xkFlTP7)d5$L^u=r{(^3fFW%4#r&j}L~I3R zq9z?=H}*}&NGu6lSih6^34cd7kJ{9VvHKb#^&zA!pLtxi`2J9?kR|#n#y5nk_ucX?KFg z>@NJE?ajSG^zj9{<~KlX`A{17k+n`iae-CyP!>_I*Qb7tWIWePiQZIOWfU}95-GeR zD|)!E!?t?Hef5PFn<)#6`aJ6a&n&|0gz#@C0M`ADTJnkys>xRFGQ;p9YpSb)e0}^+ zjP6rZvOhpg@be&Z%U!v!;;a~G^+!v&{IxL0@KNXF??uGdd@eA{rjUy^7G0%P$k;cC zW?h|Pnt|<5AGI+*A{8GM=m72+bM0@AYe}{~V))pU-*{UDyOD@YT0D=H;vt~?NCW|g zA;;Pg7=oQbYZcs^-y|ZOZ=l?|qi<{-Xp{7Qu+}mN_;u){)gsSPL>H z?VJy5l%QfgZ*>35w6|9T!TXLS<;CR(?6DMMWX;LUezFc1moH01*x1A-0^V;jWzqc_ 
zg=A>P35XctL~pza+9Sd)6|GI^1?9TBfYi4I7N_8JeD)|Vp(2+mwaeEZ*tsu79LVUr z=#x$Z_3DpO?m3Z1xF*`t_DW`dPF71&rcfwHQ;%NWzZg(}Q{u_(9-rxK%P(5$kZ_3`eZO6pofb&1M`6*U=z{2AbT+%jm=C`LO{FAqIxh?_gHt z(++Eo_-ME5J=bNBP-SohR4&=$1llIE|If;Yd{7r)%a8HOm>Pzcu`}@^L8dcmMNM}; zLKMNS8C16N0u+%ShZayQI5}8`{71Mk2;w1b?oU_t5p2W}C{}J`z_#F5wKWs9zNX8Ek0fA&?mpd*%>-MyG83)CbN`G!E ziqxDFQ`g6j)s+H3u1k1yiOwMA99}}D7{0Rd$QK4w(gLEWOj1F3l70X!vaKa!tf@Wr zg5(=8PMa%{hKKL|;o<)FIN`xWP8vMYx&AeSUuTTqZ$mR=}$;3W?~`cWE9RdpJPE*S>Z0Cp7}??(9>%s;fY1ub|8$<)rq0O~gbRvn|kLC6v0$WjG1w@sAw6jW)ejDAH)6k?{F_Z%^#`gdN-{E{S{i@MSXKk#`Y3MY9(YC zxFY8hfCiTM6$)X&aD*(3x27sy0>>{ogBPEX#-e?ej^&^gMnQ(_(vrB+f^36TK!~Gk zQ+eL_^v@SggS!sfVz#WYArM7>EkRs5O+XYt{evGWR2Y$1qM}COw$J-rQwb{K$3Q%Rgtjs|O z7ViBKK#t!-4WMDMIIG=Cu`YQqmI5JkthyfQv<{x6m-}L>_G!lGsM#w_O*q&f!Bf9MLO7HEOjEBDA^;R+m{rK_|lH*<6qj8^`W7}B+g4$Tf z3>#EPH6Oyd*nb{_R4DjdPK+(M>Wu0j1{)%)J2VsTRGs$&3X-dPjvlYT`sgAfGfj#* z3#YoNP0%Dgbbq*n6H!*4e#+QPQFIeP``RCuRJKMyj;2G`8dOsfZXQ(j;5K5L^J`TS zp}+!>G-|P{*wrGq_Mh^GT*=P+bBH)TWu2Cj_~Tq`>YHFgrv^sq=f9?~=D*;tC1g-B zev3sVj30L5Yn%W(3@OO0ofGn#e>INM!=fBuI(IR^A8>hrN2acfI9Rg_`3W?9djqZ2 zJ@}c9^7e6ti!}Z~@o&?~VzO;2TB9HjU4sJp>b}otsZcS7IVNTf2@8ZxD~3D}>es<% z3mer_{(wu1T$GES!g2D8V0YM=5M*g-B#DUDg-wPG>GAE6GB~GJ!2NV`Qqq|Zzl34m zL6u5z#_1X!#!2Z`7sNTe2nt~Q85whkCkGDG(X`$>Lnn9OQZb48H`ecn^|l>30%fUK z9tTNRC$)UDmlr&gO`$Z*m`MzZeXAaO+uz_C;G;1? 
zOJ&!Gv?6kOH0`9l)66GAiGrQ&wc{AStYW;B6NtVy` zTWNBHc=7O1EZR~JGF{hLV|*YC)*ex2^s z9y141AsY2jtpl(Q{LNTNXj%gCA=J3;_2-~a=2YU)^q*fwH_7dm&l?nx zS75T>_)Nef7w@+vnd!t)ctQ$ZdzY%BS37JAc9rL0VL-hA$SXt#rM$FI06Vcq&;!+6G*HU+4u2fw*pjv{xYv%9s*Z9fU5E+2bfh# zt$*Mdg&2w!D1Gr>tm<_=nhCQaCe6_FkuMz70h-^4nu1|!5rki1ug?#{w~2>6lG!#O zoQ)Wq%nCK_0ouZH;ivAM&XLCf!z8b%bskHu1eVVaX_kWF0ljuuaTQ$gdb!AW5^ zT?>hdQ@+!k6gflu}iK<5bvlgvA#72|!27=#L9rZv)Y z-!uaG@YcnShN^96++3{7K2 zx$G#~Jin0-!tmrA#CD-bw*v7X1JgT;(LgN+{Pl^!pAzvMs~E+*hYco8BIm!R(O8HP zu0ng2Nx)}&X;IjE21;!YL&jXX8}rL}AYz}N-qo{^z-}<#0}x_k zkKMjenlP)JdulcXx9(NTu(J5$T+dCx3Y|a~e5-9Xa;)WT|C3dEsd&=*_sAY|3q4{Z z6o}mZTLp{(thKxN#PVWNCo%QrdhZne=+U)ie2lLy=6&xL)boiIa39Ae$Q(Ksuawdp z`Tme{`swn?W+Z0(U09PJhj_6V5P`(SAlq@vTr*7lvl&@!kbXp4T!t(DoBf#1r-LCB zY=wA#I!!^$?`BWE%+x_57K!L^7N&xh2!RX!b@CYfpE8_(>}tQpCm54uKb@(2gAb=S zZtaC5v{JSE@57AhXD_-rKH1XRz==`E`K^=)RejGz{vX5r_ZjXE_`!`+qi;;UAE6vJ zEI)mRiv;}_QCE_G%gXd`pvXx!(-xe+ac68 zuvdrACM!`)3?;C1H5g_wJ>uxdD9Q5C(k3>}S zt$*F?Md&_=+qkJVJ^H?s@S9*C;iHfPJ|;l|mk+*QaHsDHPGTNb^VJG|i=%dYViY`6 z)_C9tc*8#2d?K5i4{?u>NA`iwHPORYeZG`o&o?3O$GiUZF42aQlBwB~tk5N=2M>#G z{t7+pM7K$G+NTM3n)^L~(4y}eRkD%Yzb)CJ9)&H#VmiINGwiIZI2h-aTQ4_&TLe>r&7{#VOr~vajLDNm1 zHB^6}(J>d`R#w$rYvb*^IvA41c16l4YO2A?%a5y>LRu(1L?{6cmbV+M=wePb8(G6^ zv9tV@WAdQ2{*c`}MUk|>k0k}ZNIKa6KET)FAyb_#eZ@_q_@$+`$+7KrkICyI|6p?)UkwFsmNaWN@TuPI7Xyw@27hf^`;Vnu|r9 z#udM&aFnEC0{(!+dkzXOH77L$%4iIV$G4BjUB(m3e&Vu~ zxgl<(pT2^g^NCE-udh#eCLP*g#ZAAIdgW6;-SaJG9)2c2gAoMAEjzAd8hK^v`D#x0 zWBOILo+Ml-Y2#DHx+(!>rO<#(ASg6M-$W0}R1r_fD{Ae1qunl)68+-yiv>5Yp*O&) zKh{oVip7LSR*sn%ndAMOs7%9UbYTur^EV|P9tmJT&9aPk)O?cDPM}=-ttmuj$w5w3 ztZb#?U{(E{={D>x-IHUNv@WM5|JW&{B$20E-o}uBJj}o}n3{*GzONGDV z;@~Z0ynRubsgQi^wp{!O@vgfj)e~rl6lOE<9Hope(O7+bTdVpU;Z~1(B)1~Ps+9!= zg$-73DkF= zXTet~uLvj{pltpHfZ}t?;a{==x+4t&=6tr|!_{ikNndC1@*Ij4ERY^!L9=;YMc0Sg znR^(*cLp6*{y%m#%e7N5A{R}K{$A}d0&yO?x!2p(47;dmaw9}iw=IbNfWLm0`(>NX zH7IF8z|gbGpn@SgJEsh+F5_bcfls9RBgGBOo6$paPYapgSP%aa4ow_`o~IXYm_lHI z^DLXCM3QZWIPc;{bi4gRfALQ7Zu>+j6ef`SC*@35WR9vrSwwDV!);+NEb%1oD}ye| 
z_ssJ94Fwu}X5$537^j(}Jx}bP^WcXVy}#@5kH(`Vz=?e1s{!&~QpSgVcG@WjClGPA z1Q<%C2~K!BtcHfNea|}atp(rLg~-AZN*;?XlFIg_Q{e_VKXV}_Jx)>9A51D91%jf7 zrMX@a*^^F-a*D{w<#CC>!a*d*6^jYoRVYb~lmM?Wnw6ZW5OaWb0n(+sM#lm=BBYvq zGq)uUnTpar5-=24T}Z{Q-w?v8cD{CWk25X^Ls7Vr9JXVcqn9G&1IaS)-|v^9!n@$h zESnN6+4;CB3-j__6W4#c>@wZ`1zc(*O*y}r06s5*k|b82Ya2bTf0Y^cIIVV$IOYBZ z5Vv_k5?kzuM?_C_RfV&Swyj86K5e5s>`w{wV6rv@;lAW~fB$;?)JROpAPXBp_oKx_ z&z0O}AkfR<54a%(&T}#%TJ4$?F|76FJR$F54Vn&<(>}NIAKkpd)3rVRrwrGu;xzBa z(El7ed>ePzQC6B@yXxmO!q(zni^CSCBeqcRPp>?>Sb#Ec%4N%v-gt!aKZg77Gn~vX zZmmi$Z4mbUS7>Pj6@Q;Vi4~eNVGq|Xah6TJ3c&r@DaeEQYl)r_CTCQ~Aq~q@0_z$_ zK|b8phlJSbsSyH3NW5Y`dzi^^e?UEvWE97VZv{S$1_bx#MZLGJoGP9^CV9_dMH zk#eWg-B*};l$-BQe*CJ+=$IqC|9VZ+)0m1r-G!Z#D%!>$WvuIH1;q4;8`ovxkH?Ll z0e8Cst67J1@qmsW7U@hIBjye4T0e@5cvz zRBr@|!*&j=5q8U0j>p455@px1)xsim?;*#|hA!cmG6wUGR4yuUAOuiy#swTuJJ05p$!;2%aN7)#IaW$ z3RlK8@(ZNjJj3mriP!e`|VT$zB3dfHcSXT*kZ@xoG?g1C}MPAW6c=zHxZi zA5xO6yWC4?Kv#v3A`3MiG>LT(X!QC>de*`})OED-{Z2F8_FBRVuoAtGpz-|rtR8Xm zfWgh+`7im=K7Ue+n^{4vzZvZhsuzl1e*9X}Xw$~=>AxN0E-a4htXdIZy)##Uu$un> zCfx+F)dylk`47~{ggx&}#qTtJyFoi{x*G-*Ca$3a(by;srIc87*W65a%#?>hze%<) zcljT_F~|27%Ne79>@3`BJ$sI3JDA&MJx znxmN&+(i-hS(>oI(IPw&Fv8UQg)k8U=S>tgu%*=MuiT4g!3 z!zemH)s|zu3-;)^+L;OumE^!b_0tG^RQD@h97q*EBKQfY>2c&k2MAg-snSz9svueW zVmfS*S&q=(T6>8_Ohdp=x7jR?*5T`U;>~e)>ZXmIBIaP5UOM**G&S-(+cQd{t6(>B>J(^@3+r=s@TDFyj|#Kh=xb zuOyoz*RsBk{sVM+9KRM&ZUFmSpX7%duC92GJdU;o=eZ!VK+;Z&7#FI*{ITs-evo_4 zewZ4o$5AQF5}IofwbzlNg&@#1`71(@t;59(4O9!Jln3&9{b`z75LDT=(lhaMg6@O^mJ6NJNoMXmhQDI5Jc zGUMkqP18ee2x3)pvVJJf51xM+fg^H@GyK}x#cc&V!&&iw??SoL9d-eZ(v?*C3XXv{ zppYadzw2ZF*p{L3!}f+qL3^AOLTsI8{;lun);}u^=t3zBb|$FsQWq>+XKoc=-+8H)3S9Zu>JY8o+z!ocR_bo1er;IW)MQ}cIs-$ z@tw5p-q$P&Zi;;pHmruIrd4AzBdSag<~b`K7<=RWwBW-q{3yfZSm*qC@sRPkm0i=JbIVE)#7yt~lakZZd~E0J-io=MCu>|t{?u~!HB`VdEMuPj2=TWzR}7L7f} zrga5q=J8Sp=1AEi)=+@sx}+1so5{^-97AGGxsa%rP9;ZkY^26E<_TEhB7H$0 z;iI59i~xuTDwWVSO!BrT>=beR;Sn=mbU2=cP7f0q&GPl@&H(BKMzFLh7Vqmo#h#ao zcIR6ukxOeqloK5BXz|!~4ER!kV*sCm?TNDEUoEc=)~Dm26yde1GShEo(WV+y!51T+ 
z5+`Q^-W0Aj2$2s5jS>*iE(M)Gyi87#OPPm|06uEEOr{4CioA2=d~nY@!T@gZdvY@i zQ3Yby;hl*+@Hw8KmBGetBe0uS;!kOuFh|Wvdtof&EY7HU}KyfHe=N=FnTeu2I zL5!+{nNoz?)q2SRZ~p13ZWJ>EG^!q1L2bm}^&0IOKa5HYrxy5cKK+pqM8SCpZKW~hTZ+awa{*8Fnt z^*z(YBlPhJPi9VoLoUv-B;79&ZwGLdd%WR-r@*Y&FP;Vog-F24a@(z~SK<%bxSE0G zO#h!UT-j;b`V%!Ip2Qe2yyZ8)oX;VDGE`$GCyVviJL+VH8#Bxq=KM>Rlye56TU^5= z`B4AIaQ}UVyWq16PtT*xcX@~~$`Gy356QR*Dm5KfP*FrNdmNzw7(*-2%DWbHFS`{_ z`8*PD6;9q^CP^|XNvMA~e71AIGAEp)fNn=AGOIJ21DaD>P0TczK2d6!0Kv8(y16b` z4Vx`iKs*1iIa6vs@&N)iD57>1i){#hN@6jqT*w`Kz3g}&ng=yVQTcSRp1ttnrs2F@ zLP`bOBTGwIKz|g}ze?SPYkl1xX_vKDW#ewc;~K)Jy*rj(Snv9*(=G=j zB#v|+efvi=URcAjD{*Uz71GEpE(gF(r?=kz_$&czP0^W8Jx-G{>TCfn%4w**9n0sI zs%oI-Q*f;&)l<8}`7mKP4KO@Tf+Wj}@` zlOFpmDVW*q%05^(cNPI(R_%dguTlPId+-8DU2N9yIuC{B)q6;K8$FPk%DZp%kE0+3IaRz^W&h)J{^4@Q_ zzkiS>w$L7x4`s%p5tI8f3nJ}_D0?O`w$jY*i>4_XjPIg5W{Z3M`3$}45 z54Bj-BGm~a;N+GsE-)>Futwv%{$GE12@^OWNFza?8`aLW`qbJ&0PDxY8?NS&+wm7! z1k!2MV~{0-G)q2ZgblBgiQQrqEDhQx1brZz z(v5(mba%ION=uh?cXxLqA>Ab@Al)Gi(%p@8kAGnv=53z8_xr7T&pvw*zNd>8{ZwV< zAz=!J_I?0|iSTA=-l=^TGi5s(Vv41z8|WA3R_d8TO%ia=?&(2>feXLIpA%RB_kGLH z8OYQ`)WO;{8UY)qW|dHKBYR)1s1D-7o*S_w zSr3@tbMDS~xfO3{EOwaEFqh%b0?M3xPJpweYG@?lh83CeWy334%>4VE#4c;J2?M(t0ggr1Ua$_p24 z-dTL-V>A%ZkK+5y^aDviELb0^2Tp_wgEuzP!sLd3W~<^CSomPNJ2dwA5m>h?L8YS` z@AuSo8VE}wbW1k|v!i^~xO)ZG#+P>{BfjBW;OFQipl@MJfYnRjBu6tw+eaKk#6h0f z8X(FH#x&*xCeeJ_$5l277PXYe%(uglG)vH%!}=$I>7{WTkPaw-AtNl#9f{+8J)6%r zH$$haLFeM=k%;0YZ(J7AUZzw+2hX~M(8}g9H6Y5jpFSyDIG=Oz9aQ9o|FUm z4(_JjAFG5GBsxdin3zJjd&%}RJ?Udz$x0++#j$~A+H5>=yn2K8)P=Uo`i-T!*}@J} zCLD&2=Zi~I+#T59GG4Y2(LK|{n|iqJVhes}Dv0nWa2qNJ8!y#5xe4t23yA_orKwu6 zAG~AtU*(0kt;8#+MeR(A3 z^QvyH4XiXxJY-;%6`E*T-vR6UDtynsK(L>%&985x#Cf@3TjV2wquW|r{Jq?u#Xat z^!k_Wd1e?|)|PBQiv8QWZ6V+Ggn3W*AcD11H(TFjm^o-mw=S;dfrin=`aMAgQ2NvF zq5RD6b=ZI9v75pnt@xcqB^d?r<1*aD?f3`>WJIh?&h9m*zgUo}@2$N`LI&9M&2_x| zCAbg7D=dxyxOw#)_#gHmU(C1Za9HflT>S-EmVzwxed@ly`R=KJT3k)SL{IbRc#wLXoG>LPesN03-jGq83Q=3y=jiRI4zaN6~`s(WYRhoQ0it9{Vb zB-rlH!l7s`*?q%*5YjVC^SD}@>=o1|=o7KacAR2R2Iiy2f5XP-2*0|pA2$C;7f_Cj 
zUJ8d>6W^R1jtwBh1mv{mzsxNI`c!k!-khtBrlt-u3dynjov5?=ON4G&!GIX5OOuCl zhf?1xRUMNjWEyWxPh{Z(gA~;d!^me(V6mJPQP2LKT(N9k&7)v|uzWP(oB2Z($pOz} zjnD=JaQI%G$X_uCeK=02=A5F!xmKUxf*$i24$V>@8!o%>&t>N}OA~T)tuEX}v}32Y zWxVtOIh#kM%LTPs4mN@Gzh$`C)q$7tPoG71bidtd_8A?JTAUDD38wtM;w8@b8RDLS z@{=S$CHm#hmN>W_>zlLLZ+Z!+&>4WCGVf48{c8;NH0rPojr_J?h1j=IB@Nbuuf-Q~|K|C4ZJ#OY zK_pzzb?vd2(K7v|1`Ku7+?1J?WUeQL?-rPtyA!#LMxnon556Pv%=Y_46}cPACTPC7 zSW&8<$^t~-MPtIoCXR89?9x*?w#AbJV5}5=oZ8S}3w&-H;Rk~4QBxPDG{4`Cr8M#1 zQ~$Xr)wI`Rxfeab2GoB>1OQ_(lqSS`)O30t73uFAX$SS3ZOHBQIq%>P@`dKX6ZkQU zaW(-l`;fm)SkmllUl*lPn80^k(UIftH3xAP2E3V*$oyrlu6@X3q!j^o#E$uB2*$Oy zOpnsLrdGxYP^7*N)0~04-hDxYlymM^cg!WgkS_Y>mBaNZj9eZa==pAE)K!ce8)ppb&(J1ftqBCQ=B z2+C$Vsd%+}3^9BgqL?=D)orN9)Zpk;7tUf|lg~8;$}cR?$_x-6QCsa=<^0EtU(i*& zR&#AalTmPR?Qh5c+fvuBwQMPW_`*MH`l6mr{Oqjg?7G=1(r>o7nb`4w%ShYYL&0p2 z@J-nKXTyu6>)Y9j@~q}eh1qn|+*Cc7gG+`?{%=qO{hP25SCZKJ5itSp50n;ll-guDRbznS+EnbAJPO?;q6JK+#ECona)?SLOr-EK#V`xH z8>vnSVSQ)OW7EOahzWXCiKem>*Cus0>1~T|3-6%&|4q?8+pGyz&dTO}U`P;|bil9wjzR;!cs6}>TG67a7lY-ga4)ZX^Gd|Q? 
zObX#I4VYo_!_X{b@K{}{!=UTYBM>b4W83xAZp8@w@stK}64cqTtwPvP-lnC_7@8m* zUnRf_zmvm?-O)WS?;#a<0(7vZv`}_9gF8*Ui`NnhJR9Qy<)NI;C7;6V3r)4$0QXNf zH%v;bXo~H{n8+D^uMr&>#+h`6d?Eawfpe<-z{9*s%=!;glEul{pJ5TcEL%AUErwe& z&Y&2lL_4GduxQ7IvkHlK;nj$Cbx-xhp3`wR_id%v*Vc(RQDe~?K;sHVq^cE2qwZ@4 zD+qrR|K9h?4UIw1UR9&ExknWQ92%(yd^`;3V&`8r5LXx>;)zOfV@(IjMiHvI&9;66 zfnCbOCdT)7pLMh8OnC=TSDwypRE>ppJ$nx0*L-?GH9V>B%}#X%nzDu=&tOb}wHGG= z`YTzGS*WlaD$XNV`05mox^#iz+A|X6Uhs#j97>9kiatcLh*Gg&KTQcB5&m%9szBN{ z{S6s|cg=YBgL>2eu9$DBC&HrSgT^tKznbv-_-HNy!zU|ayO(=r{dlAeQH|Bb+kakI1Vk*j$wn=%$3W1;sK=4R4@#I&EgDD40@Gxk_ydbyu?xa1YdJa;nf z>FdjdCHWY&Q;U}OS|?~F4*4Au0;iIFe0XxlY%hx>Dza5nAL^+mnu}b76$h}YW~Okw zhmEp^yqxlsZMeJRzUU^!F(EFu%G)BXIROZa#p58CfShnSWkKFzIfo;xYa%lJqt^ao z1PfApQczv%SSzq~s{AWFGnjY#F-Z0=PR3L8#Z|k;@n#mDD_Cf(b$QmKRyeSc#%U0h zB%b0$mAkg4SJT0fcqy&10$rq;h1~-)(TNnSe>ODF;2u=%RJnv55m|{Z zM3nV84ThR~%#sU7egm-ip8IA#zH*Kd@kV9|P^=QBy~g!oMt|2G#{6HT&GY z$%R`AfAE41S|F(wh`npw2qR9gaN8Lk8%cd^Zv6}IJq7+u;7hRp=;+!YrKrDi+yenC z3SWLgf1%R!A+S)A_9{^LEyZmEqzi|$Z3V7}W%;eJ9Zq-J_nK-ocZX3~=ZGqO1{Y6( z>s$;m!s(P+i{ZC-mm87%g6W(ml3Aq^KF(h!k*rcdgTa$v$(XeAND{^ey_l1CB2zYp z)D{Y?;^vOZ)hQVOKcza$l8)wn=j{!RKSl+rkJYYAM5ha5_<1jikLwI5JkUc$9r2Nt zaWGY?GGfnTR^9JGf$xSKP6Us#^xoj+uQh0ShN|KmS-k94vv!?a z4h6VE4?}u~)sJow@0sVZ{Sd>5aO^dz<4(#z&T^)33))%`=sM{dPhGC?h^? 
zrJ1F)Ah{AF1cb^}L|1)HXwPNAw9ydWdT$i`-!fbQ`WH-?9T949Ar7Mky|F%xdL?%# z8$H{m$0ZAe$oP~~;60{qOB`wHAih7uFUN}he}?8D3(%d zU${=EP=wbe-dJtL2>_pBZY`ydsv20eg1GO7nA==4iW+MBttCx_5334(5RkXJ4*7j*5bS-V~h<@{bvWpRj4A8u-;=<@UIhg82?LqR=M_G zSP!u_@?;4#WUAC+HX27D(+pT_?nNCKZi~$oZ;#g2PI?RX8SVo2+hH2-iR#}$p4(Iy zNRU#J`z=;)Zs>SYYdp0SZ#bY!QK0`-PgC_T%-=aRil67ci10M|2ry33a@`;=L=y-e z4A8+0#%3;H8GH;V<3{^^+=53~r+~fGzHMZK{syjM!{w{za&q*_bb{70JoZ$l9Sl(j zaQ;$}|IpKZX!zZ3g5J32zCu@|7x19$=vpEl$o06I%EAu10Xv`O(2zO|z zc|gT8Rgi^j3=`ZcNIBu0=O3qCp&)a{g-(VbkLTH@0;2pb&f^k3CR}b@nTRs7D<4p4 z9uMvP#lW1m@@R$B0c}mK*avC4N!)95<34>3sscak#_A?y!?U?eBT_?k0QFLm)~R(c ze|7rftzU=7#2Jqu28nSdZn+ch=jT{0T7Q*%bMuG^>CI?W zuwVYp-H|92;-Fm9W#(cFRC`5kgLBQc1PX%4rsgK3cPUT=IIbWeMmD5k?P<<{?yuUW z>+Hb6xm!8AEZH2H*-{u;+!$XaNRjLU;cZD^az#0Kiv1V}UagK;ooUZl=5-~!#cuyR zh=?bde*fpJ-VFDAY#EQJewlG8O?5p*O}eC%D%bLOM{d-Q3Gp3Z5h;-8ir#c8DaIq{ zZiO-xt|R$<9^~sPbM8Qr3wZ|j0uxTGWz|dZ9~{%zs~ZYzR_RyqVk!K#e)R^j2_pg$ zTBO7XS5Zez=A>|k75$SL3ME1)) zR;QQJKq^Ec9st(G9Wr3bHLRWhqgXnEMNYjRwnkqTwRq=4x_a!v46y%K5nX>3H(jW_ z!4O>hRhg$*?UNccbs!ZA;nVinJ_xtWUErR~UqdJ3qmO0IeW;#=u}DHV<`2{P8M33n z0N@O^M0Oo^xWrAMkRTTpVk(bHWt4YDAjXl6f3HXogDG7@9M=;=r{mve*pV=QOH+O8 z2XuSU9v>AGg>|eYK<8OBZlqUVMwqg~RGDD9r81W7K2D{PAZA*{Q$5!qaJY8P5n|I zA36I$U?`mgrz)@v5m!+A&Yy~nbUYA1c{ySl?2Wn+7WnDpE!dT>86y* z^OsN@yYkCi!(Wyq(ckTsrndU#pbF`q9;^YAFNZ-$nlVM(3{^TSZSkkN-;ILM`?fd` zm|q`42L5%g*t4+Icd$r?!-g8FCl=OFGaB#ib+#cre3j=D2cjD7zHCst_Z8X3w{meDIe_h zb1()G%V;`B9}9bK#m1@dmRTn#I`A)g`26ltZEjEHNnU1?!v>1&b`>>^ij^G6GFwUX zJNR1qo_UBWYOW-^F#e8hQfbVc)J1z}-!M3N!3mnO40JTsbf!jcU-o5t+Az3w27A4m zr!rsAGBP6g&Oq3%28 zw$vUAW;2-$E(eRUMIF5Ye_$m(FN})kB-wIY>Nq-&(!D8z!q?BuLd+O3Fvl;4l--X9 z(fl4rC3znrED<8d5eee}#K8ta5LFc`BMk9KUvnPaJdaE1q8+Siw|YswaGXB)rRgpL z3Hc89+jsA4`RGJ_Eo)Z2QY~^n#w53VZPP{Y?m*57@;sTo{-ZHRzXzof%l3vy@92Or zyP>J4?BX5({V0-krgsQF2#puj8a73s!RfzcxC-8+wf!3ZY|KbF z(pPoi_Q`MY%?Mlt(;m@zn)XGGOjRpCwem^AVsqfgbG>SbjxheuaQ}OTQ+RxN&yq|L z*6_$q?4yInVYBFKvlulPLmVAATNkSYTrSrMc!+0yAbo*M_1qZx-sLrpHcw`aJyz2? 
zVzguhqT3_fVVq%lOQVj1G#mXr^`=PED_8a z{0%G*sO4o^v;oV-uOrxMyjXi8DhIgBVBKA2Z$TJVIOCuaKdRm}K9 z>l5X|B6)#&1-M`lE7eesUScNmx2)zxedFx0{ah7P{F1oDmyspN@b4e3cMk9W9>?<} zvxH6dB|7Yy8dJLk`wuB*Q1K=730PDyUwDziurGZLQ#OQZ*P)#kxm)#7Mys3PM#T*i z0Lqed$TQ>gnVnxsRA*hA_iNm`OunG8x0U1IHpiML0YdE4^_UOtDk#o5)Qus+G%s`g z^%}Dm2=t!}O3Yr|LGiA5PLA2e87wA&=UG{2EeN2(Q&7CbTaZDuvn(0)!B9$s~JwaoInDTX= zlthF2;XRMs8=rOSVJlRR()67H**6b>2J`M6 zq1@vAqQ%6vnakG~kNmjMhWsXITZfr>u@S_ZVUi{Z&rw;pUQA zA-a%i{&_yy`SNGfS0yHIF!cHJh|Am00#)nw2=`U;jq#x+xo&_~LN6;@#atBHSuUZe z6&^KGNp*sXqF7e+34Wdid!vh$&;s zyRK-JVBq2Nr?R!p(N!IwdlL*qwLzkR;fR~A@{;3PPcW>JfxUVsO)TBhjS_$B=-$9v z7SMd-UP)W9a_whb_3}S>21W!2DBS_4|)@LQ;oqY+ulJUB; zO}ZP+LW)K4lg1k&tf)b8aKad(Gtv7xD_?m|yDC)&|DrTP#dKQQ(=ur7J(SAoRPe`tC zZ_z677n_qI3wS7pJQ#?902zmkK2#N^4<3A)?C~2`w613Bx->-3o2S>1ENBD3m(s9f zd3{z87ZP)zSm070zArMw%V||oj^P9zBoqSgzpL@P)aRa9(O;{sCkgqZX3=5WvC$jR z1QogSBh-Onad_f)9dsCmUGB_mDZ6@OB!l=?@m^1hVTO2@)*OJCLaB5Y@cTnns@>Wc z_Y>`>nU8;>tJN8jYO{^?XhwnS3qe(9@fR(ICTIH^QL`6hQS1fIUfTlfq4&}j^2Gps z_;>8DP`aXVEn9q{{xy98+oV>5HJIVu4vYOZZ*hp6#O|XL$6ZferoCvQY zygFQsHwPn36%KeIc+F_r4K_Qc+ySh6Fe-L`l3%THqnN!9)WsFBt2<|hycvMrLFxa>2oR^O=dnXKLhW-ot4mS zw*XgE9Vx`jq<0!xmEGmrVNxD}AMtl515wuMx!;3vZh%zrJN)+U_p;?L)aDKQo$t=of0|<)To^tVz zOpUOD_-pi1zoIulvA9;ISJ?=3=6m(k0Mx0@9**G5lT8xyY-RL3w$U(0760*5qr9Jf zHlu$l0R;uLbFEBUe`7DLf6ZYMhuGt18wlz?cdB44`uNd$uz;;hz_bP-Gj=5Fg(gVl zFUB}DF#{(Uy`x6*xp(Q`-v96zr$BEh+a-x25p8IZ5Fx(wj3pX=hI(=#7jpC!Xq$FN ziyqfppIL}s+(B@p`;<9CnCt)Bt{&F)0{K@xXvSp9h@0RS&gD7LXj#O{43)BZ^pEc2 z?JTV3T@s@N`{M{oy_)f5%R0z)wwc-RU5vc z@ZPpT)-Gp{;P7wEBMW^6y1ag{IBs!e1^er>!|Arf2uK$l81gRt*75LQ6g9L3w=DMG zGF%})QeR@SP@s3I?_=|#dWbuKQZ4V_{L|n20s1Y+EYFa!D9!D;Vd=;z&}@{ivL5sQ z4EMihxSBQjsgf1}3WRJ37r{olW#!T*F2~1u`pNSP7D%IeAT*OFrk#6Ni4dU-qp{r) zi02p;$PJf$j->1PwOP{&jKKa&&5X%zPpI=TM3H+zJuM8GVv#m@#%7$wB5!+u*9}V3 zb+S5$niFdwT)DK4YCOj{3e25mS)J7iXJK54h z_C{o2!hI?rULY?lVW_~6;HIM@pfV|;2}io)bU3Lq0@?-TMV9Fy2)55Kf({H=;wREmwUhl 
zgH(#G37zvhmCmPEsLc6e{7l;HjD;N5w;|H`SW+;FDJ(C}nzt;f|AznnC#bj(m!3;<^wIdJZm_oD=MsXW%g%;_TG2a=_h>)$VOxvJiLe^Ds7I%xgal0wLjJX@~#SYQ) zpz&3vIWI35S;p3;@xJaV>x9x27UD&cv z6Tj4KiR9(fIvK(-MNMp|<`xgP_ zN9eW@`dZa8f>NC}-WW1dk?P&}zm910Fx|5~=SgYPM5&7d4_(wmYo)flSH3PLH2&u1 zR22|^g>!>MNlDL+{!n`N`b;DI2IwBGm$hRHaHuf%y^Z3$rU#G8*Z! zbiiWYMi8Ox^4q>vE{K=w;!D@HEDD~0zPY*o+JbiL{2al5K>U?>k3Xy z#0x>|t7oOA!a5zK*@L>!^S(I7TKceI4EhdQN<9_Ip2LkY89c6N9IVdXL#EZ!K81>* zzb6=;1&diNoI26UbaTk4Y#tSZ+T7(96qrrh^E(wBoEA65Akv`S__dE9G+;CS;>bU@ zB57Ztgf{u8{-1O9OZEFm0O<+g=0)yx?*8s9lZTn?nJC^-F{ z&ma}NSMF+vNkFzWrx~BNE9gn_nuT3WGa@4g^6djoEwFk5pV{mN4!N7p`*#x|(|ue$4O;A5UeM%9vs_eV%Yq|hz ze*9>g-C;EgGRR=G>(RZSs!Inj#f}_^_IjHMIga^tGJhoYT0;57-PPeRKmQh*S@b=) zi>30;q4qu1e+dZAez{Jr-b1EGxGv8h;QX{{_)mbKF~4EX6r*5mVqU0Hw3sc0$#){b z!<*^Gk)yD!Ab|*qB7f=KC{SbFue#ON1(rw);dA!bASHEiIJ9NWOLqY;{72)?PX45U zlF$({lHz(>j4&&HXhHfyRw4;p*d2fYU2M(p{d|ZjY)p^GUW@(Mj111wTUFFZNZ5WK zgA0t!_L1!!e)6md@Su5BB4~%}$121XZc1f^9pGtqZwDDqNY=={^QBjzOF?o&_AYI| zA(es7x&E6+fOu(tK8PLmKqIx1kxd}6kDuMiCDu{z@T{aw&T|)sjSUon0#qZcsE@O- zn8mCp!Ph)!4cQrzTdaQBP`)_T)zC(8V5rHp1`{&Q|Kc?J516#^Ji3-T?-}jLz8jUl zY|$76?Bib!3{sh%O%d}I;3FrGgz4x_=2V1PVc$t~h%6M&Xf!(mXY9WY<3na_ z7Ab4{`z4;F?Ute}=%+<}Cd7R@=k?}jy3HAoKp6tN>}x7*RaB2m&FkPMNaK5Q`j_&B z4V$M|VG;q1k@fqOPng_6B;#KUV@IaME`FXfob_vw)i1C#vg-;Je{&oQ4m42#(yDE^d-ZJ`0! 
zu|hM&igj&wOg5PB&SmN?hD8r?Hh1iETDBg^JI-KfD~X_7pit(Qv|pJ=Q>!HlA|+%3 z+Jf{v8uwv!k?McTaQ<4>WRWbwODptgb$+OwG=>t*arBWgBKxOSfk9$9=vDl=78=|} zj(;;9V$?C5gRuV3aQ}OTqf^?cUKn#@zu&DCM+!B@oZLO|c){peCR(w7jjw|MJDSii z8?e*{%J!DL5vqal@8(gc*908c@Grr-M~4}Rcz?!q5d7Oi_wh-COg<(ap)5?odD4%7 z_bxRnMNW; zTrbAE@5r?y@HcTOVzZf?e}e0t>sgU)jZY6%Zw0g3NwV`;sZr!*RDP!%@y|o892{jF zX_UpHRBK;)V!40e41ZWHXmOX4jTX6Q_Xm)v+h`K z{=~yHonf=36lg-PC6x9;Rq3c^Vw7Qz0A>YN#Lu0nu`x62Py`>!yC9r2K793UCj6bV zmnJQt0i;G7RN#Ii(8+6D?H~|yed5+<)-CO11?c^xMqkrBE=TMTDCTo z?BmjskJ$pl%VC&W3S9(~K#?U-c{WE@7qus%8SN%3=i5tVK`3s#*70BE2`lbokT{=# zA|+lt>*lyDWtB*qW84^|^pr2-yN>ghbx7$m7?Gmofw{^hY+Psisb+s_`qk61dr}Dl zL8S;-i$$yf`h=VlE`7qMr*d8O5CfQ2GHefI!qZ`y7~n|glKFHx`BMt&iO+1J7<_P8A;Xg3A(C6$tkH^25Q zd;4zb)J%41H1Q^HSX`XCLtyT^B%4vIAm{h z83AMK?ORbU>p)Qb-A_Psx(c36_cDpAH9LOF=kSh``l$#ai|}`qndVcS_5$?{fs=KnPhMcQ?^Ab+6*ah>Z12bYua}a?SraiLM18QS8H;W5I=v$xo zx^R|ucYqGG1_X4IZ56tie+LC+IsC~2cTHVX`>$UH->8Jw2Y~F^Udui*GV5!2ol={M z4;<;kULksG1%Y$fxqz92G?))zu|t);;bGxAXXDEfOFR74c6FUAXHrJ=e1n&%4*p)Y zEQmpQD6XMTHHtOAw34w(^{@O*8qedC8&}2N1f)YK1gn~^VBDaTJ6oR6B?mG*<5#Zu zdgT)fvFOY)ICOXGOtxs8Y5lGIqLifg9=bJq#*o^pqs*4=6HcuIa64Y0ktc~@zQYcTgOTkho|!q)vJiU`TApAkv+wY2f3K&|$!tblZz}QATT)GyQx)HC7=6 zr=%_5THfrhcBRB$saQDIR;{)tjSr_!P?{{K7(FTf_VPdB>}^&$s0Xx3X`I6MADlN{d$ra9&o#E#%LOhX2ore6@8rq?w zWh)P|Wm*|S45#6rC{+zq&maoe+$l{^2Qjip7O;$~?e~H2wza&9retds+Uu^H^Vgb~ zL#dSRU>jra<^|!;ub+Uy=jltn5nczCi!OEVicszooGo9qP|6#R3_AT6oc z8|XE|yABOMDAsCZHv6-B3B0 zHTxN)ZGt#`)!CV5Lg9wV1f3z2Q)P zJEO)qy_ES8ROekWP#UU>II(&{^j-WD1l@vZ1zJ=%4@ig6KjUMdnx<;{;;Ks9Z!-Jd zL#XBLu(5PtB@%w(Cx?uD;A0fXMbk--&C}1SwQJ1qmp7@F*}G_w{hS+bpO-x@==ZOK zJc$<5_kbUM{AYLZ*%@}_}Be89@~%0yyqJKOxHdWj1~-m zRt;70ccW~7f5XXx_QJAZ_-_#S1rQ9ovJqaF&oRbMH*b->>&fXFNI4Eb`B*r>pTGJ6 z0u6~q3T}qc{>GP~tyH(mipw^$8%65BWw@|B9F1QKi00aO1Ipz&K08>c#HEQ)?nr+y zLK^WOVS+*+xo+>yLqgf%>+&Uwj8L%u&v5^HhWi@doj=m=a&0MZ>#)v2!6F(Snh@4| zesE>Q7ff#MyKaA}DVdcv9g z_1l7>0K`R1ww;(7lSnUJt>J$?@euJ*j}<;%otY=1?=yEo;x}+^FYRyL8!JB|W?Vw| 
z0Nz;R6+B7N2db~^%c7~(WwYu>rC&m@1<2qvwLJ1ofL94*|56UeV)uL0%2d(@M5w^8h}SSt*5A zH(61HC6sAVekr>RYw7WjMZaKB#9l>z!9)jq6@oipE{i$?=sw>s_3?SB9%xGn9wqffKXq0YF17AF4ytsmNIV*Ii<38y$w25llJy zc~+MPtu(wq8>FoN8Z(EV__nZ?^R_WX;!56Tuu4xdgOZc~BU!VM8vGs_)@iv%Ts~=^ zn`r3P;0kP$t?~zC>b*s>u@9DhAV2q5vY7v4@|kbKVpty#ES3&_|X@n>wz;X z9&X;hgR)R`tx^zJ{X4CfQW=ST1}%j_mM)!Sl8QR(rg5^jvb3t$9~+9^e_uhTy0Yi< zQW5x!QX6d@6yX=AxE7hcJv;;H|6RVW*Te;K^MOkMnHanleO2Roe$B$$KK;x?pJ^sd z`m#3lV!g3A&);|B-~+}unXXcQl6yyr!xwO>wCXo)la90M-58B=^iJN8Zs5szho6bU%D~!N=@unLV5D7EDJ(h(?uI$q+j*-9`sCNT zy@ml#ED+d70R1{9GdCLHAhxWxdjmU@!BE;`T%MK3o-|+J0yYdv9`YkREGf2B#FnQi zJ#JzbXtB==hvziqJgFM`!8n+kgTCfO|Mcx9DUuWu?fM;BiS)*luO#{35ie8$AlP)L zyEBOd>%qHRhwz2qFbDhj?>>$vkJUG$f0v>+ptF@Q8t;dg{y7u%^j*p=Nk*1JQ;ycc z%gkIf{8A7FD8gOkLlSR9DCFJs6kQT#$|APLX-vxF6f@IC_c=ocgZ1^tp(Vl>>chf( zQOTy2Z{*LT52po2ZTNIjUc~kw`$GqQc~Kxc-e_Dy9Asg$Ad(G=svvqd3a^7N%5$m>4055JC7 zKAQ+OtdD>aM^PrI^05^TlW6+^t#WRL?RiE#n0A~yUGgpPkg3X}^@lugp{X0@8JHu7 zdr~dnYLewR!p7F6Wf*-AnVj;_bUy-+oxMH2tdlN@SXC!9BB<^p=9>3^>N#~jLvQcF z$9w_5(?=YOP6AfFg7UFCGvHp<{KaOqd}m99W}ED+%*(+z=42LMWOo-*S+nlZx9M+v zd6M8=Dk;XSF8zok9wJcnz~<)sv)75<2CwPQZpZzDVX9rVTOi#B1~Z~m{}zxb0+{SJl@4W|_I_@c-i1fovtqaxR2onWqo&68JLx zZzYTD@G$VQe}Ix-9IC6}U+qvyr>YAo8>`=C@YaM$4_vodWN&{~ae;XW(`Zm zfb(cZ69rv=^nhAk zn9JV0r~XWW=Vqe)x&b98Smwdk?KjeL>049h3mf=!u27Y|D;;iSUx$Q%r zBZwU$GCFOL#EIlF%)A!qB&rDp$$We)4n-N6t5L!@1Hw4}XSn}8!%ZLO@x)ay)Q0!d zdxn@)^Ys+YrCU36F2c@+qNig=0_k&|g9p>_OKoEYA@Y5zeebYO!!ONT<@!fG*yOR! 
zDEKI!yh&2#T2U~yfd>!DiC%cFi%Xw#)jeWq9lQzIO$2wtB#yFSmb6(Nz*doVIvJYv z=XEO`*L#?3#hS?%k|HByvq-jxAcTzqVI@K$H3!~`D}t5N8chU5q<2L3$^JQBeV;i0 z=_uzwoPb!SdUh(`h4;41_(buRgn9v-xp{J5JFBJj`UeLfB`>8*uY{Jf7zNYoN@Hq{ z*;!WN5giNvR1j5P6pIexXIqy;wp;4UD?&ZE>Zk@nT(k;CrLLBo7>A3RkHmq#ElrE{ z-7ew)`a2c(xSy{+MywX!KJR%AhV_hoc%22M#FMm=E^Nl)>sG$(WKPg}&xOpy%>49e zPG%JpF8iQ}U*%zDH35u{B8I4=4y;Z%HwFk*pD{ zL#?=>4l{}Ce{KKEJln#*pW|0SJ*c}=KUgY@Kph`|yD3&I!Vg#MBBy27+?myiU=i==9bri&7gY2HqFw1p*irL zNIWLnSY^H_0Mwv3aZ7s7CUFy;TiQTGa%*!yW{n|!rQ18Q7v{>C0&DtbYIUNaW|i~A zdl4+A7!<2Ch$WW7b@nF}r?`I+fDjWxLL&+72`pdl*pwNs%bOQniZ*=AGHZ7>p)7?m z5O$ZFXy}E+j=nBzqfn98K&(v(4D+EKn+%Anv6!&{Mqb@dd7kx|gfTe#y zbh`&ChxoRoOf!%VeTn!{dY}8}nLa`8krRF*Z&6`XUXcNxa_((TN?4aR?T)y@o0)^g z8RM$@R6d`T{6@lf3vdE<(X>{VcxD3!ibFN<2y10ovh2Be7v>o(1F9Tyoi?Cv-V%Pd zR69-RDP0roLv{-CXbuTuEp7IXJpM;K7-^75;oA4u{aPoWP2lP;q4`Kg1>s?k!@l~! zGTt}Bj0u)h8~KJsmfnTJ=(yf5B0CJL@5VT#r$DXJ^f8)$!U5ccYSN|3QX-dJgfeiE z1omyleGjSZbc!6gL?4{{^?;RJnUPX?VGtMJPYHOODyptUy>n#FRWeLyuRebFI$%+D z%^FfcnW%wGOV6EJOeIT7NUl1}iOTXvtV5WF4y>@*5 zZ_AS90CSqsT|Qn4^P~3AOYITlTBFO?k~KGYN1J#cGe0Iwj3LmC=gaGF#>7*qD*6oT5(;wn0Qpvni(Fx~L`f(qBMIvm?Kno4y`8|1O$i;;OSa z_+^uOr{Szj0H;Ena1)FdDiV<&2SDSH>=nu4Vz+#9B!%u1)@~j6@(YbdW)u8nk(*QL z!Jcr$SvJI`AYuxbrt@w*;B3q1-NPx3;REL^4Z+j;cB%H;XLYnq+tLn5@}$tK0&QQW zIj1lDOn^mB@YimCI=q8y#InaN6#rVtFLcs$X zO3~HCuV&Tk%%b1=HQZPF>|S0@A%rnuP?-=v0cGPYWnsXz2r0`|O2;L0TxjU?{p@(> zcGaWnd0vnmV|#)2jfrYz6S z4HH*v!9YVoH6mGEjeo0ubum>O)E3e$n8Dq1J0ADuc^8|^jVfILQ)Q8s1bTzr7{`dkVD1Ln7}^pKpQG&vMP(arCGbJogn8wW!$zHGSgbsNqMM^#Cp6aOv4gx^7DtDnOi@%A44_YU|3`s(8mwh+L>C*{a`lEad(Ll)Ig1cN`ql(4RKA3%Yrx~Pvf zYb9`5S(Ys}a9i!@E%AEEK`z3m(9rBy_x%9liA?v4Nf`_x8MeWZ!W9~R7l(6*!)P@6 zXnY?NFRXFYjoCn=P7dNb1c=#Y#t!;5gi4{Qk2aZ{OTX>}>pX};YHJ;b(1PuWQi@%C zMLVR$DD-m*jdAJ)L28yg%fJTSre^$cZQzJh#pMtylKP!!Jo1Cy&G^`gtZ`IHg#m^b z)@pedKlsAj$OtcgN|_%yWy&DIxMKMq_Rgt23}9{76I+d)#%LPb4H~0q(AZ8IyRmKC zw%MprqsC@q+xfcpKUfFrY|U?&xvqO=-sgT4Ma9rMEQW^AYQnJ#kdCh zSo5w?L(=o#(-)*F%JsRM4563-?XAKRK;O~P%p3g_Fyfvo)ctGl+3gAnM|SWNv1nx} 
z+5|KvsB%Bfd~N>xi#P@ihKGceUk*KIu+W7v=Ied$gj+8gNceLyN*o(R@4m8;DYGJz z%n;B;hvZC04gXwOKw9h!Xe?VeX=*58>zI`;hLNb!JTH5tHYy*WESV3OpYs}k@R8!B z;>{)THeVZ5jA{kiP}pori@?kK`7zaIxe7k;E5{qrvbijfruOGbh3x6W&Q8dOTt2xN zqqM=ct9D0lq1tF&<6&SsPVL0D?p$-4gw~*ay4k%gNpBalIU)_j)?sRVxug*`=L&|Q z=vrnw;qJIrP+a5i0xEYXj937ieFkj=YEwClRYqz#x4SuscwSAnY4FKCx^3g{$9q6k zYrtIa<+Sz{PZ8IwHa7o=GmZ~+I#)ixEJu1wNdjEu`-o@e|CqIC78m;Pj_2a*$F}mX zAYPYcY5I7A10K|K7Bf($08l{x1LF0E>~=!>Wlrj-pRCVF6?ZNy`LpX@p+_ zEsi({qs7ZGCaK%s_aa+&o!C_2^@A#o{1}>;aMOdYcx6Zp0cwTlCPl^*BtyC-!UuF}FVDgeWK(q-{V{fOFWcn~Qs)RzJ;jrBivOeD#7O zz3mzJjj8Y-yDa5IVC0e)NvAj&20bSts8X;Ys{PNo7f-j(y9{}{FQx4CpoW`=VPe5y z=vBDkesRCsGmx1#F*RiIA=d>J?Uwiz{9?$vNRjqrDbtZ3WXD*RA7hDca}?fd?$Zg# zlo@0JpUsD#%7YyY*IgToZ$HXCD54w~2BV9?@iDxAUcKi9YZA2-(~xGeaU;GF4@<|# zU=?*_!b0{&pL@ns)XN4SGNp}4U%xPZII}Q&dP38df2<{=@;j7DcI_{u;XMZU7{ri< zm@R*MrFh)_81W2-*4eX{a7?knH~zs56*>gPaE6oRLB#u``k`GCu6(f%Fr5kF z>ZS4JPd0(q(Ey)I@+Nho^CX}P#5Q=9v2^x`_z|>psGf+=->k8@V5ct>+h1AoZ^Wd^ z`0;inym(Z)k(08Lq0N-a>3w%KMVwU)N~=YZvpAq46lpPnOOj7dB(zqlR~bBJ9K|fU zcVuiHX$cU?F$Eg5Rcy}Wk$E4+r!GBwlm)FI|``lmHi459wB)*XPCW zepm=pCOL-dde0O2+%=ByX?_EbCfFx@Z@Bv+#^!@$sC6r6pg~WR>;6B zuN9FGM-O^?*S>Wf2t^W0`N}Zoyx9`&1pL_pkrA=fy_MpY&YPXLl8tGPBA*64@fT3# zQoSyILr@ljG`n|en3G&~DPKi^{mM_y$2u|NlWY2ufG8xXC52OPaI@2{1y{+bBNxr` zQ}mta&n>hUNc_}QD|O>DNlz(2Wr?({CmNxao4+rDer*GJ-6(9A(LSS=G(zlhf0+mL z+Ji1STv;^Xlxv1f=M}H?p*{?7_iwu2f26k%Z7~G1_*5TIkwP?1MGpy(V|F(n!~^hU z!3}Ya1GE3x;K_f=a5Wj3G`%C`ET@1rxXH$^5rk0`JZ-l8lv7qK!0l!TVau92G!C#j}O=y zO@n@I*!3-eGIY_A12!v~JKj$=1USB|pjYj^g%Hedi8QUHCYGva48FLb1+$>j>OI?n zX7%Gn5E5M8v4wT3G9lalhPW=@O;r?dH}c0#h=D;J5*jSV z{OuilEUYu-1|L_#BIXi-BFY%I8eIQ#&0#Ej&7T#2C?T$6Dq5ddyH_mPz~w|xa4`6l z`W_Fdiy)?vMh7kX)z}Dq!#s!%kqMR?o2nHAKI4!#C+H|zZ3+e)V4-rkY07`ec#~iu zD?qFteo6o@rbT}eiO$H&gkC_li*39f!T-X^7`PVZ zj8!y;l0QpCAt}X6+j}-1rj2`9^xj{1n}0Rb0TwYV96C+IkqNv)=$qWg++)`{X zzB^4Gq(E#D09V4Af>l-Y(8CC&`-@*T2KZd=o;nEASJNT2d&Kt+fPz!j?sX0KX>Oit z0pBa7x9}lLU!dM;Y;W&|x$<9+!s>6Rk#GhoRStpa&TXUhtUMbu+x*inavTkMltZD4 
zVDTAQrwJEgxxC*JDIj0q*|dxdaMw{ND^u&8X2ZRJ!q1lDe&z$ZzbcpFzF4S;%Ue1A zp2EuTu_spH^ucHYBha6Vpxt{(e0suFadpb!TlqhHbB}RP8P}Guf}F(xlR|1n88e^0 zTiefsUMmZsIboq<>$a#7BpM}u>lsP{@Z{w!V%)DRazQCCor=~~2pB~P8Rv#B5B^7j z4eBmH7ojl*bLcrfie^YDcipeN`)qp>>(UcGtr3>w?xhWE&BGmjf5nA;M=)~75axZr zc#w>cb=hMWdd^n5SA_yT(rEqVQ= zt7zBM&NA5JUr+S%GAxDQl6(bp=6FdxF+D-52amG)*VvvPms$0b{FYxtW52bx{rSTX zX5XCRsn-i6-#vb!+6mqf9?g(JU${jw<$)9NQO>NbL4&L(Q$+_xpTQ zX||nu)!b5?=dsLhoPMn1zMX3P+YUw*Gg(KjyO7DXkaM?S-lxiU%VCzaXju;Ic zHU5mG&Ic`aL;)6MW?{y@^-{G~3jqc+qZfQ65fKaR4U+2}(tpLYLZH(6b+ANZMq@X! zbw|C!N|Bgx0)y1iF2&o4#UB$nAN=Uv!D*wOWcAGTJ*ky7nXfnfjfvO#F+)taxF)x( z8=#bLoppp%kUFK;-AhUQeS4aDv#F0=n|e}bl#rsRx6g%cr50tJN3RJyomF2&KUiMFJJhNvIZK*{wuIVDihScN zdC>XeDW(V{9gdT%2~O3{xdV7|I@AZ08h7{&RAPhDu{W*nQj|dTt`3}Pk3uhhWcP{C4_D&wbvSM$TrJkk1N(TAN9?_GG=v(T}a)l%S< zo5Fn5Y=COXfV9BlbpM-zz+kjEpwcEbtwd=>W(D+?&+vwiWB~F@uLZ-S^gTp+2~@0(#g&u(WPHh#TZwc`*QfDOKY*>gI(!zF4 z2H-t9q$PBN>UNW&%RGa8(r}CEw5JN9d@n`L8VuQ-01FHQ!o)YjU;XE?4U(%5m=z0M zv$FR?-USgtqHyq{zz{v==a!#ECM(Tcjr+zss=yn)2_s?Jg?GUVo)6O(AVL_LE1j+Q;Yr{KZ?{jm1hmm8E)OrI)xKeV;n%MUrCk8ABQ zM~U2Ja5$8Pu6qLwC6qsZrcMPfmRyGsY2yEn;r{#Ya9gsr_4c)aS-T!Z*%cy2hgbv9 zF%;U)e(;e``(}a0KzXP=u441{*D#;psISomv0X@H+B6bdwG<;O zBCI|M;2ExLW7d})Q#`m9tA7Io?HdnF?KU`9V-$O1EBDo4Fw?q|E%*f< zgeUdwRn0hsbE8azSSkUt0Wa=JOIk#gMt8z&We@}y(Ik!WCT=jeBjMe8Q~2QE&aQm* zxD@fFm%Ay$pQ3D!MnNyl1fNtsJm@){It5rauS0yKN~wc}WXu`uXeUNch=o#>%QeK? 
zdh064H3m{U!h7$R{GLnXgmqiTO?P}n*BYnC`3R=tbSnR%6@h-Z>h$@aQ+W7W1qizq zDowm0qPM}a-Y$j{>5qmPhT!Mc2k%Nz7zkHaX`7xExH*FLS(p=2zTH5Z>u^?tpMcC& zU#G`?5uT!F3uZD!_8d>YXJ5!NSZT^aYC=$R4Zy~%HPb!BOsvT*ru#d*E>!$Ah=1pZ z4)@rh0eSJSGFnUEY=x%j%mGK3irXhS+CZT$8Rz({grk4FV~xLo#+v3s#T+Bm~8B5^Juo*${mSxCP6ErpY%D% z28e6yl`|0r9n->ogK^@f){~}^o#kQCr5+U&q&soI0lvzDzd8Qqyi@;)B)`o(4K1G6 zUf39A`wcd}SEX7&5@gHPqM`cqadRAGYQCowgNzLMyngq@+$I^FX)_p(0P?HtCzduQ zDRU(-c4?*JFFo~B;}}XamJ63#ytt!gz$ll_h=+jF62ea0sUVzF@HQlwhoX+>*r^f} zZugKk;M;ui<}jg{NPRhrc6PPT+QfVw!3hdM7FM;`l|nLCQbm zJo%1l=g(IvEpub!SRpq6x#ao7WC3zIl|XSYhtmF3ouVoj07*$!M6(eN>eJ^ZU}1=2N=lG_#~n7I9y6~bg0M&sJ3M6qCUTq?Q(|}!)}W6 z2DnF@TufrFaM&V9VLXX>0R|tf@**%M?}NVRtSq-V0*7#ga@D?dFT#*|&JbskpG_ii zMUKB2AnE0XiJ%SH|8?m}58kV?-uX;y*K+Cd`u$EoOGqpJI=#TN?6{~R1(sT0XNR%W zJm6>@xVsobt}*pfH%df5Cn)_iur?B|1#;%}A*FqvhYH$s{+b!@DSUu4$*2=W@yVFB zfMcYH295EOJ@Z<3Sr)u^2(>U!R}IH{Ufv`rHf*7fr;h*J7xfO8!_Nd~>vK}UE#;r> zutLatpw(?7BJ@pcZ0U`O!ESLI&j*LX;1V&^_NF)q!7*=S%Y^eX5+o_^jHf0-pm4kz ze_$N1Ee}hHWO?{Cw0di&I8M`rsi6n@v=2T2)JXWehmG;7uz$}cKA{$CzsQ6hyOLfz z3OQF8VkCtM)>9bCztG!EsO+urx1Ga_mekQ@jpU+C;8~0|jmD6HAwu)rQx*lspbmR- zl+8CDiC-jECZzC6O9K--a_gs{JxK3w>G%hp^f32m^UY-Ow!;skpKt$mz;<{CwcGE2 z*W_pPC3iW`HSm{2rqpe!1ddt$Ba0%$BrX%3T*wMo93c{4e>r?unM!}&1?7=wOTr~z zoxDmt_)HG9KPdt@r5yZB>Xn=h95UyfW^{Ar0-2#EIFqUlV=bk={v{AzQZb0a=^Q|* ziQ48$;lag$9nTY;Fwny|iQ3}@V3lZ4HTgYD-&NWUSqLxBa#@%o{w;v% zI0Nmc6%Uw%?+e*U*fyEUm*$#YeC<-jkaH|%z7&aDB?2;Exzlg|{CZXkmqD$H4k21) zTG;2ajMEuj9FrH9#Xt_tCez0XQ*On-GZrR8(@17)LR!+Jhtae-c*xkO65#ex^dMHX zaLH(WNxim!!?}d#eMHO{$2#LIm-~`R2*$l5<0qAKNt(|sY@dtMksy9c7-dVVLtp=< z_K`_j1Xm{;91;ji@VlHJ>F5UCk;JSX>XZFGLSWq041s@{mSi|$XLH&PGUncG&EL!Q zuU5>#{#6mck`0Xcl9!kO(;c-CtLrx>BZoAc!eaPqvOx9uM)qQO@qwM>B$r44vw0hz z7zw@AoNcOT;6wu)E~6GNt2S6=$PP;#=Lta}Mbq2+GILgX*pFAq2ZWqlgn=S)GTbMQ zdr|XC@2!D=bV5Kug& zc(aD&So=rBSTwQ>k~&lK$dB_1={$^FHcdHej7>@u`XzAdTTD$g1360IEghmBC8g?dp^Lmqe@V)@LdA)%gpJk z!CGD8mi;a-3Lo>T;D=%x#&i9~RB24LeLNRus1nCvRS*Dh;$-Ev@ z1&;4WTeGaxT?2qn>>e-is-l=Q?La&ADH~NkMt0^$p{@Ov7{ongWdU4AAt 
zPXK+6cMI%&xKz^t`&pyPvuBxu1BW>q#zdi$#1I1i2{6is&Sm*1sxz{q>_;^+kYc*{ zB1cyp%+CGu;3-i~8z2=@nobfAN{rB&^$`s*T?x!f&|s}&1(L|DBU_+%fE*lhOQY~x za;L|KIk`-q_^w0!q@ELN#vtk|m96$v5HdFR-a~mDv#CBV1jlb;E^F$PdQp>|F~$cA zarHI`+z)nS%$#I7=GJI;P$Y=GL^ZaxYhwHs<(t}(*@1ZXuTN8`&r^6zqA*9z#v%@h zRPtlXM1)(Jz>>K})Fu1BdnWv#z_JGzp&Y0Usty`hHdhbZ|^mU+q?f2qqa77d;rCUW!me-*9X>WV<6IzP5SYVRk> zvmANV9c2+K#7|)`VrqHDoX#y4zP(`Q4m#JrOpuX8hTCsZUnk5E_g-G+V&hAhA1!mm;(M%`%KD-J5K&};l#p}ooY8p+9F)2C{T z1F5x|3nl{#Js~~#(}C4_M(Ys$^yRMq2Z5-MOb+ckqc=gHn79MWtuGBI6~U>@kk%)+ z@D|mjybmv}zvrl>i_}$!r6yQOy;Uy0X9GmPI*1uGcJ&$2CzLfZ!|Ei5A-1ctxTP>| zlKCgr5zy3KZ=)R7V54|GQswqyn1>6?jroRT*?PQpL-nm;4;WT|vvr;@+}du<=MvqD zdQ!&wJw{5ch}Tx0^wHL}7%-B-QLZYJ^!MGZSVY0>72kDN^pb8l%^pwfWB(QS1+puW zQiN@%*D%J`x)NU6+jXE{UXuPLZqu+68sN;Yz_}-y^Ea~Z;@tep)o@d?>E&e~X;m9F zYMz%`aMEdYfY)mjTys5t03q)J4;I(=I&*TN(*u@dmMZ2bg(zl3P^vqvX2`(xmAJfN zga%deOIT2JL!F+g8d(Qbc67%%_|j_5-p7rn`UnGTAxg@;pPx@H1~*HN{T^s?pvNkJ zEQJBl&6#nf;NW-PTP!15OZcOdQQpxUChkxCqw3w@DFL}FX|we1;2Al&b5{KYLm3Nl z)MYQrcQceXzeEEpWCaVG=+Kd|Xw54n{+g;5-ByD&D_YByjpv)DtZN33OBi}xdNgi@ zqE|U@LZxf)3*Q6AOW+@zf3@@^G^YVyDOtb63*6mZ?ME*O&oxoL+GHvC zD$}62^esIkF~~Ny`le4c$I)W`f?yy5U)36p3$;Zd&H_*cSi0HubM}{PAIgw;`2t8!rRx6V zcmE<_dbT$e&zmnCUQnSXMq&zuePzZs_5+5$*)@_3t6#-7!-=O@NaNBE^$rM)meu%7 zz(*V|HE?5J=7M_1gA%}$s@4X<7M)_tvyK<#Aa z>|TgtV5=K}r-RFHRlN~((ilry%t|aNaWq#AU^ja?dAo|2N-5p@M}igFA?7-jw>p=m z-@7U0h?#8wo<)fOLWpQ*GAZ`S5yT4?Yz{ewPxi;KTaEPwJz?>{vn{19cx(H>ajOQK zO9mDfTezcsJ3NUTF}LOO(f%#Sw=5)4SZS6LrTv-{?l2KkEHSGYZ(PjA5{%rIAt_zQKI|)l}a$2$e#zy=)4I5&@F!E*|gL%sLczNgIkX3 z+M8hTepFkAUd;-ma>~kpQybe%=J9#mKVM;9M+{1+xg5@S{OGCcp>W5R>S~$66?e1i zoQvkbNu>@8bb8r%&lZxF(a9Hh?PdH5;V?QtaQHifZ^%`lCN{}n)^xjw<~idz&-IpzXB><+U1p-1 zHG>$^!h24%p#t68Q33+x$&8zPTI~n8*U!CG2rUH4CER^w*p^$9?|_Y#+FpqUeR^2J z@F%EKS9)XBi>#hR?^>Q$bV4HSR2OO?!Ur+|(+t-2B>hi*9}j~N=`IZ1+X%jxR7n3@ z8rsWikAo_p6W@2W(7n|{hCrKacsCSNCFf|eUO0q)5U$we<G&%VM~~d8qeIB)Bcm^-|`c?QDh)e z{}(+`i`gYfcdUT?@jH6MCEyxEX54jMLUj2=odkHqg^gG#E|pj3w$w9equPUV4V!Go zd+kk+vUULz?ZodKlN_y%4`cY4PVfhWL{s 
zeLH#Qfq8`vT_W6ohn~y%N@TK`4f9tUXzQP8D9TS{5B%lro7|`=T%>y+^trD8Wo1xR zyyrVI(7?An^A%BF3Fz#ZG=VD&Rp>kV+W3|Umxp}OyVfQS4lzT&cmH}xlPJO>Wei6# z*26v~OH1+fh!p-syMQwVWN>oyUXG=I7o%ssG#8O#TfML@*NrXn~4=8D^{lYMN{poO!B-wum5>5Y8_27kw4natD65?5f`CD&RqtpB7 z19ILjh)3F05u_+Y@(>%4P;0yg7Q}Orwc!z0_EV^S{8lw`yr)J(K;ZU?YJQtx5(V>D&6o}mQc`eO<^&y`zs%UY6UOc2k zx7nRek%}({`7CNHd*GvBX&8zsHjl9jD(4?SMm_jOO=1MG6JET_v!fY!Rx$7Wz=nUo zjbJ}O93l>sW;{-^-I%f>Fv(&>|5RGN;3YAPvP~n3*+DA~On(JagD!AlP`-V>gPuRM z#qNupm5(@I@OE|RJk1u>FdBdu)_dbP?)5#bOSSlfpC#0V&|9c;vSQPA5f$yUUy1-U zRrcIAoOxs8%A@Mx=Z9D!ADOb$AI+r3mN8mJ=)J%zO=91Ll_5}7_xxvU)Z?L$pIMdU zdSc&Vw0C@lmC1j~aFuyeb^9pYOO^X#=5+PBXpk$gA_P)eZrQcm{#bq-(n~kV?&PW0 z*(+SCucOm=4@CcCxc@%GN!K|e2OAV`WYZq^k4;`Fdf6lFA-t+!Y(*j#V0Tr3A6`t- zI(<+YSX8ABgf`Y1Io}#8p^%3;Xk4{ciP)Avrn0Xr{d@-A=Elq2FE=UXuLx~22TCZ0 zUs40OtkQ7-!|!)|Dxqt=T&@;Kwhq}2f)SI{xA*am(+=1+7Ou8nq84MaTrf$a7{Uc{AzAL+oxJT0r*7CjkAP*$q zB8QhrosX^oIhzLFlYf^xQm7jih$!9_Emf2q*>8XfhQds*lCk^z?j2^2aELUsLy_U| zcKhXL)E5J?u_i#FBjQtUq-Q%prLTGl-^OAbRYDZQuNbuDP*0}HdBhEfn7HF=LG520uzkP56Q3HD*u7Pm zoB;@s1XDD-)?{XOBQewO<6?72oX3p+lwI;ra!GeBGl4%$CywVF7ax|K7?(DJOS-z% zx6bt24#)O2l&B0n|ANO8O$mGJyio|cxMG$Dr$x(x_Bv`FxTA1R7VD^gGvn!WlD-3f zR@>uc9JDJhNj0Of&9g|6{zxl={CROKNe}|hM`-Fa4ZXp1$EGF!Yo+}yOOv0RWM(P1 z_ey!>3ZM~+b_OlPD1H$^D4p##9M7p=TR0LtK?N@gwSfOU8o@F^2#jI=80?g>*;ajqs4iX3_`p zxQ^YgnEKgyd~1j$fyVKHwC6*G6o(iN8*y+~aM^chJ(?bfuZGLsqg)>EL6Sw5q|-wH zn(cj3*PeB)4we6uFUgkYPhN@v$4|P#0P9^8+~UMm)qrzkp(W(@K7x!K#;xR6fwET^ z!WmvEU{t2A_&Yo}fybgwCFh9MI2&!UsdV@0u*3&LrHH2q^g5mLQJNnjvF)gz5k8C# zqB}nxjRsok`f9Dc$}|4~28CMH!aka^D6GQ>QnH{zf1i5iy1k`_29T=YJ-A5G!IntW z#_+gB%xNP7UBy&HnYhn3-)+4r4&c+nTgH-pU-^%r7k4>4kEY76&Ew8zVyk8- znk)~e*n>y<*i=@EeV%+Y~v&+vIvF1kj(O^aSj$KV(EP8>DU)alets+X~&G{T`) z&)FcC{Yu8K@M}TT-Oe3s8s_+P(_%ndUyqKeM8xjL7_uR8g#1Q_tsCj3de{c~ztx)@ zV;T2eNPP5%Kd$_}L3iV~GU`31|Wp~G4j`Nnyh9dKHK zkE?}c3`KIA`Sh2FO6g66#7W(4RWr7#;;5hA1!Um(KJ)e}m3hccCf%CFGcVzg@rdu( z_ADkrtIuBk1U`YR670IVny!&_9M}k3yKIomp4{WE1&T-xREMa 
zg+`FMJ`vbKd`P-e`y7J0HdFHNie#4+7+sj^tfx5k&aPY@e*#2g;k`-;3$c4XW7ESs zj7tDP&&*;SWOlc26yqTO6Dau5VLvZwFiUh1QAi|nqb*>hEV|zRYkZrB;rm*NKe##l zYJ{_>`31k5Qs8pgis3w7%u>wS>r6Xpn*T#{F5tpLtW7ZEjA+B*9+YD$7uTJ!?nF@g z9Zp^!CcU@m2K1WQSp9kE_a0}6!=V|?Bz>%7E_9bvm@V5{KN$)X@LfF?1W<6|iBpEjCtn8rVA+M{MS_-Fp z4@+YvexF0ol(+Yh1wBx6mGI~v%H+WE@6qR#nD0?A^)fz>VT2jQSADokfp94SRY|PGD*)c@5%-@)_P8y+D0l9>8~sE6Na@$`^MHxSI^pDa7};(E!(&O2jlJ z(2iTodgw(D*g_J2SmD|z9{=7;j5AJkU_Ao4uX49ET zGx{ynBQSpDQ!(3H#r{u8gkyx%MxVa_LeR~BaeiQzg4S`INKM2cwQ*{3OF_2PIV7Y(b&ja#19+az{ zi5_BS4k&v)N;8G~OlA()_K-q1U@|_gZfI_Pwwqi(!bT)8@ru;0ni6sAN*(shq6p6j z=*C!vhx~m+(RYTlOj$i5pVZVzZG{e@bgv~>f?k`zV0b|mv58yy1Bdepi5oW!PJYBe z#CxG%!~@AU>LHfE&uL^H{@Huhas#Pn2mcSqdqovZR?L+V#v`j$Ch=|Yb`J)p9< z#6AQY@0?zJFztSEU;Fpv)6$L-JhlTM%|7QfcJoUac%(?==E#nK;75(dkzg2^-FC0! z)w~9uW8H|`lw~l~XayqM5lL`6V7ma;Tg98vtz9Qb{Dx2#Pxol7 zveYTw5X=M*hLJdK`%{qeZE2Wo&4ghjkbsPsS?N}?^Ii|J_W2L#_>A2P8xP1=qRJ>) zMUO>rP&iMDEtP@nH-5ucQMXm>pK&ZZq5^6>aLTlRUHd!DHtYU=y?(d_p`o_j36}tJ zO=k>g70|lsw?Hb?aM-dlTVQ9epnAP;&$l1h{vh`~18cJp3&8xSm{zJJY5A-%8FWx6 zGNCz45!)yGv9YyrOnu^SJ&^rgPtv%R-;rt0p7rg5p_J^45^9Ke;AGU*Ba5gI5lEj< zRQ%<+sNKeak8&HtRMvNPARsWWa&0-}D*g_^3}C89oOCd@x*S%!JERK=j(5EanHcub zStmBJdBw)J1GaZPiahyMKkX~x#LV2J#Ljc1^Cn3Q2iT8)h1|tG0id7ZsHKqzFDUut zpqD}qqDkYYCL@YWa5(1)o@Y)U$anedm%!rih%tk~U#y_&;AVX+d6McpNo>yIqsQC~ zim*D?8ZK=5ZM46c05!$yCXtNO=TkIZ*6%K`=>Pr^7$a5V1q2XmqDmtQIxiSIRrg7a zSt)eea^a4$B<=q(KuPh^IlNA(i7mUKU1YDnh(N|b8nn_1%U6e1x2#{_EU(VQ2Wbb3 zJN>gi_oA>Z1r|)(tE0QDz*Mvq;~W%7stM>*`Gx$MS-g{n1hG0mq3-ehS%3_qGl8EX zr$Pjv8I+JDc`DO5eCqFky_r_H3J&e(Aae42(9gxvYbyk`Y>%e13dE(qA(A{%We(N* zE;mdNY{KJvmifG;-`c>FpST-jO?xiITSXT%Yv~MOlIf6qxo(N5U?WD{Ki$|r_0IY3 z$l0H(`8v6k<9WOMpet=#Mo7B9Vw^mYqkW*yLULr_OtG8ew@rXt5eikx0E0{K=IVSX z3Y3^J_a$H+8V@|iPv_wH(M^YzMfz_@!$7yU50X$k*ASf5Ac9jzXFj%!64UO)bC=I( z{h3rzr0+TsyFOgBqI=OLwcA?gq!?a*E$_~VYBekJxEQR@XZ*W)V zUl9JJ9iZdMxRx12gE`MJkw1*dd5EfDmtlm<4Px$cX_PkkSF5wrk#7v>Qa4sfRNQMR zKacj!Ls}i;gI+0)_4U4aqKEK?WCIEgA6%~90_=EwP1XtG^&6}$aHDe49~9@;N*WN) 
zD)^U8g3|qzG1Z&digoVtV0A1CjCkgfZhGOZo};?`A(=m*d<*r_txhkl4L(_ubNxmP zFkuwsqJF*F>y-y0Av=d_)>bpW3Gm4_cU?*+42oa>rwmtj^m~OfMYYq)9)t7yZCjzb z_b<@{iE)_aM%AWSY&KpKKJ=R{N|)zF*ttmq^}+G5mr*}aM=o{vb8a! zN3t)DzWf=V0Y=c+$BB-uT!S_Bs#|~(hv!^v4S^)(MCXTpDjqxsS`&YH5)MV$z-0OS z4Rf+IhtU~vxH+a%6=?MgyY+|%F5{*fbZ5p3gzF))OM{*|B)4ewW z8WG>*uH7~kd(!w}6-Xjztp!7I2B)l__D(cWwg>)SXZV;z$H~;gERIfr8xb=_`Rnoo0**k$x z3fm7b)xn~8-t?tlfLvcXw#O}RE`bLqhEKyh4GnJT9L$47eVQsw<{>rTTHS9WZ0FkT z2buwI3GNtH-P?l_KB4(9OE{z~#4WhjJWXY0+ZG#%Q)lo#4zWg858awaxW3+3da8kI zZudi0iMF^H(o!{|>pKv7aBIn$1G&a1FwjRldWYN0)v_B|@^Q6y-mh?85f-#bNBURI zg|zWJEtdOOVC{u_p}aF#y`z6-<&BCEI|95634Oll0&0m9t)g~G6U<}i?`Yf8Xg(+< zGs}Oc`UNN!lRlMa)6Oual09V4NOJBX^!xYQjLop%*QLU>xQv9yLFUTs6wm&Ypj{ zv@oC2^S3D0@UZiH-OLTo7xyuUH+mx$BGa@=+dCL#5L=wRzD^=c>9AX2T4mp=S74s# zt;FcvBqpfaN-QV{Ftop)rrik_+mGHlXNW-AZ2jMjlWpd(DT-8y)P_dVeA^Y!b;~1J_tkkP1wXcCX=c<)5dhtoYt# zFP`b^Ec4uGEQko|LE2-W*~mhxO=`A50pHQ^;A?Unat*zA z@~2iqlp(O4Kk|9b-rv3t@|MDz0;f3NWN%Y7%jxG%XL%fM=gjw~nUpFj->#xvO}J*i zf`fhbfbK!!JdC!gwBX^9Gd&p`p>0`=Znf%~N@ZN6oKDj^041dpyOM=D21U&g>$zoB)RoA>W-Re z_B9>=C)O&HGjg%^U=%~VP-eH&w2bsMI?`qG&!ZQX`drAMA7|>D{fVtKv13@a+}@enNl%2s_ClNTe zVRIHwTFd0})uZH4%qcsvg!b~r(#|<1(^@$Tb^&3HmBM|&^j8z>Ge^;|!+!MhRd9|~ zXsfwS=tiswli;i46Ao@ROgBegg_!N$KtkDvldw1P(LipY)6fXR34s2NV605p;mDI) z(`!9jf5xxmxfADK5pN6R4#sEL6W}D#N!KoHevU2ZpPXqkqp^qZQ{J0N@C{Clr9dy_ z5ENPHehEHy7Ogc~$*#(p8lS?t7e|c1DYCYWuyN1g_?L-jFRa1l(Z1)xz3`a|X0q~A z|5QunNf=jMX4wP0Yu%i(|5Jv`VIpO^ zAcTN}OqZ9ZTi}mp{(GwatD`JZkxOH?jeyD-RyV-IjL_i^Q}#Ep%<#Q7(*H5sf1lx0 zYI4gCMkvA~aIgEz^E!*UzoNxcnb z#>y(^pqT|H=olcXUW%|UuInpS*GgOd_9F*nYBN?)3+1WY^|5Bd>nD5RO|FV`qsl4K@o+9C?tMoY66N{++25CJq6$dl zRKKvG$wc&uD5jIPng`gUqDVYqtP+rtd$*{dG_|;FF0lVS}TOw z38;vwp(jI*IdGNt&a;X{S%7$fGVkF+k_*$H>)9o>TQ`YR_ci8{4ivAq{Am3tEEA$A zW_+`E<2;@dX*{XcFgP-thIDmg2`G_Y5S%>L;S=`@CjStH(xMzKC=gzo9f_%l&H(DG zf1YxNcZ{ePX9KdqpT#F1S0GGJnzF?hIt2`E_!|h90lz}*??NPvW-e$m#jW!OC5?!| zp+F4FRO_qxq!;@fklD^0-@6bV&2|Z^k4Mb~wh+BqM zG>;>>iv|Los9x9x1J*rJKj2tvBM&qI!dLYXWyT*j!e7?0o%Jc(5i07>)~_WF)Oh|p 
z_I&RI9z^qRC z(i;+L_Wnc$LgvQRP|6iJ)qg{2q)XcM_Q_b|6J5mVQC8r>wZQM5rt}7(JOp38oG1zyo*(1j9DX`l7PZ)@pqIP9_;64dY{YdWpkrPr!Kap7fd)`q}08kr zEbqf$}d|`=T1v70VLI20TfbGl~L&ki)Kt>Pg%P5{B8{0{jTom!0){Q_V zaPzMjOKrmz{+dYc%pYn%J2RU2cUq@bEtXAsA8}_Zn3Ks%P@IB#TgC|y@}6&aTZ9GmGz<-52=@fp%R<+IV~|N?L{(f! zl2BYsj!V7`>Y44^POIADanDsEn+XQk6y&+R4cD$oF%rxDiXjxFn0P6$Q-AR)U0Q^3 z(`N!w-W)!qd%tRG@Py=Nh`FxNcaa#ecz08&Dc<)+K@WiW;*cWgYT}G6WL4b*UO4HT z{RT!8hK|JsWvMF8`$}-n`HuA`cg9^xg3;1$RQ;18yG2`SM++JIyDhN@m~${} zyvoa;Fy_K+fAt;TKTV%wE|=LUPMG3AR~eXC>Ds1*bkMpExcCkq3&X5~I*vzWHcXQ@ zDu%0SatQEfWXGuQVn@ciT1Za5KSy)MHC*Y(7~L$y5g2_elLsg{fAW!78sNkbu%2Ar zUz{23+#KLYooPj3khQuj&Bigbc#Dbh)(-?!_^HI__Sx*<3Gmch-l zqCzBc zlvLSWVA~T)cXL6ldX%1FQ79#R5o?R}SB0^PN+k{JXMPO5jdHTRfOb$QeE8~zG5=O& z&rq5j`SRVhB)~RI(nZW%|2?bnusdj8tlw6gf@g;jBZDSKWG46Wsvfz&v%;PXL2orPm96pNn1U+;rnw2@{UqUtVU%+PQ-{waZ zUX;YoZph}wD~&KUaV@WZJ(ygIuvpvr4}o>Lxp7wh-Im6ms$T91Q31X7ziiyEHno5x zAG#=v`@dzlqAObdrBx5eg)FO4Zq!Dj8AOu9gt_52_^C-WJ@Q0&JG|VD*T{TANDsY2 zCzJ*+vi~#O|DNGCKo079uE0JU!2nfaxp*1km7?2lzxg~)@Ko`5j1@2->+nC|yzZ*r z^u!lCYa--wcHrcR^+3!n4c{!9T{TKFIbdczHo4l0v&R zN8x%WeQMeuDtC2}QajAUsLP&g3h=Ii1CmLl3DM4|0oj8aE(fyQydw;_#v#34NgPFU z?B%`JV1wgeWq-WUk)-$p*?kRv=x4{*iN4kQ$k~sqoLh&Hz$TOTx2>uHNlLFLO9q@! 
z#6E>JN#w=#h-~SxVP+>P$gIebO*yfMcUJU;$jEP?6Iz}_=fpsV{eq3osMEs^hz&&I zo#(Tpmy0r8cqdhiW{~NsIMnri8|N9uXZnYMG~R`TF%nF-W>+*?LQ3c&jlB5ovG1j4 z%_)nCAB^jP`T7j=3S2{%Y_Ge_?C?2SopE?;Kvtl&XIOemizyS}BG!%xfO2a4YLhF0 zV7Y+- z_wF@xkGgJcrwm|3Hn1j$Ix3W2%6!@A<3?gq=keYObH<8PnhWSiCIX)W;LZw!8{TsF z-grWi+q`hG*Hzvz9~*T0hlgO3uL5i(lJQh-yo|HOIC_49@EVzb|L z{UC8ot8OI3U&M$iByG<@9m86gtx@PSA|hFJ)>uw{6ikkqzcA70m@o>J2~&Q*!$PIZ zsBny)R#B(3NYC~~0}US^`?cTd-Eru`1-&GLWp@&U7_GPXu*p^r_I6F9{(0uHNA38nu#ocJ#n2|01~MsqvCkQ8Sx>2QME z61592Eu1&L@~$()YXR2+V3e}#`WU@Rc~Ks@c@<=}p50Nq@owEmpS37>5TAbp&WmIN z4!R;H8k6w)jr^0Mvz*0 zTvcegis=~xUiMOJ#~HuTX?bAyI#`Wlima6IQ=`RV^=E9`6#0|@%@)f#8|9ir-KJMe z8bL&}W;yg|R}f2hK#A6{6S^@dec0q&$9rc841iW*$jZ2Kg^*khvA z31$45-&HNckV2yZeTe0%5})7^qkzpmQTlOkyHi4Pc1_1l$jxdfw+E*ue);qWe}L*gCj)Zy0?p$ zu%KIGo^2X#hJ@rGu~t;1%IZwDWxwSJP&Zvk$U|Hb(dBIhqoa$Sn2wVX+QVwm@M6uD zxGM55uE0ArkPvq0FkY}hh#4}FN#?)cHCO9n~0Ui7k@NHZL{47-5X_&ftHX4)n zToxabTk}kD3sf95gF*QTUcUWArVkBA)%AslBHi4%?YA`4JjjP{-z6q^o$v1i#Iwv- zw{s7@1nrXwhn)MZo4MPvPo1*0hYGTD*dKNPOUK@amPA1Wm5V>Kn<6a{yZ)@zKbU)p z)s6UFd(+hbV|f@V2a`tr$IN!zSbsdFN{eTxcj=eTlzVr7|BYNApxe*Ybw-Q;dF}jb zy25YkTo(zOY*6P`0D&D)FMGn%xAOcSv!?9k;Rjt(;0zQK0)9y%F7G;2%K$JyU03?Da5 zakF{)$aSj}jRHB|3_j>cMPUe-!QL5E5y}qiwRB-wnb~*Y?xyMFyMTN9wYX>~%Z-3B z23g3qCC>U?R>==2gQ8p1r6OMzeemv;%!iQ+nesM6OR`D%$02u7V|;*4fLRKfw^@=J zHBh2xSn6|^3(7c|g2Ia&6FSL%pMB^P1dZWM?#n+d1m4Vf*`iKW>n@^Uw8QTJJlSX0 z&syanJdw&n{=vvD1Oc^Q)P9kK_T^xJu2+9Xvbk~C81S4c*<6%BOm zfv5kFR zl+PX5ZIIqCuhLF4@ge`W3|FrB&A3iAl-s-(?{jh2V;kRf61i1!%rVe^y41RW!_tT5 zeGwb2%6osmb!xXukVO7}hWp<$oY(vMLOz@9YH8_PwqVVVlRL$25D1s_UhJL)Ke`cW z0M~{7(M%Lv;32>9CULA8%MnQhBubJM!o)hK60+>Cf7yMI+wSrl2>SswA*%$<$=sxL zmQjz#W^2!RE2^guJTQE*Zhzuz?{DMJ~@35(!o-X%N5V!;Jo64+2NW~ zPsl>xawQQ;*tPuYzmAnz!Jh}KQ~P~GHYFy-LENsXrC9PL9tUxolhwX`;+4R>YPr31VzrOVH%Gv|8k47 zj(_;Zk9GQE+yj%obzxiOsBP%A`deU{39$!(H|~Mm2_gp zOj}!38=0tvuK+V}U}yAOT9<%jz3?pM(iu@FEQ8jKyu*}OgrLnj4WKN}rtu3RQSrD* z|Hy694R^|zz+Z4E&Fs+1?z8?_3S!AB%=h&2v-XfEtYJsbmA|WP57TrNgr@PDNF2TR 
z0j6-5xam>;KN__ld}VAt#U8c9ymxxfO56+XHH+pqP{WGfu(o(8JG$?ETTy0=_W6pQ z%N&0`Wi$%SmU?sx><}Wzpx7bbkwSh+5PU$>6uRRv=sP<3H4zPl+jCiE0Jp`g-CPu()i)O~goWt02CP$@XPt#^mYAp1Ybh;81}jmXc7i z3omp*BN7UKsNa-4+jxet@m03{JBFGGklH{LnI}os*ff0z7THB_pL(IXIJ@IG*zsxZ zZwyV>1Sg0np{_O6QBM?DEnL*;(?sz7T9HqliA51c2&4S;$se3E85nWP?We?}|yEMV4tgu#z^&t5pk;QQL1s~uJMw_Wmr zaRU2=t*`R55D-znvxfS!x20;fkaZ99z{mGeR(YNNJ{a>tll}aF1w@FSQu`*oo>pVM z>nZzl6-X&MOy5@Y93HzRx1blV4j39)-ejDom#gmAdS}L?8zjwFriDi3Id5SD&Yx9o zf!@ODEV8cUT2nu(0(zBQGToy+EjizLuv(QkGqEZR_)KtnHRd|>Uiw@?eAG8Hy2^?pBXMh;;teo@LiM0y-VlZiO@B1@kT6Kwfgj^RyMh7HJDbv zc%-r3AyKDMoJidD+qd5UyX&)LNDPxx%xdDKA5 zU)O7%mVE&TQzr^hPAvwH|5oKNiK5g*FBTm;JTD6m!8q*(El#fqQkHx+X9hqsJRnZvDz%XnA4aIR`-M zmsuVPrWezVitv7jM(?BxtsrQ=$GiLZWos?R*iRr8nL0xsaV}tL#%t|QY^21W?l$c& z`RA>XXKQu`%hc`5Q7>oT{_nCWkmMK9Yl*%C`)_>PAB01Ga%6nuKky+ z(BC^P133H2pQ!vFVrR_A>T}?!sSf}GTwo}{UyNdAAI)D~CKUOYHE6aijT;X6O_@vq zPcuM_+d|M{e}kQN+%RhS49BQE8?EE)rlLSo>jTB*r64dNO~Gt_k35T!E!z;2(T}Y; za7Mw{Yi?W_y52%t@UMr5N9Ju`xB3wNpQn^AI<0RQ@&b)Am9Vo~euVaf5*sjU!#MQn z{4;f)7QZ%h?J&oGCkKWT6??7=?XcTPLFg(zAlv$XHNNOC?UWVqx)TLn)Mb2eB%=Kvwk zeou>_(v`Tam1-R&feNcmzKoaKsGyy-Hvi|!xc`>n@`AC~0^IQ_R0#+UJ_tQuV}EKZH*b{s;)nT1&Kj}j6 z4%c_LJ-t1HRT-0`buGH+XlotV}X5WM+|n$k0N?VXq@1y zX;YqAYJzn>yBrRr+GS4GAouezGp{po!J+ISHxd}?1l(Sm>9dm6zSWBcHFQMr^>;rq zQuE_~;C%*E$soEO@^G8I+;WD1a1)n%w) zDN0|A*%zhE8yoCOA$$LIdAv0=1(s2n0Y|h1@2Xu{gIpoFkYr&!L{1$^?-|;u=Isd> zK#*$6OV8G+0;W??8ddb1Q8f_Fc9$UtuhRO{DjFRG(3y%JrlD3Y7cr~Ut@u$axQDUp zZhAJ(gyZZiEv}1!7RNUcb@mUYVlgUJbv&0wzam?xT3^Tl*`m_<_LublWM~}=`)I02 ziZny>czDPtav+CM^=RPdLDO?trQ2ixsn;6WhCf5ZbZ?=YZ}xJ^-Z1$2-R~xKm_C}K z3>Zg%aj1nPCRt@F_rmx}Fg}R)4Bp=$761D`gotHsWY19XU3}86j%-KUL0P z$W6W z(kBz86T*x$R8p&!OPJdpO~9@RZ&~@ECv-a60{nQ+I-sVQp(by%i(jidNjocnR@~Vz zE*4Ba8WrJ_22d`x=bWUGe{AwkA5*mzISOle=xEv=r~>rXcGlG`!Ar?6H2Y?gw!IXL zh@!EF?{6G7V0qV29(B;YR%$#LS!s&2tmq>#)XbA;Na?2!a?{v~k zAWtlN9$VCiVH7}^h@&|TeOiS4d!aOA^P;ljn z(nzF(1GglEcVegn-ZnD=)pnCyw!Wr^l+@wdZ)eeo#MLlU2@}2kRmZjhH$k$L1*Nl7 
zb9_ZP&Dhi8JxC2OpE?Wl!XgI_klfxtV)+j}Y{S{2pt<|&xYV&C$s}v!UZsXzvB)oT z)l9kndQ?V62nuISl~opWo1SAfV?RauG0x%{?Ym)f?RwaQRg?=Vn~gYDf#<&m@eYN_ z$t_=6JAH*tv9vp4nz(d8eQ+YeQwEX0^Jl4r2J#HpKJ-!COGnw=yU2=St{+!1vMP`f!*(tFD5m8gcG|l} z$Ysx1)7=GTi%m{Fuxuj6_vxBr!PW?PvjZ8(XD}7{X$ySrXlG!Xj2)LT>(bta@G`Tk zOTw)MMUaH)EmM=hIL8^A;1xhZMAUuU9qO({e$Wb3YZieyHaGsv#)um7?e4;B^A@BS zRfyQU(ALQ$Vr9}P(7aNOXfFK{K%v-<8XT8rodNg%zW3q8z1--bi*G^M-V8b@8`0(^43X%*hkPKE8{=W~E*cCP^iNK2c7x5IcF_Kk; zXcn35I_}fX)4}#9<42g=oPM*v`~a7^y#jKB0y=K&?~>c&um>|&Lfhw_U`ww)`R9{2 z3jik`TtOsl5a)jH*ZR7J?bd2G5^ngBScUaU7ZYNt3=oNtW!)jM?k;M4J#flKjqorl za44m*-QL@@M8jtY1a3FAXGC*{kVez!wFRT9Dq(>G#(8z+aoSF~gT|IP0O~j57~<|< zI@fL2j&b3wN~N8=H$ernmA-Bj>LAHrpvz`{epi?=Nri@oZg_RR|K2L=^iCg}k9I}& zqE`SLq|mk8Ua1T9@|;Zcz|dJlLEE^}of^RvEk#n3B7|Fj;qK>@^jcG8$Z>{Ruc_2` z+yZ%pMLnn3?E4p-d$AmV?OyR>`sCzg-W}qR& zn7{bxSepjy92aiL>3(62&6jzuiKPK}bd-?$Y}0=9 zE2KC`L({%&8?)Dhtjo-B$82DyLtd6+EXeJv6L(A^(}6W&)oGc-vSEGR6I-oep#zdG z!va&C@?o-{f<#&6!?azh;Fe}zH!txNT)Yvxp@D?%&E{34Z$VUSVoH!GdkcO7`PflZ zs1vT*8k)?OI=JN`nQ#84CyKP=)v`!M%9;gReXsFyVIXu^oK9Y-_}?;IxIIoF)aUY- z$fl3Kg2>xv$vAY6R3rANmfraq_k-Rqh6MBsBCY0`tNDaKheaPKDgV!K|9gge@9xCM z2zAlZL=&N9-B>hJ>EbN&I?F`rSuS{!W&SUlUwe@nn_WRlq$ezR7pCf(n1)5tu_GeK zK}7uCqId>8!Tx%uNDq!;6AL~c(x6oO@@F=aI>)2*t9_=`}5+~D69~ENwA0V-eo6Q+2PdENW2by4K@3rEcO6t zfG%;WLzG>mjeI(lPDjCH-#hd?yI2)Fp?9Z(pByMv&~YMIwGGK(+)gAqoX|{8er>Qh z3|Fzzas0!o@CA_B_ba&4vvtD-lb4+=B>&RFw6ZmqNtBXi?^UE3RsbXu>ju12PiZqD zAryEPy4$I(sXwL`s@klFE<%U+7{N%^%pT_FOU@7F#gLn~LQ0p=a<2Q2cg&*lJYlLI zcmSn31=V3E@ZM$*vB4(mpUk6VqT9VkWh}=-o0y?#ai zMH*fUzARYIm-rU0BVv>bkagG5hbrY>3Pm&X<2gRLXL5zxW$99?jh20x))adHFuG|Q z>|?Am1e^*JHBg}mKlmg1Fq*q*C9shGG}48EdxX%$gcy_H4(+baEro|EGFznGB}#`H zX={8W>tG6CXYp@k{9O+}UTc1{SIThgu?x?GJZw`aaTj(9f8i47B6B4MD@*JRCHQHt z{F#uy=4Y`bUSFSML0kP}hG7hzW$^ZYWfUOUd7f#&33(yb&6OU!3d`J^B??L_zl=SM&yqp3wQ5#5nNBv2E%xYg*$f?UvTT4kBc?!lt)Ua7de;CILX_-5Oyu`Gl8bYqq^3|Uqx`^OKJ7qRPV1X6#2Li+!tv?@s_`CW$el0z)xAyX>G^}E>M^1Tf zWdoK2#Icr|)@#m_AVWjt+r-VTmQfJmXiWoToX8j`&q1{Vl&l?Tfe{7m@Yt=XdTW%8 
z6HQ$`Q=Y$=ubMOU1X!+^x6wCShgSvL3Y+y9H=*)7x8MHk(=h%hJhgaE2C`6B$uc+J zakxEHz5_(1ewXDGN5R34zz zqW$$@E;A(7BZYAFE@>(1*;{GL2!0E!V_PH!-Y%a~8BDhRe8M0l%7|LT2gsoTWtN1+ z9Ek)=?p!b6vs1{f*=+FRK*!N_F%${#N_eULUa;2Tzzt>#%+4zpMMUUlS!?tLo{a|xC(S;y%bExO-h-hNgs~N+%G|<%-O|maWdO4 zM*}SOjnJOO2^4j-B~6DD78aFq$4GFUAyoJsa_^mwFUC_(;tIxYMlUkND%^DKO|bQh z(hwj$wAK9u?>sW6naU39zxrlfkEeBB(j+`5T&Ty^WrbF_ly5KX)PQ#?94(faA3~T+Z`7kt%b(Kmj22$yAJfE7UKNQkXkdddI@8j- zmMeDVTZ69gXH|`#BfNn#DMony6D|kyW8mMD`DZO%;bv$L+DDGGdA?@Z1PfUCns6ot<1lslsjm6|Du{Zde0tQA`s!Dw7}sXK2{ee@Bxq* zW4j5~t^G~lRm5V*!X|Ev1I}*+a*&&i`oqboO#XN9$V_%Y1oDDvQ03dDJRbZjV5iB_ zITnGb4=$jh#riNfzmPqWbwjYl2A4Lw(E2C3u9&$1{DY*V)3bOKWnNBZ;D7;kD)ebbKYGEZeNx!gU&4&wO2Q4te4l7>JTZ6^ z#w5w^IlazD%)cLTeF%45Q#lea$n8&WUg}-D0>FB&MMNi4N`KnX+TTtday?8mwFy3H zO4(5tMWZnfR}YBlJ7p`?N)DHvCvTVJ6&M8@EP`C}0$pw-yl@$r0k5T}#1@mH%`)!v zxtw#>JVNx{X<)A0eLkVIFx{Q=$!6NPEZf;)vY^#HY_e2+HROGg40z}>IT{HPpA3(f zxO_-o;D5xDDaFsfo5lF1z|Yx)0&<&fwn%o0h8lIXleE44z%2dPVgM?6p zss7J!|9gg0OM9))&97gTM!cI;{($NEu=0z#)~rdntRh8@-qUZxgNG&RPTg}>9VjC+HWwBHGc`RmZpbHLJ zQ2~nt_IsC2j4_|~cA9R-iOwUQZs&$otO+)9s>^$nDR9({rc7C#RC7u?VmK3*?m{3v zx9#GbvEmR%T(wuK0Qlu=M2u^O_l+wKFhHC^IOH^HW9tfni8qY5oAPvdeS zs`};uoWImyHu^_g=R?tX_fe8kNwoM(j>XwDCwxSX@Jc2JO@FENP}O(wTz;hltU&qp zv_h1^s~T?v-sO%JSATs0xKMpl8YmW0hMrnGWo$XYG_yxkc8;HIy%j2VL%n#wi9xyq zJ?Bb9F7LH&NfsQLcIBzui`N}Vh=x+LeLpJ5tFdpJ;<#2afK1HIwzP$rNy;-AayJoG za{i;gBHaoMmbbYY0b_XD8I|2$7Bf*xZRFQ&c()z}dS$W=4o}cQpqZ$R;o#tNpZ)!I z=2P=liH-ey?-9nFlp6EQvk(xi-*FjR4SVl$!g$kcmBy}nt!YzF7!~}e%$fypF9sMy z<2WRZUn`lvL79B2Z$9k~`z6yo6a?krBK8Gmk_Xs^LO=dhQ)r?;+p2wEj0Z zvjBc|gcZC3G&;(=8JP0#1XjgEMikwN&JV(pqobE6vcZwsKwT4`jIaj!kg?nzeihHU zDRiI5lD3IdyCNco5ipb!9yVM~dm3j;H94*dnI}xicL^-ZTc9X3Pc5pl0m)IrxC-2O zCUQ^OrGzUP@cu2WOm|Si!y-k( z+d`YVQ^86MnDll0p3gT0uFXZgROM87?$|m;G8c_YerrW)s)|04x184sAe3(YxWw(cYT$MPer(6U=qHU zxNX9Ux`hJ0rsW}zcOI@txf3lWVIt^zj+;M?xrOax_PDU-P*s4*8e822fFf=th2#P)N4`$|iZABcoj3|O z)guR5ol=BYc5$=%;B5Phl>O1Cn2656lP@3Y^wL4HQmriwyD?8Hp0M@~6rXBT3z9nJ 
zQFm8*KRH-anx#63r6Dh4zG6lmiSBuUwfzg+#uAAUQRw8C@~2TjW!5ryFaID#($1jn ztFeD>lhQJgo20N6A#5*GJEx6q_Tf^$&gJR$B*M#-wPU*+ItTT-Lx8|AH`isZ2)B!xTlI~!1#p)^46!wpluC!+X! z7#Mfc74p^9wPkVBTh2wzEeE7t@-F)^;dkT|wX`kl)^RwKHmm3RKe8ppd8P(!Sa#)2TI7bcPNjtWn>N zy?tjE#n_Ae4gwZK%Mvv=dy>jYTMU*kEZxLwdL}D!zISKe%cpbpf`q2uGd-^`TNBMN zYuNR3c225OrfS?_zEUm-7v9+gJl+E&hW zkj|7^d&gW@SVASi6_9?ag825;o)!Cx|G}G5T@3{+m@VdS&{tdRNYiOjlABL|e4&D4 zH0yuoHpH&Ih472>zh$_5_9~G$6%4ZqJH+qgoqtL8D!8~4&JpSut^NUmp)oWc!XC_H z7rB8BBQB956<;6q{~7Lo&u~#amL%rdqkm0_@oEMvW1)A*Mo;FbVG>Bazlt@SI)don zl@BA{JZ6s2GRT?=qoYpJYsjrD$r+a$4WXYP&%xA5&JcV`qzBJ>0rTK>9`1))GW;M? z&SvX1=I?}ip`hhr4)2crG#NuS9F|=9hhIe0EVL$I6iZzj{<81E3^2drv;18pbA))Y z(`D=Gxt8FN`*o=mr?E!FvGf*k0ATh7F-|C^XqjNOiG?G4$8z(@YG(g8R`%^@zDecbc6Tq!M~T21t<;Kx3M~WE&sNO5fXhFiu_ey%7rm zuOVsy-9DJ{P%&2&e9-@dn^N(@=^$IHGo=)1&e7(81o2aWk6^P$4i{npywXRk$uq^~ z+M)EO#wyjGBRGB9mzS0R)+UQ}u>W$Y_S#p~YBn@~pb=nEPG3fE%vyT(%T&1fJ6?X> zP(`f+k@W|v-=6IZ=AZcrVtKQPPMLQY1FkPLj6$&T>xwD?i)5yCC)$^H+3nTO&}7VO ziS5Uc)SOAmrFcH))SoJV`#>Qvf=;<(3+-TM-j}lSP#<3SvkE27g z9}?vZ)p?G*(XGKaB={uk>}uA(in!XOSr-p}SVeJgcBaXdYC`qO(jo?mBH5nr62n|i z$9{Y7jwA&H1~v{;If5qbHZK2?>FX|Pw|paNu{?1Nx0HFV`uPZUiNhj~{T_|gzQwpr z-lq+~sIn|fb`kc$&$-N2b`1lSd>&+d@xls9XzXa?E~D)m{N$T4ln;Y7j#&)Qq-3D3 z8wocbGtj|&QHLNJ-yC&j^s$yqI!>~s@fJg8QV3+`Ym>1>Z~c^#O0_PYh;$I7lB4Y& z&sVwUg6GjV_W_^ewb7;~&RXbgZimXR>|_2+cxU8!sRX&rWq#IW9Ro*tw^&S^G-CuQ z(0^6bezR?HlW0F&aFyBkb?{JPkCl+m(xlhfB&d^(?|%L8JT3Bkl#hXP_jGQBC{ow+n=LwS#gx50*iF!H-)n~vpj3%Q8QHX7c;@_YB zSpgKKi%fzNC`wEneP0^L1)~Jy7LkXIed1K^ZLmP>v&QnEMrcOgZ8;E&u~PIpE6W%I z!{D^Yk%bn&0Pedclt=NsA#|u?McpM-#~ac9G;kV@+~f^MlvuS#gMJ0JlEcsD$=ge$ z$R1WKn7fA6c;|Z|m!bKg*>UeH!PsKEKklqBncm$?fKd$oph_(8LJ5-x|kdVF>VMg)II6Lf6)|eN($l8 z&w($zl^>nWImMnCpP)+z?HR?B3NT36`78b$E~OXxi_hl+PJr35x2h&RkH-`_TLaf9d16p3w=fPu-f>+0Z-E zpj*#b{ZdlalBufzIB5WhG-t=O?=RluqV5U#UbbFeJ?6#8Of=vdcV#HXYRW*p;D-*d=Q&hg{Pk< zLl;l0nwi{ED8e8m^C(CaUbBw`jHoUW5HZ$MAGKNJ&TF3Y(TwWG|_(ZP!%Cy!~x(8 zDe_0@VCzq;r!Uv3kaYgG3cm=a^Dv5>IS-koRRG!2uXA3{f9LrPNY)3#LZ%?lY)8`x#%ChX 
zgB?1H5m|fDNWqmDCxaBxUTmW&ZOy8WJ}-h*(BD351K+N(P0jBZsLW@vBLwgE+A{S~ zuiuPODg%Baq0%G&^FzuHZYBL_qqz%La^L3s001S>Hi_5`@ zo540wAG+;zzY-ZvUqQ(YCw&V$sjZq>DU)1(Bc?mWLq_O3|QR8c0n(6}`Auldz~ z4wVGQgYDE$;sjz}Dq6|Q=Z6Xzi=X)BgAzdDojQd6zTnY9zas#8CpFCK3`X0 zqjS=r$gAYW1Isbz>G7n(Q1|KJFNCL6c|E=VTZT(c=ndJn64qBnHWXw3Q&y%%y3~2- z{B9O=*ld4#;_G3DJ+!vC7BjN1F{X@;A&C*q{~7Lo&v3dk^+%<}j+Q%-sj6eP99%W8 zLgohCUq9&jG8w_t{sCkUeIeRM9)wz)TFr>XB!xZeF+1t$c6IWU{6c(77eMFJkbGFr z5&aLl9Vs;bdV36i1;~#og{D3czYeP!-T{v0)#WRcThDHXMbB#p3n$9 z^E&M}WIzXL{Ma0YpwJyQhI%vh^krc{dpn80oH()(MS2#i2|S$WU*L?OghVJeI~t1@ zt47Vni45NbX==fAVbD;o0tU~`S>;=TH{QsA3x%!`5z$B#k|GbDKC0wz;NxWtPzkcK z`|#}dCQbrxU$%iSunPZ0f;|01XK1vemi(g!@GA@>taTg1je4b4KSDfVwNto%D#$1p zzx$)`B;H^JPBRIUR20yvWR(lO7M3I(m;WUYF1pg#Ax|z$YdhRQhWko9TBDwt_Q$&9 z5z|@rjz#5&0g+c$1>LNrPFGHlY0M3E#igf$6nbbpbQ@-gzFm>c9n37I&RtGdu^?M~i+BI1tur#)SN#pyNas z>(i=DXP1$EtVON=VX-y(>iP9uGFW!<2vs&hFmzOs{mv(`K2c>^vJ()B0WeUC*{mit z!HSL0GxTH(6<%ME0Y!%DY?WDYUVKW;eWKq0W@+@G z*eYx6C*il~{pQ46(o*^|-v+qpR8I8v&yiu(gMbE|7gykAmlqi_{BMT{CEuyERPmjg~&K&A;!JGbj4P7_hyUrP-Z|X=a@I)^qQmTu3MbCnWLu zO9=H*_RaS>4%{{{1QMBU9lbPW+#;EK9RR^Q$stt(N$8f$zgSs@-!+e@=0; z^^KiBWKXIN^m`hE%t`0u0#Y_{pm3j1pE1NIlSK-~oskk`+PxmFfmj)xg4k_X{cN0}JLBZeC$Xg?I(# zVxxXUs?FSUW4t_h$N=W$5x_zABnXjVp;t@(Bv9$F8gp1e+=e+xJQkAmpcDAR8gz62 zgplKgZ?8&;#-YhhM-|5!A%v7$y zg<5I!anjw5rwsE?R8@%>GO~h%lMrxaEZI|I$f2EJ!_U>&*_3+0K!{PDuEkdHgck?J zEtSYi-ZaJwb}<8hHT+mYhOZbLkBDU%^3&+Q+$ZH2_46az74qc;42?rwt61u^7=$en{>Q%v)7_X|CkIt=~bp z@nnGyf)EFU;~jo`$Pcw3Pt~qlHbn$BQrwbI0$r26b{DE`f)WUX+t z>sgylV2dh_Wgsvr!gkcV7cN0apmePNnS!qsQW(WEUhuL6e>s9n^&jWqq6p<^R#z5H z;RX%6$?LRI()}Un+~j8#mG{_|dVwio{CQTNcqDb=ch-r_Be7gT>i;p^f1lwLE9ICh zm{253Q5s;C#EhBT)K#=p^i=VUpN85#Y@q{Hjc`@ipJ?ySf@9X?0R{HNC!cCM&&Za! 
zPxf$}yStz%#`wUt@J>yq1l8}((luPW5Xam$=CzjNeq*XnB@^HYVOG~LGknCuvlk65 z`0sr69iq*b{`)^;21xYPiGwTV0!6Zc78lhy`Iey6g`AbejJ+2S*&2peQB+=# zTijyP_|;~hPm3Y^I;S(Cu7n?SAyy5{^&C(M+Xf+9T5Ot}9SYmiwRTVZ_;4G3G9&4T zXzBw!NeZ*@VO6VHRTr)*JruYA4GsNmLaoK;%UqyD8w@@nvBsKwH;s7S2om$}un7v7 z-Zzqq6@d7q_H{$)4+@nWijP%z)@*yNhpXi?(-sL9y#@$AHQ!NS z2R(2YbJKc<+hLz8v6lv3CQSIEDn$LyuGMVz9M}T$ok#Ga%Su?OS5^|Sg z!Jm8*@zv01d>rd}vch!m286mG!#qr@NMMwa39ZLzh-WVgxMM6tT;|Oc=xo;V15w>b zSgA!*CcO97>MHm5)Lw}T>P8q;j>`a-PrA_z^uByTX?u6*is8SF5=p$a+iJIp15Yze z^4MHx7f?V21sRhxE5B$=f}$pREz()lGO0#4cxAu&+ZT! ziD1kyYH-7{;PR79Q~Ig2T|Edve9T!A5gEa-m=K5LPwE}$;0Q;vFX|%Z-yA&oVf+ul zCsl#DTeBVVkNY@pDCK5uBWrB9+8lCs;0;eP>C+eRJxd_K`IPs~CAtr`;|t+8A+i*n zk!dGXQ_Y}qN=Qp!R~+?7gwzo8o;lv3Iqq!5<`41thE*x7_J%a;Pn0n> z*c;Z==Nvk}8%u2#LC6C`4Z&565sx}Bkm!W&D4JV#eI}oa8-3o%%#WSUYCPr+<15KN z)1Csnzn#X$_%bmez27b6+KXFZ@b|AZ)x7*Epx1i%(vtv2GU4Sn3&SvuhceYV_{bi! zjk$Tr4sfg#U?C6+D&s-?VtbFm2YL=vqY%UV>qD(avHtjz%*!eIK&S95eCgz;==7`BVqNgG zC4q5mnI8yt4mhJrF1mTD*vyaH{wDs>>fRHCT=m@cwf(8-2Mo}bIP;vyVv^zR!KT}# zfcQI}lDd*}Z7lcVbtSfX2LlZ7BR*L?s7|lllsSL&=cBF^JhXwRMojK|a_f{?zXgiu z(`Yg3Iz>&&ab8*jXMEw@?++Y)Rd5@)qhB4!tb&Y^7=fn0xhAS4NTEyjOby;!5seg6Y7>3>h; zm$sWu8-u{uQTTxsx~)~+u-PmO9r(eY`!2;5dc`%t!Isu2kd|Zh;_?6T+kjc4$&q%T z0uqTQbO$B6+ilXb=-bTk_krKo=8pJ{I`B7s-t7l!gOJ<2f=(iS+EA%&rOF^h8nMX^ z^ETtl_-{Xd->G3&1FYz|9t(7Pzy4bd)(EVki^O{*G-GdX*x;&9Fqr)IBO5;WTfl|V7sAVIcZjAMauRI9jHJx za(K@tYPF)|6@yX?EebaPm?mG~(YThh&vr;O>D_Q#l!vslR{o&dPtRErl zqvX4a4D%uP3gtN9-OHa)FTeeD)V^=zvr8lfEQ57A+3#2;hvAc@DaRPdq|>m5B?IMi zM%>-(M*-rpHcXZi1&`LObIV2s{_rqXX(b1Hg^$qFjQOUrdok|z=4IG*tx^ZLqo!SZ~d zSlm6YrLg{>L&Sp7uTzhwGvJa(f?B&0Pjp%U5&Ab^te8sr{)>9sd^$qe=Pf0-)9QFA zsg9<<7aC?H-1-=N{Z8fa?dVisuW2u(D#&){$-KL%_WQ4silt#>shUD?Tvo9dWdEdN zVa_&XO`{DH$-l@o%CHsQGyO-S|D6eVLF?>Of+XG3i*~jB&L??bZf<=kv&PGQ!GR_E z2jAyE-NQv%1lADIs!j?O#edR;g!*acFe!)VzT$%Pjpc5XwreTXH{pH%n4IH-McB`K zx;GD+|1sQupW#YY^u848##hY#>ghz0A_x`xlCH}uT4v?8n0_lZ{|7|SB5uZ8k-1irv;|GhUs~LkiSILf*rUpp#6o>kGq-gy?X7i53zso^&nx=oe-%ZuX+8FG$OkUymg>Q@S+k7eq7hyX0gyK&%dq`dHxBk* 
z(julS@gUEw>(<`{|JY&$F0pySQL;w;^Y|lCRG@;sWbL`E7znxd=@P{?OyE!rjop6$ zGAE709H|h#gzi)u0tA&=Kr}pwAKAhtwDyUu@ZIy7YE#NXg?tKwsGBE=*NcS)owEWg^ zegpcpuup9u=roYmCC>{P%i%>`Dau_l<>=hx42fdQ+90`HLgxfD;4y0p!E!K*6Qal% zr-PXQj(zMJ5xd)(ujK#UY*XND+&}#j{QURvE8%O!Hz^5Jk4b7_r;1$Mp@hOJO*c^f zmxQ(${nvzAU*E>~Qd-6hS(}W6^CBOEq+IEq^ zO3xJsg6&Kb{}`CdGvAxg9=}tebDC^qxftj#o{ISo(ISa#r?{Sz3+gF z&(;r==`H)?CZSZ$ZXe6TIJhl2zy-3rlb0|}i}+g*o_BWbSm;rI-wmlHe`SlHs5=qj zFa&Px9`DIoy+niY#A$o)w9@_JiHAC|yPX}Y)5~)%RRM|a+mqQke#NhY(_=6y_%0ZM za2iA9uQx$bi0v>wA@FI9&^c*wJ~N67d1&8YL$8~PaVV>a#%GmVDA8yL0W;V80>}+^ zDYh=;ZbW5@8j2D|bB!$at02lR@p%ym7_*sGA4+Y8086R)`9ZCQI8>WJ!*09c{<#)d z{Z|ZdX0}1XQS|bz_*$M4&-mE+hr8Xx54J5kiIKS4^rbZ*si@DNzkZE1`JT{$I&}$> zxyHB;b1*QTI_@GSma|nqlaFkeMcJzay6lq3I*k6?f{o19LaEn>{zes40mT34r5h60j|_0M;|y zb7J!cIMI8Qicoc1E4s_K>Bu+qYI0_NL2d;t5Us@2QHsOR=|Y)N7cJC$tWzAyRVTme z;*4f-U1`q+j#@i@ikMH5m<$;d{#6g$(mx@}sNR`bP5bHZo3$If2o`s3 z-YTw*fs7$vUD+Av36guC;Hx@Q2XcQH0Gxnxa>qqUO!KuL-Nu0eUZYu{R=juwmP|?8X=m8E*qj!Ph@?G6(nh?9e-d) zuW(|fI0I*F*anoJqWP(tJT*r)X&%D7N4=F~4FsMCbBlP&Ss;sK->>YgvtyKe0#dHM z3w8!6&vYxn?mqgvf*4#r@UCB_h8{UTILw*C@Ve0!b&Nx}YuK# z|It`D`O@~Lq<>R^mYs&iE&o@6RtMk}@|Wfbk=wKm9kvHvrW)`^-Tu=M)UdzVYQ*$c zN5ME|1JC0dIq*bNVIRhHF@djP?9YQ9aSB9kLTW3A)Sh69 z>z*slH&|%=2c(ieUH>V=1xAc~)@=x&cFt`nOE=GXZ>I_Kf=*$xitRmIb2dI%+%Tpp-d2P?3R2yGzyEeyd&Ed?7w3F>|dkK z_N{c~3ZbH#upI~JV+m^FPkDP7WZyjb@mrTK_8byI`$R%8ew22u-^>JY!ouYJ{c0=p zWP0=%K?X)j9}Ilv2Br(DwKaR>C(l5S2{gWJtX2{kS0Y!&R|B}c4~w6pQol~PV1!3% zovVUni`>Qu2Yw}ZYOAYCXXlURA00+!(;C-4?g2$nnf&?{Jh>f-0&>5aM+MQ6TI8~MH}!reTCV> zJ2<09mWdT|!IMgc5_!JN63czBD0(dK^a`T!1A8DA*MQMPsS^eVRE3spZ?7 zY_bq-dP99w_>Nil``?sz_(gN#w`Fn0h03N!K%{A`K+FN*CMm>qa-#i1%Keh=&H0~o zS)eY_rm#5b;$KDne2!#zdZ-A|sofHGP`DV8MnwgbzE#LZ9OpJ+ z4CDhyLrft<&rLn6$ki%Ph8x3bi+=&7So+PilV0t!Eb_lhQtZb-dAG7; z0hwuS@uv1->nBg2n6a2{C6fh|3-K8|*{+$jHE_=a;ifS(#anqc8 zs%(1g05bDMJ=``69)mbN)(edfo&Yx)jW1)18^>Xydt|8uh}t%~$80Pw=Rfh>vv{EJ zx|}B=hjvJ|74Dc|dHmc6rkO=p2EE)%BnuF#gx#JQ@6DO-j3Yumy;o4<=`+a%*oC?1 
zR@-9NWO+?5EuY?t8=EeN#53#th)(FZFu?!k0SUve2-c~EKrV#pWi!#mv~-MXSsngp zxxTbW!dj^Svd8Jq5khx-szw*nF@?(uGmkJP9R{eInNd+DwH?*LbUssWnYL{!y_BTB zB?O6DaMRgRV3{UO2GJnl`NbPphP3!l?`NkuOUYTkpPkh?kGpACN{IHaL66+??4K)d zn8OWWBf2qoZoobFx+k;qIoy0zLGQ4PbAa54OKBW@lL!|#&AcRI{4QwG*-~dYsgv@qxeoCmwMl!)Dp8|*O*0~SGVFO$Cw2KG=j+?Qo0bfFaV5B0m#se>g?)9? zSMwFvevYgwbz{%V$PAC*3teATuMR~}+EY4iCvRhpv$I*l zB{8xtalI0Ry-Yxc$i2KvVw2Np6*fBfD&%Kv#&1^Jb3t%}V&n~Iro-UZKHm-N#LAeO zYs)l)Qd^=kmF-2`rT`nl#tsm5N@~2moH29|GEIPK(3{Z5p(DBbp_{pMW)Y8l`x^Xo zF>~&jWg}7~IowSnh-9B$^y>cfhu_x*?uVo)j~+jf;WOdx#)wReM#V%t`BET?|%@&O+UoWQv@3 z+;yGuuh;|E{G8)V+ieEE3Vj?64F-`VN>p~d$kYO&9HnRjY)mg(AFHsaJ_?E2FyfoZThr!kIIg*hA!CWo`hea zi2b}kCv=eIq>xxDFc@^fjp9tf7&~NQS^aDr@UAl`t2#9giD7M`_vk1QNHmR<@}Tz# z#8v&ifh7xDE(-S!iZ9((QQsw^+JF%SEJ*bbvz!~$D&{mnZ)NE|7cw8w zXe+J&8c(@J=H#Da7Iy>N!{!jydv*Hh9hX|fpUohU??WI1EV*ngEck&o{sv zmP60{g)^xWQ*iLHy_(iSDJS!C5;p@s$MHX9xG;jRN-chb&>CtIP`r!~6=gEEL!+VX zM*5u}Vb>E--0n_(dQ09438EQfKnB5KOuXE?34vAYBQY2uQr=jEJa1?v^TD8mK}-?wBo(MDK&&r z6n)s%{=6)d@QFfN=zX#om}^o$pf#s{$FvZv=RcH#!pw*J z3f(8Zi+A?@@5f+&EEiQ$;Ne`>UMA5uE+~v@_%JBz_}%z`Ui#-tM9Qw7*=RC87*FmQ zXVzfxqcm7&)kK2H?a9uoXwPr3?GG@9t=D`86mkA_NG7JS#GGY%t-9Ut!X<+-slg;+Q z5<{tXk!N8i@LWV@h3@0!sAPH&2{<_TSmW|MTl=IG*?13;>(pU_x`NPDVKtKoPm7t+ z1}X)&jj+~{u8$8?8ZRhd_g8V*vv9_KqCjM^@Yns@qafCL2k`Inz=>)Tsz0p&6#`$r z2z~g2GxamZO$K>AF!D}OjqL){ChrBCeaqY41ePmSzuHZ=2&-)oq1zJ|#Iop7Blm=J zma4!(2usQA$fnGHA~3qNviTx%_G2gMpHh59y#Lu`&z4j*%8lT?o>rM4VAeDZTJ?kHsB{Vg3=WmOoPT(?uXuUl|UvgtQ7 z0!hU{YUD9cp6c4{anO=6#i=onJsTHgci_Ji*=rvP^=*`R{o4diid#xGhb$EjjmWOd znTb<1M3u9U_0N+x&2h}(_Ebkt`s*-D6?`BC~pE5cHd0uk%ED*HHDI;}V>YzHUmb#yh7*l0rWH@C) zHcE@{>4=}%V*Zm!1Nz|6>RG{%!9`fe2VIOe*OY~A;PKXKMLS{ey8L0AkU z2>PKsA$~OQIh<=KldRxU=GJGg@{UunL}RzfJv0Hd{W2)umhVVuE^7loC-HNV1d zeSUU?*m26cf8>X|$= zsSH^vHXh?O6c|U02|}wdxTCE#AC^e#z+jlgQQ6GD+~rWPlf(hnU7$bp)@69PoKrN{ z(AeHC7)HdFCaYU|7~#`y_fq&Ta6BFcjeU#q@A)lW7ez{TTk_6VQ=z7 zKi_XTXKX8>46=T%Li*Q1JSB$F@p~e;VXeuIN?h}o3Bv{7rqaDj{=}S0&5zS^z_~~{ 
z`>7*I;Pans`GhYk#a}iShlTnJd>uxInT?9S01p$&#+?JVj}SqIW9*cAm8fNbE|<(+ zc!baI?24m@K?(wLY!xCeY);X~tP5SK0p_QRjnsE|#j;%#-_}WsKya1V_i48hBk8Vv z27iI8<+7TPzjcJ4w+zVloE$~HL3iNq1)@M@v8r8Kdm761=-ck_`I`r{BuAQ6L^MS^ zaL>+O)Cz>mPy?YK$Bjk(>4n9tThK%a;U9xTxpz1KEM6lTGxWFPBLm zBg=^;Wwy96qkR$Zoamwo>$N9Uqs-{kJ-0g{^!{q0-LvhRdB;*Z*=72#dnk(NT)CMV zmYl0`X-w}S*z{QmVtUTKGQoyyNRS<*{HDeo?Ij>C+Gl>%GTAQFcCsq=f#!{hm(c2YRC&K}B$o%SHYy2hr3!zGuB4TFqsCEg~V<+v;oo&HyYTI9`<5DApM^7~dUNzr7%RnNAt6u+JvDsaZ}x_XHkv zX|m`fJ!eA>qR&IxwpTWt6yod`r;U-^op;}gut0NKkuvfEKeCg4M!N{oIsHE`W1sK| zBKc}&w`YE{1kjrQt1roQ1?81TpAm^IYz;BXFR%u@;`s^#5bH|31S7tnlP% z(0}rL;S0H3ZQ0PrdJhe)@a~tK^bXQ!ygxEvLDYO+wNp!W^jCSrxxDy{OVKDo%W+YS+;u;QnS;K`ZR0>s?+Vo0U%htV1L4+q2?MBn5fj`0wYA848Bku_J@g9|no< zk?xGZ5J?5Bs_r(VmL5dZ;h#`^ViaCXp)82Az1t7NIQ!DT$RmyP->G9!Zp8E>oIOEf zSg~m7kjUWl_eGz!=*D?4fuZ5H{;=ChyRkDc8ZrL7J_^6|9EgAX=JeZ;fYK1W!7Wmt z&6r*EVDQm&3=B{U=egX~X*Te((S-dv(rEyb%%uWlJc0|c6o`m7Xc6A>^01n=^09t8 zUeM=u2*$vtQ0H5BzbCo$-c(q&kI+!=A#&hRYBpQTmiCQTXd~EotS3n~5g5D(XpuUd zI~Z3-h)|Ts-MIZ_dD%=ZjVRCP+@ZdAsj6O)Q(fIpxw6ISHW&&*{ zhk>Uj33ovw6ZRgFa&7+){ORxsK$kqry-9X10r1NWt~~fZe+f_t)2q4Lo#A;n;}E^2 zLG;ZVew5n42g=1$!uGxuOp2VoCs&+ZV((y8F1UtpR1&z(wZ}XY@fI6xlHtNNpu^lkj;smXg!9hov2T`TaEVt^J}Ph-X}@e6J4| z_w7B{f-*`7c1iWw=ITPXV1ZN=5k}nfl*dj~bzFh0%3T7bsJH=h9ieb{7AEf~Lx;t3yMQi8`%RC~xpu)vkJT`w zuO($FuNV0bqqMIj`-&?2zxgyHDjGN2Q8NtO(T$lUhAP)Hnk4NygHuJ*{B=Od4QN)$ zcvA4wqh;~AEt-1%Ofa9bF=no8vghZ1vQhdspSdarT~;x)R%+PIf4Sq{=kNbG_j2Rd zu=i$cQFzJ01oALF_;pD~e|V4>qxClI#KtDZw)kdpY=}67@ceJkfyyZ&Ge=D3RY{JO zMLH_nueHZ}k8d$(0w%ZMF9!$u^~&t-wHMA<)OCLx z@o5zpCBBU9SG3XpEpVk4gaknzqV*0*Tt};4Y1Y)Z8`!mR8gaOSSxhf>E>o<9&j85~ z%M;AaX9iBm8M)FS>)GYh-d)v{-41b2HLUCKOfbicF0`9>RY;JenIw*+qMiVAJ5r&O zY{u^Z4LSU`66kG_JkMSph%Xzq^h^aMjY34BHLk<==DV@}DY>AP0)E0C`4tW4e)r~G z&tEapsho{4vkM0uKdG`I$WA5v2JknNokJ&-|BkOUEjc3Z7i7HwyK?|P_wQ^{E*g0E z|CHhCy7u_&_3k;%`^wKE-3qM=#hi2Qtvt`IbD1nlaBRt4UfMTM8|tUVTJs_)<+MV+$2QttT(I)g8` z_A}xBk8G3iD6dm2rIeO8=ShcmJ?n)`v)S;txR6t~q_8(V 
zLA4mB}YO3o;Umou)3F*v-ex#ncJg{Jf2?r0n4!G)z{bWIEZRV>|EQFT^p9GfRbct<0@4px-NC=KshY=d|;few*SUbA@_^!Iw z-!7dTSFRz{7!Z@Xa!-e*-xnI%laT@6E67x~LQ#AY&cVk`c-q7o4H`C4LS%0~WKTVk zegqQIXuRtOgTfU*nn+y8(Sl29*pkEQ8r===%rSddy@7^ug7P@B=>dTx)|Io^?esNr zJ@ZP8+iiF|4UU~qKcF+F9Z{KzxQwJwp2Ue$MfqjF_?^b-Izo|KAM15Z3D~Zk5oJ~< zBpghr#S@&kZjxX<(HH8ax)Z+`co6ld0FN%8QWF}pvz}*%158WzP$<8n%jg6F3Kk&&D#|7(N09eUiRx#GZ57%~D-bX?{9EFj}jH1eeNZD`%Ym+AK05=&j z?0|M(y~8;H8xuZs^uw6C*ws+&rb+8UIio(Z`4Efd4%w07N%o9$p+Ts*Q&xSf4ID&{$>f*;eGX0;d$x3 zwlYU*2%7}7Zyq9PvPvEEPCdS#Og(|$muDYSj3J1f+rjprS?0l>#WpTv17 zJND65{Q3stgxpQPFVdrM*0VW_+@Wad<-QLA@AdP#aEE*z9AEp zv!7t=86dS%71y#BlK0j1YXxi@KBbe^7zao!ji{bt{Sq7^EqL~p`90VMRWp27%~g!D z-l>j~%>rbtNRF!Qn_4JC55@fJ1%!=q@0w8BWH3w>8aFK>eh(sWR@GmR&aG;K#q&sd z(F=fGr#45yzG-Fa@3KIWdoT=VoOc_d=QJr#+$?Ei@4^RJuC>D<04XduQZ*y644BS; ze$QT8H&3f=M`9gFsQ}xgb1U6e{N1oY)qN~)z}Z7J5W|hQ-KGsKT+(}JK&LZtj*ane zk*-`Fo?yB4KNsH3DpP!X!CKtR)y0cMp3_!Vy<76%LZiQLyitZ7AV#RlGgl>UK6)^1 zGb$(^FMwC}{Z_%+Jr8?)+#?eMSgiNQd`#J&IZdbM=qGy?0HxKic@{#HOFepz;Kz zrrsMTbM@v05F|E$nc{3E0zq8<&RowlvhboQy} z^Bz0kC+a9<)!x}Qd8R7;99*nbWE^1VAv|b3ySpP2L_GttoL+V5Bh3fQ8+&7AM9;}+Ix`-wFsK2i%%0Mw`5S7u1?sB^T~YYwati4FJ#G0^H(EH(Z#D)ht`GKTDmJ(OgQnu4-TTUDy z3LU#()~E*iq0+gSr-*zYJvcQ;*YFm6-O;Lx@Gb#pn!LrtJ`1;L+4=|&aXOegHK$;`&{aoB(FkHdEtaTdOHs8g#LD|1f zE2Ny}g?uq`kaex4ZH-1Mzavj6!S{@>By00iV#9ry{A;afQt507kdi(mop4#WXP8qT zfX9+>IjCi4DU=r@TlH;Z`S5TF(7Cbn2|F)8#OtaSCpWJ}7RBqMS>n54YqWdEuf#9` zb2^RRCIY_7oPWf=cCzP_52B|YAz%=#ZhT4@-P?`>--o*hIT+?Ykr;@?{JtcHag&i5 zewC;jpNpNl$HD)PU0XE}e>n^4zZ6ry579_VHZnH4rasK2oa)m2t2$&BSWFPF7e|;m zTe5Bo%3Y}1!VMVz$8i6BhEvf@9UHxJyl2joYu9|W_>ITLu{!wl=LOQofF{~`7O;6o z9d6@X3-aV;mR6bUc5{A?5ube}N!5adG74$01=gByfh1uY9MtNbCLZ0UG5%zRpS>HA zK70%i{}4iZ1f1s|oNkNV^s6kT*gq6_nVkCnMP~On!QI35)OA!`1&g;|P2Q<_43{W` zH*9I6W-CsgXXiTC2o_bb(Ea?(3kuX;ea$y;)AKr*--Q)+$}bwsnb&tetX7u75jN?jo*^~@~aIugbsh$X=v6h&+M_Z6i_D5TFX{ZlJ&DS9h zqRjU>4&$MBVe<{22X=exjtS6GkA@2QHuDK=8U%$d*{j;w(AW{hHQ!vU`s>taOo1t1 zofrk>o1h|wQ}Ga))q&ZheFTU^UE&=qm#jK&NO0E^it&*&yMGfGCbEgZp=)`zN7<4; 
z*=sB2z0%pgZikna%@{W0d`w#NS9mvOq`e;HJ*8S|(f&vOi0~p_eqflj^zWHASCgSf z{m}ltFb^u}9P)e!or;x}}oa3XiVK^oEx_Rt!`+7~#ps!hl>ZJarMywpUw>I`)^RoNr z#A_Wn)*tk1Gg`0HASkRvnEC`V^?{|9A}2lv=f!Qw1&RVc7nfCh?ZdWjd#yAKxcZLHtq*zMY2N zsW3qXJvH}5pIru74YIBixya9tKa)a!nv!Tf4y}B{5gdvGBXR{T4O`Ka#RyM=zX-}aE4)PEk;dXp4 zO^@ku8cnUS2hiz!W-IBG&ti357p3OuXkX9YEwu3%E&fCuvobp$1x#9!9_cDxGsvjl z_%Bv?<2b8_ne|gJOXHR4w0{P!fQcXK8d>4mtwYJzSU9EM9I$7L&uK}BDoZru3{c9+ zKulijOoQ>LRgYM1yo1OP-MLs~Ae4|ZbuzjmYd8H32<-gZ*v3Y6!-2kOBPdV5$Slye z&(lRON7K-j|4sWE+`K)Tvn-J2zKSz0K|sFmvuf1b-^pmcB*d*?nw59}-)KzCE!P!& zU9@)yanOTKzRcL&r#e+<#9H0`7OH2)K+;+RdXy2e6t1?$T5?}SMoSjC%tSqbDKNm={XeCB)I?% z1|kyXtqZ_;%EjK@cIK88X*)Z)q4Hzqo*)?$nzD=}3%Gbk>HzYdMePiCC#yF2cA;9h z2v>Kkvyej3y>Om+5=1JSVo>RXvEyZyzibB)%we`Kl0Y@Xpf*adKIk?V_z>B)1S+L= z58oFW}2cojEYL6Z09Sm;lW?*!3ZpYtPiOI`_TEB$#7_Ss;tKcEP-flk0t=X|vK&lDkI z&2v?OU+iLU@vg+{cj=76En}W0ph@Mf&h~p)np1^|I=;WQj*?6*3>Sn_g2VwEy%$}uhuIK-Gg3mCP7(}o7oiLj@V zU(bKySN!zsN%ZQ1X(CqPR`s`7B)(+-r+c`j;;9;zHN{i&THhY@Ee>=3d6m9Gr>iIZ zJlb)4k0BK7iqF)dHSyZ8KHOiVe{7&I{g2`P`wVw*7yd!z6JyU2hl*TGiB@71U@ZFwOK zIxPW0cF9PIWkiF9Dje=|vj8(Y0Y2x)hj>ZWkE^uB+wMTPqOz4#!J457drlw>VneC1 zH5wlCFS~_S=A+#M!4sI^u(ns(-;ND$=r?2Xx!qChb7`NNLoD(lS-!i0S_W`Xhsht6 z#fb?eoz6nIy`j#121B6QmC-YVlk~J{O#!8lgJ!8R+Vx=w4o;r2?dBngZ4JVh;zxHk z!io>Em%y!)ZS?g{a2a{@xNFsbe1C0T>PU8e_gl^etCffeHi*uK73O`2akEhwb?hvoLGi(PeSl>l&Bi?@_o19Kf$Bl+)Se|w%G{tR zO6(qA)((4_7=*((=`dW8`i4$3f5PTzR7B*)ZDqky?5LCq(st)w1=C5=^&iSC6QE8S z3u~mQ*}oAf-j3f&xZUU(OwLnG|5Hha_B<38;rT>pHa@!?$08QD8-Kos(IkCz!GWTG z23gt|a1B^>hI?1~HR|HBvR&k{0t8&F%#5m5&wF8ZAPu?xw$%^M>gkJsRN2>KE=h{v z3vV5g0+_0uFvFE$z(VsK=W#QN$H_Xsm}is%eP7IIMz*hpLH#yOep31maO%gN1idve z(Ty`q&vIWbXqx@d^r++PbenOkQi7mGF5@_vWXBUp5-7onGyw|}_lr)t(2LoNk$&q*Dy zT}9r5f1_j37APA~*z8)!>g=+yVfUtk5IlhJq|U0@*ZW!~-j%J!{!KLDdj9pddw%Gi zRd~-*2pdqB#N?yVuD{W-x(>eIqd}YfUgs0}FE5ucFhs;$WjV}{*qR_w*~dk`-esKa z7vM_3@kLa1Xbs>95XZONaaWEpdXJ#|T<8|wDJ{d15s+&ex<*()hz9-KVvwrT7n6a4 z+;6RV-|FpzJ+P`O{W_)G`I~Ku6aYej2QPUgxu!bj6sx~tcy{4=Es+>MBJC=-s3tes 
z8?Z{dS4TM*T0E(E5VeLV+w(MPYx5&sIE=%^?&V>U1lccfG96JhR9XeR=!`wNen@cg zIu^#g1Qw|}Dv7j8AQWRLE<$4(w#cE{^cXcNT45q)4l_u%{E)~R8EWNUo;GV(qUr{w zIlkbG;5|RrI2GLjsYEewObL@Tr<1&aH;>t-Z{2~0k#eJ&bC*}%eL*> zS}j|vmTlX%Z7o~Nwz1`9uf=!oPk25&-=6<)e>ji(x~}t71NT#qVF|bN3TQ!1RxK34 zOas;m7u8wRY{COjz2rv&Jke)`m1a7~Zm zLd`f8VAH5TA>nK8h59b1h1phMN98z=<~PzQINxW}LjI=O(dKwn9MgMuer?u*k{cXe zrt0RW0=M4<8m?PH7b+n)#N%yMZ%|AJUrEPxllt^i;xuwr0>$Tni~BMbw|R3_q$vUx z(`O!H1!}zAZ^ztt(+|nwHopJvIwzm;mrEkaOQoFkgFd!#TE8fDUV%k|s9Q>^b zP8L5KLXwQDXRgi&GoUXDV`cfF?eG$H^_uuB9$WDP5Bd!j-n0A!R8tFyiQM$7_DFKt zHC?j61oaoS;kKVZ;`71_QV1C%3U~7Hb^@lnmx@8B>og_V2767LV#oy0wR%#hBe}h( zCAJIh@OV~hN?9iOg{E?FSzcW5na32s7a62LI>&$0-TQxq@}2?QHQ?IX!aRX{)^MYI zy#@{REiy(7tn~Z}ZVQKZ@YvSxrZ?%stcywnkj>3gpFywPv7op_x4Sli;c4K?^aWM= zwc6cb+O?YZqq}KtG9U;I$dfXARu&{Yt8 z7_?|fFR^LN_3{x3>i%F%=j&xb`+#C4_~GO{lSRiJ)QSC$qJ`3A=km-TDAbZm6$jivU@kUqN`V5{GY(rpezU!c%L zWq|0Dqpy^}H ze6D>FGZ7t;sgjLn8R+V53uTz665n|T!tPV4#tkF96OeH)x`n!JqpmH&26?4lEOtK6 zq_0KH;f9+gcng*8fkmnnO(X6>L}R0A&`<$i@}isZJ4`F__n}mOIkvUm)!p95S!w>5 z3faU-z^A+|wPR$<#tx3o?k%S?%-* z1HFp{cHRr|Ur=ONl^uTpjz2YmiRv}&tZq7PvVRDt{l#J%qq~%OH}I2sd@QCD%q4vY z#uZ|!|K^=*ndV~@z=Jf^^0hY>{=P#q*u!o9pIfB{YQ(*foyNvS{JdYFc-wp_AnciTY>j9?I2724Ij8HEt)Z6A zbAxfX*a;x)9)aMw9+BH+E6(ZwjTyx!-k~*BZs+Qn%Cv2z{MB1;B(EjpnuLa7=xGrE zFuY(;)av{*Gswm`;T3K@Ls%>*#Sm2 zA0t_GUVVNCIbi^ocrcDi#_RE8YJm}av$LXZtX5+*zzyD*Q|L6Dh{TUPDp2&#*)nEVaeAg7vX8_qNhsXLYKa10fDOQT< zSFwmV+vP^jGala2NTRzO zSdlV(f}aS4@Z|)EnqgF?*4ruVEjRYq6OTgMsQG+Om?69g6qEott7%7(&w@qz#Zoxq zfbtIOQNYyAaU`bx{D{)I+CGTo?b@*C-`Ycwu{=7i-)c~^&eZBsH?d7^rcwEdO9oov zZ6-tBSU37G6}*`Snh^W@VQk2v--SUNMoPln+9Mt0MZ2svPDn3e@ zMH78oCac^Ky3?BDT3hELXm$j;z>5f9UInV`_@d`nma&KJ;KGQ91-ZkS#{Mco?b`_@ zz`aq67niIVzZgLaFEDh<`?1RRPJfMHQs}q>$#H@QwDB7>SZWx}QL$V`&5Y1Bnl4^8 zNx3W-y+s>h7YvGmSTg*eG{1TnGjo=_tY_i{TKW`@_Kxz!0g)2TPZT)d`bPscPe+Y7 z%+$+q4D3X^L5!K`+-ONWI!rOV^p8VuMH+I6NNkdL@e5r@dOBR@@VW=3YU;et+F50B zr$+$Ps2IFAM;3Woik}!%%Zy!q7@f?FDKFzMqP&_jRu%>BM~nD*2J1=saBoMuNI7r% zf)s;u)$Es`0_0^6LG55fELX#s%M6}^;` 
zd=g&n=|aE|XKCo)sQ!cS$rTmSau_3g{2UNH)X%>NDZ*MLlzzV=x5Hgkhc(vvwAWDq zv1R z>XXWkfoCyEJYEKX2uFlHwOdT9EdupVy z9+Dbia2YckIP4ump%|i$gWkrisxa)YsZAGW)sA;-4P~^HJ-8YMTQ4{3IXmG)bL2Ty zUp-a3-+2bMH%JH3sH<0?3kr{bGsY=t8w%75j@CS8<(w~=H&W-vf4gQ&4BI3JQwYrf zhNTn1jFdY4IAyU-ds}kU#jw>sPhNtH*?uDF{-G*>RhbSuZ!SH2P7`_UgxHt1gsj-o zo(AIUtp27ahvHyavUoW3kg3OZmEvMj2!~Db`5js^eNort;N8!~Hc3EVw9sr>z2W-D z_8f5{O|-UkN|`rYbf$cW*Lk(-5DQS36;Ij|@WFQumddYC@tR^3vSjgJM>(88HCB*& zCWG^JT?|DAQ~8t~Q8l-fc>#j38Etk`5)tO|G)$U^2!I?MMomOf72%0E54(O%c@1f} zyOJ%kLDfa^zUL<-3}~f$r_V~0;zXP9z?7YNNhO17qEWeGNYDB)%TSPlA2=NQnlr8* z#cpiOdvwt?t`T;+|8zv*ZOEbXMiPaC2Az|lZhL$ypE})V;o1862GK!Q% z(=`fR?lxmG}iRdN}Tl zeZSTyXVF1u-nImw?DZ0XQcj!BVgIx*d)qhC{+sHl8ci2Yg%?^zpMOWfZ^=9SrwrGI zd~(dK#ykTbQQjnU-_^F~sq%3ifjCHi-~+wY@N?n)sZ+?~bW;)C5pCU%IOt)P|1sQu zpW*WUrAB-trApu&m&cTMNn?r_63TU3=Cz(YxS@oXd%-GtopXprJho{_PxEN1^WH$$ zpN9voidO+ARqdfXKJa_=r-Kq-2KgSvT9K6*mN!I{Vj1bSGR_9HZYKA^0r<%ku}NR< zG@`||Zsf#S;jW%%)+daQN=t^Jp}%qE1)4!mvV>&p{Zx;^;TR#LX|}$NC!dlBTV=`P z)?c0+kV9ckI+vCcqGa^T#SHbN_qQs~?`e7Nnpo(DAiZi8P@?t`q~K%C>6w#9X);sZ1v?J)4n$fg>aI!%aBT}$ zPg^`)`&zk^Tvwk=wt+#b<(qhB5TPj@YIo`f0#@{?(9@UQL#c9f8{(68&$?dk!>**E z3Ht1|a6t^{Ju3Wz(doy#f6p7jC8S{cEj2V;SdX4csG=83nOqpa;i_EiEMh~Xv3o^Cwrk$)L3QxgSWwzVVe*QcG7wl(R<#!2Hzwv+C zhur5d)+c>UxM%w3&?U&1XLW!t9}o4Mw5_TC;7Ctsj~MQW{R9(pJ;x2Otwxre&&p4- zg`33>o<8Y#L_5~b12t#^FU0t|tt3E}sufkwn~dbgG!){6Gr8V><^XeCExuj*?BX`! zk^+`CqPowj{gn?^)t6QCRom<`eFYAYtlkqE_Dy`vIsun~A6UL3kJqtB@rr6qn!KW1 z-m{3voRoK=pbQ56Ymq@kdyRPWwg>7wNKfSI3G;F~!D8k68e{*c&rhAexCW&P6v`7^ zlrs%;TKh!s4Rti8@K^SY$S zvKj(qWvF&i?jWEskqC{dgUxm(K7&-+UZ+ubWTU(tLNczlexBGPPK86s9IDEF-kQ9|B+j|VhU<0$Qr zg$;*5XWrmwu2sJyk}&?Ox8T0lSAF-2MEx8@)K5AoR%1jUfNVLeS;CELAOqjkL#)V2 z=JWiEq@#!)Oh%o1xj+uc@XMA!XksGHxa9HPISb@JJ-Sp=O3ta1_B~^Q3{s8YKl zyL&&iI^iRmz!?dZq%OV@Ls8hN&VFD!$%_YMx-}8K^KntAcB9*{4c)XTVX3-i<-4&nbK`o0gJJ;EklQtS6gbRj@-6@k@n7mf=j_$3=v@%RVXXm<3m!TE>=fUNi!Em1Q0{C
    OzX{`(9^(~@^#{UZKehc~|lQ34X71iN0auEQWX%LV1nYilqdNqA+7SNWir zht_IFs?)#O7J-54Rkwcq>_hdTaIzm0@bX6|{FA_dq2f;zc`R*I)8qA0Tro znKWW_d9cI3lj&;7D;`+)`~?$8z6kehk*^;`2nY6goGP%*^u_30cP(X^Z^+8a=oZ|{nTrsbJAkD1ykj3Vs z{77imuOSjY4#1_IChWad-5|gqtDagLDPB%{kOa zc%+=;4XHKYVQ4>76E^PBOAU>D23{8;Ykale#Ag`p69H9woTIn%^oy5WJtniwzpK^+fKVklGupul&7(Y)QkET2acPs5lPWM~v| zSrj~~zAN^Z&)|*=Fj}aX8y_6B zf26K$mOMq7`_#HMR@7Sl4L*)@P74*9WwJinC3D$njy>8-@SfDjsI>>zoke1SV48{a z+;&F0c_%@KKzhuiUpXRNtV_G@5~gqA1!W5$Ilc!Te+sf&(dmyWya17*6K+pL3R(;c zuTmnU1o9C`i_fIG(ttT}X`dzgiUXtIde{x4XmfJHXF$G7wQ%YaytR+E8bg+T?b3?J9~n85(ht&5jj^0wb0c-cGtccKP3Rc zMlqmD(Anl~S)uM{Bgqpe+H)Kin)M}THk_e?f@7b$AQUHuOvd>3fgwrQkHDDzuSb>UQre_! zcy5XI_w;ZvQ^)*r-3c2oy;bSbU3hIi`XUmiVz}jP?gzv?yTSPumb95{*awauWsF6+FHbcn8od-8e#)CnIv zSn?ax^W;A%9C1wTVjlcdii&f_F#buYRv91-`Y+qgjk50A$K)y~YhHBqY;>0^V}iwD z%?GT)SfuFy#bjCUPJ#}BZKk(fOZ$tgwx(~$XA%1I_&*Dz2u{`@KtGAWZsaWfn%du5 zSXMRonB9Z#WlMVHoS2))$R7icznl2$>`JU#_64dV9#*oV*L$$gNb z+^D!t+O5&fi$Nr= zXstDS|ErAsp)#gHZgn)Ae8jC1oPuxHRg4iCuDDJKaiuAWk-F7Ph@055t3tDaofU7+ zWZ+MlWq8sr3C-hxrtym2NC9QmG)&uiabt@Qyj;+*`e3?r@%?*M#!uUcvFr> zABrtB;_Dha7wq>m8uhT!Mbl3Q7CQ4*YctsB)u= zdrO&5y(RSms76-$GDjCxqV8{FOUQYh_uZ_7(-2=RTCn_eXw^>ukp-JddCYhaAME3= zL=w&AGWt{Y-g_I7sVc52^l8cel;Hv?t^ZgeL|AABDKzEeYWR3mF^A1UU0xBE-xTu@ zJZ*M3NwN_A=zt+Vft+AK)zD-6AH)6k8SV!2wd%c?u2f8H<1v?9Y(hVl5kNiKw6TSXu;f2#^dTdupie6>q0bjyhUYrhPjlE=h5GN1k>CugAY8o)- zQs~DOPTcAX!FIwh?MYOWUD~x_mCT+IB!RSW5UDBGzc(cot`X(|BuAww$<=gCpxF-y z)jd{TR!rp=@S^h1LwO4=X}1@_610dkgxzApHH59jMA%neOKr_X4H3t#AEP}VT20Zx zy_ncfArO4k`wf-2tkm^qHYq*v1O}(=6UQvyv9mG|$PP62{=iI+0be%Gn`Dm_pX$kM zx@-72gUYs7-8Kg_Ecr;PFEQOR%mYGqNkZ~Tp@~QI!&@HS>6Z3|1^q;Tj%yYb0tp^J;&h*{qDJqhbV$AF*`vT;uR&k5qD7M1&>J;INj#Hf9XTyY8p8HqTpQ+uaAC48bmUZ6elF(e$QLLZ;Tce0@8|vcbq$WXp_pUog@+H* zkqI}Igh4^zp}4RoKnEyX^JO!O?rS!YL|+fcYiJ_Lna-XSY3WV2@XS#rqyiS-`|p3^ z){F0iN;9mh^Tnbd{O7qhYQleatqT#tr-25Y-^J?fOqSxGZF&8kl90Og{)9V8E%_q( zz@M@qY61MPs9`4ffF$_`-zb-Rw^(P@9l{0w_YFG!DV%Nf$iN>V0wrY});&fx!kTgv 
zR~3TEOj3{sQEHtK;Qp5N1FL4Kt;zW5e_<7o&dE7iL-vDGZvTd&efBDt_7gYe1NZxX zVD|cQx`X3ePI zkD#ZAu0EoF0{O-Eu~8CGsht|F5Cy5Xc7&Ac;?ntTou>=A%Go{VKz|4YQmtV@hO9y| z+HXyr^1UJjGc2a(Su}2&e`{z0pzqUS*N1yjVpFCTF#LsU)U3%j8GyR?Aa~BRA zYR6JBz)J;vU(vc+6r&StOTH|SNr;l{+mRUWr;kITpeNf7yz%?SHR$Zj7aYpl-mN0p zt*IWK+}4Pvs+qZOzW`F&;p$>s^7j@{GhR!2@XqDZH;<2& z-Nq9!@uWK6?_|ESKuG8-V+juUZoiVV_11Uij{?irzWfhIpUgc844k!#BR~G3xlsbj z6%p@Fj+I3vKc+Bb$S;d%j^`FOB;ppXu)>%i4if-^QTZ5oyEr+YAmeG(%{hK`IW% z*3XnP&Ajfj_knwFVTq)82afd{6yRpOFGrDPasTQulGW#ZRpj3w2ei^VQA8kHjH$B! z0^TsRh5SO3dky2z3YZWLqe4)$3NgqwUB6M}M8>`+0{RIR9IyUHugw?PM{&LO)wM@! z+I(r3Awwf)kq$PPKsw_4FPN@ID-)UT9H&^>7DuhQ*Ri)I4! zk44`ONGIuzKiaYi&e2NU&4##(a(8jbAe&-gX+Iyw=%xzA9nRT+GIYXIKc=7jqj?~y z{Z0=S5Djgws1=_NjN!9qx+vc6%7sR8O1+D&M8wm}G7WtK`d@O8MmvMx-VOQ11g&3F zogOC;UU7YHR*(yqGTQm)mLg*)a!}Ioh}xiJp%nXqZ9NB3?a6nMTD-DjKb}_ySntJ) zqtVTm6ush(R>W7`ZJyBDYiVN9tg~|+8jEd#DmlG_V!kFqe;hGQNqWqI7=7$*!aK^yCo z6FfCd9$Q%M?GhIl2C@+m;n4xU;Gc!0(QVV}kU@fr=5L_aqN!~$gSqES{<)wAx*yD8 zdZ_kNnj$(Z`(fcjI};Y94Gh8KPQ)zlancP6#)wurUeY7wZ++JK&20g4VLr%~|3&<|!? 
zhNhaePAyU&XmpoZ?CaAFqIax{0rnDg)-6?FM59oZddk0_N^&MB#4n|JT7QX_rAyNS zLD)pBVUwl#=o1{d-1{-|Hqo)lg%IAcG|N60neU~8|0%UpK{D+eRzo83(vMn%9xabx z0fS~}Ru9JnzQ$K{EbxsChcYq7q>o9o!V2{+T#y2|QFxXcW3?e^dKHK*b}-^ z7K@^p@riPWR5Amh9Fl*>e)g}t_m>39UoR{^$|Z1Tic(0g`8d}8{QwWRW1DQH3?T|G z_h~wubZ8BN<0|7lPhm6MYV}UHbdW(ZcR`|jZc6kG0aVU0pZ^aX{N5a485hRqdFog3 zQfJU#oJ}r|v4GvhFoIyaT=8=7)5Y8&DOrV?bY`(wdIi`IdWGGP2$L%e{T!FJ}AG$SJIgzd!QznJjHb7}V+c=L-wWf4LPkw+F+Eot#_6 zuWLq6sGMwtn6|E70-rWGye8+%hhAicGr(1FUvztiX)J%`=*Mg;nNk}L&lrybu=WWJ zw@&K z5=Zzf+eIW*V%`J7ZI##F-qR$;fl)=__Mm`f==wjsP%3L6#)PYM)eqTGv2C=CF9~v= zm{5NRY6(USi(lh|s8s(ED&>ax$i&IxwvzThNWO{_F+|w|A9bov6S2|YIz$5heISDP z0-uL*&1MR3(V*p*k&TEWc7k=zQKPy1fmd zS@Z|Bg2S2(lV)vw+o9N;YsE{~%YpBN(eBYaGL(V2V64gUs^}S6<#2+<7(ufxN?6*g*BEj(K?yQ#@54h97$$uWpl0pCWx1FiI zy)7Q2iE$W9Av4&KYS#T|2J8(C6r9E4`!_OmWgJk#kR7$Is6?bo4u+bbB)B{%0m@%n zSxj%0aHHQJwkBv8gnS6en)+Qu_rs{Q>h__|n-jk!Ahdtn|8n7JGwFdVdu0Eq5pCL(6F;YdyxPKvHJ0!Suu4&e=bk59;C)&{ zWO3cZl!>z%8M7Rok{zm>4-qH>n6Mnn(niz`J9D8)>_sTf zD9VG$E8~hEpCn3k;`6~4o^Jz~%SD}a5y%;JJrI=78A+O z!vf;*Er?QD6lKA@J8V3fYfn;mbv6=$t&U*l-POvI;0Hbz}OuVsecWcZ8XCiAu^WK6ua+ z5$*-ME2iQ*$&@|wjxzDl^|PInN4ucB@2H|EvS}cbE5{J~Rzv2WZ^(RdY$oj2)Ss@- zL4WX3v}5kvQD>ZerY3x)HAy0K%!Zh!7@OUKl*Vfc-WXu|y$yd*FiwRsAe&wK!*17j zMCvFbT~)7`jA;C3IQ&l;t_)Xs z7=q>6;0_xK{6~?rqA~>_uaI`fgmIJ#+&!X^1d%t zAdmt4>GPXd9oAd2gY%jCeK(i5PCnR?ThKBLCXnMG_Zhg6Amvp_vE*)DXBKn(b%b8X?6jzxc@x-i}$-~_+9Wvr!R!X z#*DUd-b{&sc7@IE{(W!F zBMTjN))g?c*J)6s3dAdfm=*XuSWi}e;gh*zLW>csT%aq~T@BvEW;`k)W%v5N6O{I{ zi&KbVT&XBdRllELlu?94BM0y#>G4|M13r&JbmcFT1{6tW6*=Edh3v?o)fT{O*@0|2 zT&uV_FY8l7CO3>A1Z_ih<(AYKF?{`##i#RQKj7#Pa1tlmxL}^`;FOU!&M&9^V9IB!A-0??{d+#nUY=_&ma9INb7 z-}3Fm47PqCSzVQCA;Cf&|LUP;8ela?-_meyW&ZbHAR*LO!1>r1vo2AsWmBCiyDbZR zzSu`I-aXU$8i0xQ=ZT z*JT4$k%A@t?>GX_kNN&J)#Hf#29U9f1?OmHHlb(Q0s8cQ*rDxKO4c$JxJL%0ctl1J ztAs&M-xeA0v5q}|C)e)1P|8|&Sy%Z;q-X-kQYQ3eLF!waxo6JE)ZBCCJ$1!vl9^fc zO3Qv}qd#C4SN-cse`RZ+&FbhxI-l&^_A;85`yE|U$zLdk%|_tdJHmvU(7cIYj-Ku+ z!o*UOe+&C{IYM7P`9j}imj~ETn^nR&`s2OUDu!qZ)^?P4)mMCCcUwtDncm4XCjn3S 
z>N-~5PsgIIlxUW9tGB|du0c#rUF1fPi)FP^J-~(FXXY64<=`jE11d~Y-M70h=o47H zEsd^gY+CvB=YXm6D=B+#Z*{f*!K=W>_0!vT*q`e=!lbg2C+8;b3OQ zWmYdWQlCI-vfrMFrO2^6Djw(9vmocH94F}-Kj_JrZe@}+Ps<6i**zVGhm!Vyf1Dqe!k#`A*h|?pRpsdW&2vC@7 ztyb$ZRy(&h(A96~fJ&mQDPy|r0iM)x&v-bOqhZfDPLpd^%xGj5qtaU^SQqBD&=!NT z7LKsMpd>f%Fm96MsK;C%p;#r40D;a0yjNw|7 zjYLYxSAuYZiOfZ!)>3~T$WRM4tS;aOxuqDw)Fn&(4A^WAsakKC#Njnj2?%mk8;RHe=zB>JvDP)ciSN-1#_=UiuOH5&wywht zdUMSZL{WPIvT~`MYws#6)m~oWeN`-7tAe*IT>ik+;C8;;zi|TKGdN+8^y|jVL>Lr} zbhRgx^Wr|FK_TlWBwNr-u~YziBtwMKcY&xdw@`DK+?QeoAVYZK*eUv_@gZXnlMoQ% zJrt)v7er{lbFRgC5Y7He^#(g$VTMhr2dck5c>^Y!9Q#^pVLy$wJtd#URjYXwJ(#(( z3LSQw!VBgRCcvDGs(N`Ki5EU_ zSVlDy%s6)#hspb=dSfS3z3B91g0q1KbMBu3X)9rC1EhJ)otfMvB0FEI-ub1N*)uzN zftKxDr+gQ`=!~UhGaf~pd&rh#M#p`3UOzQ`4m{>N;5)KGpy?^^_lHqEK(iv5nE(xi z#%KK3j_yZ3b7%frpxT*cJMZ1BY#nDC`sBw6fFxo|LadbcOE-uwH zVT{M1WCi>;QugGik~!soC)mp6y&MLg0BeEiQ6gSKroD?1sd0oHX7FZgO`r|X#^S^= zIZ9spouo*30v)8lo%gOr9;w0;$^Xw`pK2oToF$i|S|#~0?3p9pYW_|cjjDj~PkXkg z{nvE)o%WOel;J9dxz;X)l8VY`J8b+om%h^>$(M=1vCa%d4_=A2Fk^1ertG7V9$Cd` z{joB^>jA50YGbg@4lqOSR-JA3msLkFw~ z87iAdhZcIkMIk7z;`IZ>T&`Wt$?gn}XZ-hQ)prEWDD<#|sO$iSY_L3xF6uaJm|Tfp zfVt{^UGek$<1Ud2UCf}4FDcN)pOM>HQ#xprIOI0MeSze&my3q63wk zT1LAiAO2?gJ1;KnjF4@MJ&>o zu>x*Tq>Sa?&lpLt0im(Es89KR{GC_W-tXa!%!lh%izMSgsn5MRX-ajkp!sArv++u> zKUKPq-m8$E1af$Y1D7t)@>6Y}*5N)hFecE2-&FWrP>_He)-&vCQm_%+OT+m9-;+7n zim0&!LJ-n7JBPM&HyG>c~8W-MWn&Wvcn#9+5FM+iE>!#M1IYS2Oxyc5440mlJx zjJ69Zycof~!$};Mo%%Ew{ZicM$-p&#oB?0rsB1@DD%H&sm+O~89(XAyi538u9DDP8 zlq^jDSiG*P>T-U0&fC8d{WOIYng&almX8h^d8D?94vh!&r%qdYu#sT)!fp8=da)#X zyz2b=KOzF_x49Mh=%ib=#5#O!%&@0VOeKA)`5Wp!(1;x6-lSl?>smyD1Toji+xNLK znW;H^?_m>uH6ax~j=ts|Y6$chk7N|GXM4vT?|x4x79?@lTWIjO(I|~;Mixr_91SXN zCEo7k%zr(nKmRFZ-Xb(I7DA9h=y@xa_owp}ssqdG#s@k=tL2kn-&HAgap``qGjJfX zxS3<{Yo`}UF9JcQ(iA81?L;Hr!S3nclN+4naCHZ@X9V_;AZ53@e|Oat-31shvF&lX4}_cB%?&s)ByT04s=>MeNV-oXuq?M{@nfq5A~Q z;?pcEKmA(ZEy{(h0eZ*jK_x@KA|LSKa-TQmQg71372r(0h`2wRGKDRAi`yR%_B=`tGor0w5Yw1;zcJ>zWcGjasZ_PpRpb-9uQ7vBf^u#boKky-Z 
zo4TRFv{%60&dvyy|I4i0)a7qw_9H$zILIAfaczo|ZmLC`;2<-Kw!FE@^CTl# zDy*0E>57og!Vz$-yfaIvM-o7MLEXQ)6L~~I_-I~7kfW*0dRpd>AAg)D@TLlY()Yt8j<%2xHR$lkU6QhWX)w!LY$QNaQ}s5h0Ua(huev>%$T4fOP4Hc% zbx|G#eUceogB90dz>;Z6nV9Q;4ENt>I4#6EufcP-9H%Dnd19I8;l=EMV80%1RaM(? z61qJ_fJMC8$7NZvQvZg^V|1qVSiyP%2saaBx>k`Hzi0`8jo1Aj-ybO%+_rLMvWB&c zPCDVC1_xAv_4*KUi2pUI71NN96sF)<~v;UzG`KSQZ+4AEnP~>Iw zAt-g0p^DBwcd{AEL|6!K|8@OZ`&V6@WZxtYeBHs;_B<9e)+vmFrh>!XS8Qc}Q>K$v z{gC_N`Q4fmK(ar&6@gIh#4e%A$WlCY?|g|*Da&sZo{+?1x#WKVW6TUlb_zZrM|Wj| zzV>^HkapU-#BG+Z8kV=368yG6IN`LCIL`w6u;;0M!Os`ADj1-gt=x902Xg!T#;5{g zHP-mSSP~YMg#|L>)?KIAe^|ASHraa<4Kh3V$elryh+;H6rk91OQ-NDthn4R!Jzp#u zS!#fkOTO22TNmI+o5%i(Dky3bwM3GjwRkzh**3M2&~hlE*@B&HcmuNfGDi9R@0A}k zHY88oEWmCJk29BA1piRto=Eu@J|Li+{S}o}J0)Q4`}v?Cj?=s~!O7sIG93NQ8LF1I z7?4dWt}PZN40q-VDf@_c=o@UgvS$=eKWk%`xU&#R2i_#b6ejzi$4IepWeo(~*IpEI zWAyR5pXoPLYm*K20LCxpwjQm;FztT&z=L46Lws+9XF+snCDXFviR1G z%oWO!Qc|D?g9L`+an&+)_ed8jfZ~1Gc#Zp@JQsCcDmQ!5J_=waC4tZoljIjyotNH3 zPhCZZ7pg%n3-$TI9toFg69?c2h^$tS#~Iz?nVMX;*g#oH zUz}Z@=O#-{nA=OGpK9sF_s>$#Wdb`{uAPK+dBE+ka&XEK4NdW6zGzx{uSi-jV)B5J z660s-a@eey4sZ<$ZmZr6<@H-MSmUxR>VENFzAfD}UC^S;y1nDc07aU+CImzp6UR-pqFC8yYeuI!OB z_zg1clNyh^YF-Xv(A|TX4Lh9h`DXz>Su)!Hv3FK$K}87^-h|TKAl;21-Q5j>bf*&1 z(jeUpN_Tg6OP6$acQ@lJ%*EWzBb!( z+PBW~(g16O>gtkzhmbrS<60GzcrV>*`tE}-WqS_nKdbM#FoI?j3(tHsGfs;+9mC?! z0+W^MHldG7g58>}6%GoVNq|b<)Z}$r1q;tJ1g~zzeSG)&o$AsUZ=g5AAhpToX)yTG z@&^lzAW=iMmsd8qg(mE`>}fXjs9$OUs)*&x9Dr!;UA#LC>=eBI`wdpiCxx8dsn0T2 z-`d+MIOlm%8Q@tsQCDV!Wk4C{F!dc<=Vw<9+-?(Hoy+WsAPcRi0|ASnyYtqB3pt-< z{qoA-=)`uys9>VA)Ob_JcZ4dbpt-}PBqDTUZI^g^i%pn+Ze#VTx`BdS_oi{e(CWYm zbX8d8CD{dw&bLOk=U4OFa2TU)tqQzdXXFegR?X8}!*LLRGT;2w=XRWHj|Ik@ju4kq z@9qorQM5 zo7K2zn8w@{>$WNu;?k&Cc-nVG{vdezZ(G4+P|QgKGq42L}I$1fomBT(yf_?-{te zlrM-bn3AkrCXv}>Y-5he+rOO}zu$>fSgx6gNdD6! zKCX+ai?__(KjpddPRsF0g9J2>Bj#N*kQl

    +LNuEGP|yFMT_Kz&dT2zpi3;|j-N zVMQD3{8Jq63Yv7#bg2wouGRhpMesIVA-Q~Geur{t>^*#@O>KOq2I}@iR6jqWdRfdv zCqbcUd${H5+T6aVlr_k58?2xS1CJES_EJw8xmPmMbXtwSKkedJ3!Bt7I<~M@^jjOq zz-VYo75@CcQSAwfjI&K;8ENZ(f!h!mkCtTEln}H1?%&epM`kc3|tL_A0MCISk#h1{_d5EL^y$*fRoiwZQ zPd7L8UuDhT7bqzy)Bi;U?oK;zM75Sz?I<^jKJ_l4ML|}~3o{Rv9M0@xyG?%pja$Do zn$bfY$%!5ujvJbc$z^BMIDNI`*4r-VsO$zuB0k1@xas5TcJZ-B7t}-sDZAPuFugsu za6`~@|MFIS^<961)7y%_dY?gYBAdIRm7^~E5v>Mh+-23z)@6Ppe*_RGP{CyG@>Lk}9uTk(l zP?nMmmBZ3Rfq0r9gHo^K0AKd>=A+(+2bt#UelPHyRorga7s-e>F9)4BPVdfO4vocg zK~zpY#=({8$QvMgdLVr|S`qG~#KOJGF(g*5tt9{UQt|Oe+cIw?j|GT*A)=NeyqD6k zud0##fSHBUP3v*$cRa`@X+MIK#srq7N%Le`!bB%ThQ(EomO{)QhvACjwMwVfRTRHW zCxHbuC4qwa7|0Tlk^O z+t~XT0H}e#U=j1(%o57Y>qjGZ$du`KE30{uHTqHBSLuZI8IXsrLkz_G zMZg``Y35N(q7A&DFK*U{+04OGIqV5#fPi&#ML4pJ(>`@u<7J4)dU!Q$b)4se`fpnq=mG+?64Q}ycsU{fI z;bZwH$>DxWQU>9L45KQ2;N$2OcQP%HByiV&=S1|geI?JjJ3DbfoK$P)?DsW05Sy*} zfhVXH&UM4ay5zt+=6dzE8+v6IuDZRzOr>rMWFfCQ&d(Hr)9Z*hbWCD*R~#ut9Trdy zp5dw9-CC}IQ{^e^Cg^KFQCeoL?(qwFR0IV!2fe>67;&Q=$WaVng40DDqwu&;{wjhH zI-v*-LK+Y5(qc6iwNu#4rjQcY*~R=UnA+*x`Vc9xQG%7j5nnqC@?bdoCPd=ir~?Q5pzaeFrIRS)Sa!>UHs@>_SB2kmum%<{}NdId0$9wiYnr;qAl2|6tPU z&$%rugRBD~#9YN6d+&rEwIXGS-2|R@XCJfUx#leX+FHM&UhzY_WTg6b^yJPp{%KFv zUj%_DY;ybQf{CD)K#umAkKE$e$g8Pn@O$>TQk_+4F2GSyoROUTsOCYX2St5zJxXz4 zj{3Sc8;JjVk9@sv7yJ=<|8*bp3&B55s7@8`WA&6RLTmjp`;FesHM4h^H{&fbcE=h2eOi|5!y&(^*jV#11 z-OkE^B>!$C1!Nt<<4WG=FNK1H+|&Plb3Z`8bdp;?CxWz$J=p4qPlA|7sercSF}s zTflLOR)Jqr!oWiM05eZpi1hJyHxWPROh`wFIiBrb zZ_AjcoE5FLV`e#9*@#fiZ2|C4RS9EzLN^6jSH_joGPJwMSx1hPl{-G8ZP?diRlqfg zu>T49FGe>dp2-aHbrj`?$SV!qIX|MTAXZ~W5dh_hI3Dj_zclt#v9$#ed)oeoO^wT- z9-(P-Pht!?3OtW%hQ&-Y-hZCfA@P-^XB;1cYRN;!ij;vLC3D0c0+nH%gG2Xz7#cNS zGV_J#0#O56+99)<9LsS()f$|0f(wMsk9r1^F9#SNiOy`~j${O*4WbI>Nw%DmMgq@t zAP4E$Jv7}`w8kezCFe|6g;deUvpDz&Qe3q{F;k2TTxzy|+sB=5_BAQLb-3J-<54py z)rqe+X#WgRV(7pE8r9R0>u#p(0^`T48IfwK?SJF3-oU-kkRN{_(7I3jZyByU>T;mo zilf`G-i8%YBz~RwnM7cX3aJA~R*Dm~@SICpRxQH|j!I7>GNz`m7pQRkpW*)Z497`D z#PoTdBfJxpFE`|@w5aNX6Q-WsA>J=7^bfx*(815I&Ywg_J?&ock6wH~7O|L%4!An> 
zr)rJSNG#Y1B!T8if4jn^Th9|}_O*w7QP^Ka{zgs+gfpF;VuX~0R zTy%4Zbu|{YWj-YF{Eac0Mu^K*%jRuJV3}>Ni;T(A?~Mnxtw?q>E$QdxNCwY*C`3?h zPZn3eMxrWG5w|WkkH{PIHf_X8TV{1i)Qi_qCL2%uuN6aZqonb+7x?^7=h=eC7&xR( zLq_fP=`5;G5BF8l#nlUxUqs|o;qzfG?!n)NlC}DmY4fNRvX3NK@H;em`!j<4NQ+^X zjfKaTwJElK0=4+*an}kcT&)sy- zTo8o=o`eyf-E?}lBNi(ktiU9sy6@|H2(efP7uymc+`e!+C;A~iTb7H)uzR=h1PHS< zIO?(APE+(Janbk4qB38ox(II1xOgsyz|~ZZ1!nw#L7Xg5-Vbg8Qnp468vep%I*&oe zghh;yOLtC)uML#< z6WPRw+59I|r#(EMcr0g_$1`>X@!431<@kl^S?BgFm?P3ZDKxvX(#RLhld|QQamSW` zqQk+Gx}E*=#~*Qwgi&uFV@zOPX{jFLu3@-d6k42s%>JV|Y;Q-;mm;IT9!PMeXTGhN z*^}hk9J-@(WW>uL!1=T3BjEr<_KAr;T+ha*5m+4OsyOZ9lxk%3DmHH5xMS7mUfB_U}&f=m}UR6qS;N&mNZCCr+n^-)iOvLj_;p&a4oubbb{L*Z^7udssR7 z(K)kd*gJ#Dvd@zjQ0KX|2q?KHCWgN|DuArKi6!F^*4lQ>lMe4wh|D=pJ^g14Vz=2_ zmwBtec`#R5^crTr80yO2bPZ>)8Kc)saWF7SkFJvVv3Sz;386*=0Hj#-D$& zZV9ihP!WqePj0-(sLC491+c9b%_5MnQ7&)E(8$w4O!{%SwcpME>}FkP%AYpF0_R*h zjLTL`2J8*((+m6vLWj*l1Z2jSP9DBom?;7o@U6=Lk?@c)TXhKq@_>;T7;s-NE4S|t;G-c=37H0bIEdftlBS^QPVWG2U>287dRU?R5 zlkH}G2hk-JwKU2u%WW$;La7Fr?rA@fPCkTI_>r)EBeY1N8NyFX%JQwaj1D*2Vif_g z-#9bJ56810VaGGYNd?O@dN~TC_~OJ99z!{Dj9`Ea&+NM((v;!&g6%o#_UTe~NOC6i ziL_JBySm_APGTTkMAEpPFA9xvU3RbHWkthM+mg`NkU3ga-dnIjDi8SI3&CB73n~YA z{Pt2U8tXQYMj>GLSyzC_VK7CB_65xG80&mk?}C5Tbuh@88gd*zimS!FUxlk2^^uvo z=mKSE8SA4zOemCvGlm7MkO zvKW~9vVTl%BiX(^E`s^gJ?NTU<>+y72uXTr(QZ10P}MP4mzIMsb5$GD?jWlOC)^2r ziuXvN(0*uqApyN4_ay(UJ_1|E7gxis0-zJu+lYOdhVjFchcnXAa)&$M+|&qOR6I{S zAVGrg0%r69nL&NNye8{cX_jOQx+D*s^)BaxL(#fRk0aAfpr#OE6)j}+1&yEdXFybn z3Tjm?@){cu~2{dy@V@g7Vo?aV9GScJ1;P2Z;w zTAG-Qij0Aa8nxx0n7pHUDT}n-Z}cpg7eDW_{|NjNIQd{gdj%+1={^WND!h%KZ;Ld~5cwfvezrOd z@e4k)`{u)zMFk{DT<6j(UyhT-Dsjp%wHKB4MMw}knFswtH0V2(wy<`~aiwq%YEZPJ)|357Po}EJu7bG%r>Iy%5vtnxi22+<R}!I>;O(~dPfWGfej0$kSo=-EDcA4c zxc|>^|9gfzs20z5wy$CSFdh@Iamz3n4^M=0(uS>_WI;~ns8s^4qudHboR|pt$k<*t zt7mVv!`X6(7?{80Qq2 zD1~5YgnyQ~{{f4;3q%?&ThD&-hvkC2+#ds=MSfY#?@7;mxAlTC&mN7~G?8c|} zJwI*n#E@npE7Z;1P@{87KGm$m_MhKPSpMj1O4I=)BycjFNvN(qxVG>)%CPFaI;*L# 
z1O+``|9Rg+FEvv~KfiZ}hp9~$^cNA^I`)X)c#A0t1@FVEHS^xXs(M#+jqJ64k$rzd z$V&3Pt6(?zDn?445qQjHPcxt`t&~Gi&tSLOYTf!bOgHq8kHs$ke!}xT1TiJ>kH{Gn z7Mn;7*WKy$9I!b7B%Jboqa6&#C#$lB$0#;dG{r_?hMRxX%4^99N3mD>BUoAe6y87#K43y)=6Pvc;Le zO8TI}^mkdTQl6d1+lq!5K%~ceA_fz0TEf~Yf>GrLoK#`m_h~)@r%>S(UHwSlqRoH& ziI3Ajt^TXT<&FAAP^I7W|SNz`)f#m z^$*fERFxs%czymA&F)NDNwn)ff^v;Euf_L_&pRoVb1Oh#BrpdUMv>CPH6pj*9ky+h zqL4pY@y^O}JHN_JoqXMsRx}4USJ8pxF0TFAe$5egob%l=-?fgQajEOqq8vOmRak)Z zFDLnTU+Ln?S>fFrKOL&mPYU5HeQ*L!%hs?6S&+TLclHCN=&A2=mVOF* z2QT@r%6|BM`oavx(*5a^y>0m0Yd*g3UB8D;un1C&EP^dPe;!6sbbABzcP0phWGfr= z?hd;Ft;IX0$qc9_j-`197-0`1hHD_H;oZDqq7_1;hdDDo8kiFreTHwM3L+1a`Y|OQ z>I3qrBh%sq8`!vv=dT{G%>;1CB zprhECR)k6tq!ed@K`<7C;k!gz5R-I$tIxS2F}Xkr%Zn!LlH*6$XNwWQ2{ymu!dt>_ zO?xzZv00JVF?;w2Ns?PfPb+>Ub;;3V2Q4}u**3vy7K*dcZ&0V=h8|XMtG-p99hD!i z_zC_9MBAd?j}=pGr0V%Jq8P==-*+bnHz7^k#Hkk2kPd2tdOFOjz`v#!ij}ffS{fSP zii~h!AmLRN%Wg)3%{etdAM1`Y=UdnOkcruQ{pk9-4Sm&~h*&kwBmJ>$y znTwS%eEfXCyJQOS(4BRdLDn2!w9(wXo7V#LOyO}9;&aqVl@uIu`Piu*H@o3%3D%*x zNqy)2<$(_bgX|vD)@*FF1{xZ5u;^q-T6&}pHZOS>#V6V4b_)SF@~ACgvF{*z0VjDO zZ2soO3O%N|RatHzd4H^GRb*a{9Q{$u3mT!KGC@iKU| zr$k;>s1>J?5dh2Ie+xMh4Uc!b6 z@dfzM-C4N8%)t_YYD1;qbd`Ki4}QHROKM^hKG(b&I}cKxZ=ZwAA@JPD(^kO~XQ_`6 z38gt$Rlqt&a#+V}lPM9NRxb`&0BYD3MTv|1Qw;pNoC!%yoq6B86A6z${#b+8Y!~Fl z0|?@r5>@3q8}H=0xm!&wT>L+jc{{60Oa&VWBsNQG1ChCqR%zHrE|Yqb5Gn!V7ff?3 zc2D}UOalt&I%_O`@Tmgpx5i3P7cYEy45e(YrDGE zRJ^uaV6l!N8GT~RfRHkph-mPla>O-irsjU-DK4y{ElVvEJSIf!jzoV|&Ip`-m#3Z9 zMUVLxVaK^G%GOxe!=_denEzs;sCqQajurQQj8psJ5h2`mE@{Rk;(>)I=98fZNX#aW zuWj7&5G+2a$vNegsv7hY{%owPDM+8=YbzZFaU3#K7(pcZ-BdOUlc85jewpd~LA1XL zw?gW$n9}{g6s{G1`1~>E+^!S`}as*w(Xkpd*Me?RFt)wi<3&I$-5%-~GFpXSI>pKwrgNkr{vYc9tuhY>uu{$1WKi(0t zy*f{V_n^H`_b1RZHX-6Vq7;c0aH`j`HI2nodn`R!aj^+TgRF0g>;QJBh1>PC&_#b_ zxPIBBS$0|=gn%?Xs_vW=cWl}_crccyPW%-!(4i!lqkyAe+x_X%1aL{Gs{a$wBL z#tii@{;B}mu*gz4oA+crE^}t2@K%M-;B-!AF*S*#_vKc*?Y{#F3#Zx$I*|%KGD}%9t6LZS6LpAilZLd%M&zB% z_=#YX*(q^G_Mll?w5CG7`myVmkU?NF?YN*uw$iMnLLQ*4{mO0hI`ub?8LD;*kz3ZW z$4gNL5Y>E|yv-+N*I 
zJ2`l1^69G9#G3JQ^rb(;fDPX4_*TmgA6BtN(Kns)omuae(k}&a5e1Rr1*P+aKz#{o zw%x#Ncx~^ioa{>@+n+7VWCJv#^DeBd!waq;AamRrM}Pab$J&K)SYUK_aRRUHXY^_a z?1>B6!7#-Y=pv3UQXIu6H-0CD3Sk{e!;QxEZWf}xYp7`Yg#HWte2?<)^AS;L_$)T?>WHK&PqiZmk%76 z7eCzX%DQUTs-dJkLSex8G}Xi5+RJZ9i;(M;+6DuT;w!^t!aokJ&QV2uyDW#3kYo&; zA!VX3&V0c>Za4?0ACZJ78qngsyjY2n*ubwAkAD4Ly@XmI#k2|$=>&wUIVg6qPA|G5 zLY&gWSoVH=QCH6|SGNWmQBwQ=#9XBC@Xkd4N~>KnPZ6auA$w*T=fW_UNn9}IxUHRL z^x&^I{hO}M%Bm6-S#gt7xTM_3$M9XXc@{lp5e^q2dLTWGYn%NG?LCTPr3k`!G)%=o zM7HqzKW>+|PIK4wj{uv5^2p~$nl_`>4$UQ1e2Ce$&}dswXuJb^%H)r$uVCgfAp|Qu zoSI`*wziN~>I(^iVe}E37b5Our0i*=8$k6GC-T$K;F)BMtoIET;Bqsmr7cwk7J) zB{uw7IqNU8gYWDH&k}M1fQWQ*@elr`IcNbESKYrQMA{1CrOoh8*kSb(jvwyYfY8yG z=!)OM#&LzXl74zyCCl=49D)&{S_uR6Kc-y~fYgZl=jeOnlDEGd^+F&5KDF+t%-Mbo z2_4jaqx~QR_$$^$E@va9j0Gh)!y5P>W!sRVNeO;&3UQJPEtk?=(>9lU zf(Fq(g*;q{N{i^$Te|F@(?v@Hgm64%!^)-W9ESakJ^<0ou4TVrH135mH=$kq<JcNi1)p#n3Iks5y->Slp7=T zSiUwe$LrvI=Fb%yA%BF5Q){mhYFq5O2W2EsdXy|^a>es}V|K2>{>qB`GR9$}^J z=CGIjKJQc|jErUehEyV~BW`RMyLfiGAA{iwN2m#0dmC`~7u z`{LH^L-&^XI_UXPx8LMxR=eSz6QNW-hyf)RVV%tu6Z@41QFK@IUtW&g(Iw1R3Uj@T z=p09*%uNjZIJipmwuc(5B`*#HiWfRvQbw|?M#%AEIQlN z3fkD)3;eN3$XLt`v>j`X737XK(NgFh8~58axu&Mxz}w>0Kz1n$6FYW+LwdL&?}Cig zkSXcj10o!?tnX?YD6|;F;wy}v3RpL?{`d}C+axOARc?O^AHT30o5$M}Y|#=XX_Lst zXO4SFNbAz#J5H$xT4MHG-7BL&_@({VI}L$@vP58H68~o7w4qG8mPe zqE2JsJ=46#dJmZPe1y=@PqO3gW?G_Jed-#1-cW4 zg6P{y4EIMZb5IjL%n-x&zFBn9#0Z&vH}5D<=v1cgt!Cb*dK&sm5n!hm$%C}Mg?htv zl1wHi&b!eb!qDxXC)~N|p)*tt1~`*{y8ItFVrv$2U!7s6!tGsd*gF1gHI_%ad+KAL z0xbPMS_!qNGcak5#1j}U8@}(WUIqQu38Olc>`pItbf5Nz^kIH^^52_ zr;X^{3boOQ$Q`dCJlB;E>+7HGH!ZRNe*7tP@6v|Kuw>1o5)JPl?4g^ z<9*4&FlynA7OC|~ML;)zqSbUs5JRT@3A;nUHaOnu9mXJ>_04r8A`kz2;Fm*i5j3MG zASZ(tlvGvSa53#z{=l?y)(qp3jB(x5G9m-eVK1u@7BlIe=J^LMGj;UWeUOI6FR)O? 
z5)`d&0)oLx0mYTxNV%T*uY>oNU+Qq)_XM389b0L>We$jVgtR( zhBV9$mB|90I&-&p!DU8yrK4P!ZCMfuawesO5Wwos$$+O(yw7&VPP1K~Y)2oZoP2(e zg(TOMfw7yI0zP-?`*uT?cqB()viGLka$3Vm_@5N>Ird7179nL8{%x5p7rm$EIux!E)5hbzma0JB zdDsf73VoE_45pL32LvRFRq+xWP1BwDwH|l^-1{O#xP#>>=8e)XG3V1`z({W=AZ(xT z594JltKIt&XYu2!xHLZ@R=Kej{z!WYFvZ&ynQ;6`>U4+(g=Bqp!{$tt^ZMp$x1=jZIN7o%7u|qHu z(jWI!(BmvM;TbU?gVK9|W@t7T6aTtz?BDk`{xW>5MrT&K?={1jYJ@S}%+3O^aHP`T z#kw2&K=RwXy_ZnPUFTI%maq1rjg+`c*0l@>EnJ<+(U96DnYW zL=C&F4HtB-4M8SqdmAyraq;m>1U(c1`R~ z+1aV5TH35=sDTg*)KZ%Z+jv;tr=%G9f$ZVXUa}cIGLM(uw93XW6Q&aZl>Cll3fy`+ z67QvXkSDAlZ5R6shr{D-jOCU;{s&H+CW!#BvUA8 zv%%x1-1u}S$5O-^HYaofd@J@HWhu2Dx^i$oG8OB>`}H&+LTDWlQU2!dMI-(T8VnZ6 zOGFwUiMtH*M{;cmwjLyotNwkw-6Zymd8Sl?oCqI;&2`87-`Zq(hB%1r6ty1j9+e4H zX47M|qC1NLOpLksCA@~bEO}Za^Ts#~>?CTe5I0{fQ>f8~zkxW2>zv%RyD3b#_DW!7 zQE{)P>5;?eliu+RQGdrMl2!xg?a;4s^F`b|;rM;if^y z_xFco9##4<6r{S4a2e%($l*+PbagmmSyIvJkb9uPf3cC@&LM%9`={Nl zfhFB^JY3*xe*iRxZkepad=|1bffgFm!-rgF0tp=IQsjyGe$X$(X-c)z5X1!IymJuVjd|~-_xDmSPl#8!W$ex~@0HM3nY|!MKyoUGYsKmD2r1AiH zPg#~ZJ(k5FALq3QxF!sO)?IL9rSiQFaHzt#Pvwbb!(omRt}^5*oQQ$w9htk7`DXQy zi&z>NUEXFq?>h%phy0|D$;V6L;W?mG9p4h@b(0XTM)KFj?+q4}Nt>UvXlj;QYj>U+ zp##v@#EZ!}*ty|}G=~Sc*O7u>HqD2xS)s+L0bC=@15FF@KB8i zn!9;Z;ch__0gNzfk8h-Uus@73 zV>>s|D$aTvVRv8v1%Mqj*P{GOdsy|0zmJ`^P0gU4A-=9Va2MVOm&+7P)N^w7R5dFW z2GdnW50D99uK(Fj-fEpHZD0*8NB;njDOW8bx{Isy4nOzzuA_HS_+IlgD`%h+G%i{L z%2@zWq?%~;tKgvjVMm<1QLbWjXCQw#=?|nPRm3Hm!V|!Swedpy&Xye5!%@-Sr&sR? 
z@t57Nn(%IJ&iF`TClU0!H()nlJ$h_)C2NnU7S(xR1*>RPqAu@iD?9%AMgg?HobP+H zY1r2hk(jBja?+`N^B|^Gb{W@2Ys0hmaLbPZ{)-W6d?Wgd@Hl<$8U#t`B4CEsv4IFrE>J1$H}5!Pwf5aGmTFsfWtWzME?zfx}66AYrL(A#oZIOS)18CS2sG*6(_nKNOk zG$nYAa1gH~gYokI$#d(6X+#yF2-9jxqvePq+yOjj7rQfe;hYsCFvK3XQivqq#lgdj z0h5j6-*oMEw^d@g;A1iA)uTxOcqmu~& zZJknK2#MfXo>BEu+zqROV9E~iq9jg|f&=s@5cRjA|Vmn9*a_{R;JVS{qWv zeoI8)X(@zh;|=sZ?W$#RP0GZ)?{D|~|DsYRIt)%pJVAB2+DZgX%K}mOI(>^k_Xp}C z&6~@!aU!L4|H{%GD<0fhF6TbGN*j(A+CA99jZw?= z9iYTFLdZDow?!3BXzc7)|7A}^vlq##QD|l-B+OIG4s^J1Dg3Nq;PyE4$tAJG2Cl8% z>12~w=NXbh6DMfLfz$oQ?+44Jrcllraf93N^}E^is%I3IYzOv|)`10ifby@^gfSwD zoF$VttD_JekbCl+DHb|nHC5eo9bd--kn=6_Ee<@v=R{KblG{H@{#}5_h1DjP*tO%L zmn=Gilluj|LkYo5?@`hozL)T3T!H-(r+b(LAN83ML({JyYKZv$N>{1K-G=Moa(iB5 zR@r1F;X;P|#$LZ=I=UOYwoqHM=u5^?w;*mZ_!BxmvsR+&QUDgmKLjRiE6AJ<(gI*8U@$0@;2eF8?G+ZY;BQ~eTJor9^fiX9o8c3YuR zC}mQ3fg-SU1Woef<*XXBd?9K0M&Og%xG;$~f5#zQ*@M~F0Rg`3+>vhRHbsukBt-H* zAQ!bpjCbSQC#-#ko-_zjE&nz+?c7?aBX>m_qEq#&r)4WMq|tTf^-U{KwQy2Cm& zJr`+d>+*@OAA(wMagARwSCZR*Wgo6wT7O;D(*OLzhdn}je2c=RpmEe{qw;|*2{+_l7m7g%1u%VJwd3)8D5b?j3M6eoY(_II zD|j<<;c^cjHXla}1`_t7o2unEDAo6W4?fTIl{WZ8IfXeUCE6Z$rGB8B0{HL?QhI0M z$~vv&Im}m|^4x4q#H~p8E%(d1@@0F1z{eoP=jwK5v8dgN!aAo$L-AUu2h@Cxd zB+%*(SbRi(+dOEFFV@UqQ0J9Df5!5sDiqsnx|_w(SS+0cs^^2w;-76J)(__}pU-*W z>It?_f_reJ49Om_enP*3eq3dHz18HqJXl-n@;|%S*9!=iodo80$~k6IL*uK!f^tW; zs+SK-i8jbk7+0I+ygut8T2Y-phd5|!gRB!wp9$25oM%|d-fTAOlPzcs9hR@AZsj&* zNjPkU!mofn*N!W17CryHfXu%^_Srl94{INXHX#?FxMlM$w)8=iTV9NMUcVsKd~}@Z zvEDWG#>~RJ&9BH-%cO2yza6mYCo&|Yw{x25<*45Tw`k{vzme#gc*B|tp z%1%}jHl%XI-XE?+3Mux+l3|Hcb6%$-|wShi`-&m@l?yyQt-kjCw=^qd_$2-JG zK5)bJ-tePcu7a_$QD_qa%P*vE*iMjl4=_aqX7BS_71x8l_yh}ModDKQ*zFc~*spLz z1VwCZL!}!Uo^#DoPnX#~6_J&m>A>FC2GWbU13l_Dl~vXT$H#8yfZ~qq45$q3f=`gd zUjXqm&c;pVhex#)AAywC7^y^w8)IQB3JzY)_RZK5F;J;r$;QkgJv1;%DiRom$uxus z5&3G!x;~4l&~u)04=!iAWWk2MB%G`>BOLp8t!Q*KO45iLTUm#mr=zFT|CZs(!?Low zypG2|x+;cQd*HxYBSqS$7gVr3xoS#s9^u2%%}i~VoUa@N4jGgV8k_I%|DWOh_Y9|| z2N(OD_{)+LD%>A!9NkX?Jc!&HcV{n0dLOsLrgea^S03h&R`S?FRYf2C3mi+=`E|?) 
z!#gwzoBTg65ZnN(r37#I3l};L`L3=%X1qAGo9eA)ov)qw+FR&aJ2ChuuM=#)B7vrU zJjQsg8B;-6R9UarIaZKr>`VTMks35y^Ga0y9DT(?m=is#*ZqjWHesnx-rB=>!~TN^ z+8o4;`D5GZA9!KOmqJS)7fXmE#BJ;fBAU^-2Rc*l&Vu+nnxE(I_(=rZvU%=9wV_Gl zD&P$$I6X^>=|ez*_`g(H6aX^eZWm{ng(y`0Ay>cblzM#j0?nV(DVxa6OWt}#b~Jj7jfRV_h2}CZ!X&^Lk!Z5hh(slC`IKSgG&j1VMTQdYAN;f1Hj-vsn+^7 z7Vy*0h9n_woWwUqsc3d$XYem-1GlzTAQ)iY$}B#a6QcNKcazq6sK@)JxC!Ie=TcMm zmpf3+3-sivAn!bJ%u>Vb*JK!L8?R)-b|RkK>`%A61n?Lz3*KhN{w?W1YMCHz@*|?b*e*6BcQiiYvrxAoI6{ zJ?Sd}p&(kpRkR?{Ue!JO1Kmit=czYzFrVTPa0zX9De{aNN44u%RaU(USLK?9bwy~t zda9Nqsdf0bwN$Jo^$H(N6I7;{7R@)b?1<%q$kOB-0ujtok)%IdFft$Qi)>Yu6HJB}Qs4hUa0mFq3>a)$5y z%A-L4E1KmknTo)Q_V?y*?l&D3G{jJ$KLR%Czd8szhOh!whNT37E@ThOzIRm`O02?^ ztIG^{!jxX#gn!h{_h-PmC~PQ^b#u)Z8wQrko2y*t=;WhcPmoSw2MD$YpZ@tX^}Ug^ zVplHgrQ3bKeNXAH?8bR>DK_k2tS#XG^!Nx4$A(dy$4LC&V^S#8f2n0BX#7LT)nSpp z<3lGt^J55zkvTUJKS9+)Dxbot(|yTMK<^r9rwadsKqm9160{4xQ2a%jc`lai>!hZ8 zAJego|`! zb_|aTh&q1kx9?F&0lOZ2h1tqRud2!1S2Me`2f2MxRL-m4EUjxy#_7HbAd>G5o}|!? zZ6cThn=?j!G3Q>A{DT4PBRp-o^G~}ifPP46srt5ATe~^IR*p!AZ_h5>mP0^>p{=Bc zP$}~ZEa=EE3JsLr*~F1;olgCHj1+HW@x6*;HDE%BmG0gEI$3g~MNOHMO@rA&U1c3? 
zY`U@;Bm3{gf@mcjmLaXdKE%t7bpwQ0{Am^bmh&6p)tncQ^E$;?D)ILY@Tw!?W~X(b3j*r!@A|)>3@g8 z2{GAGnA?BLa8WPlDyrpWx6%9PKDSZbc+Bb!Rs(SDnTy7&Ixer+x(tXt)%)CZ7lOz; ze*HGc69WHdxc@!FkOFj{F*$;_@xncZk~gvB)OXE0n-t`MAih zQkfM)<>D5kaP%;o*FV7kVOs|K-plX$5u+*VT;L}EFsvW6&3N_V!{>KL5I8D;-uhb1 z=FwWLJ?^Xe7Tbz|R5#I~AZ3!(8!Rrd^iUfh)vKLug?{|I?I~mE?~owFl1t}ZY@NFv z{4Ktoao!K`>BqaVwdb=-$f~5rjS9ZIr_YxeQVjDwCa;$j@eF~W1PJbXxgFD%jB_6T zBK;g@)P(IAQ-kZb+^fM?H`fCqC$D->rFRA$sE$51BQVUoBZ+0XLZ(G^kby?EMv4G@ zj$P&`*cU?w(LP{h__%Fl#$=q<_n=k3CyVTT6g;@;!$*vSu$QAmncNIl%#@hjn6VX< zMJKU=SI6$AoB6ldj=2D#Hl}i#P`H(YJD`ln{>8JNK!#@|k7QD4))>Gd#p$Yi$c*W5 zQcILkQJT@TQrOE$!xfS4;X*M}V6%IrVlZs7uzawR2C@;i)_j?Ue}77R!5j=E zzKc)sU3yFT1q*1n5V%wm09V(#sUTW^zr`cICha7|3?l3I9tHmNc8X(+C{{@y0Iw@& zoSLr-_c>^V--CAIoGLuB5-1G<@QH2Pwr$(CZQJbFw#|-hvt!#fI@7N(7jrjn@$J3-s=^0o z(#PA7$a1w+Fw-uL!fo_15R^@O3RxZh+7NuKO=|-X%YK)k9D;-f-H=Wv2OJl4GXjtd z*p?KjQR`VEmKOj7h@eE-a)a*QSo3u!HEG;x>~!8jVyvHY`CfctvW$QZS%s;f>fO_= ze9()mdah?WMOeW)pORBKi5RD`VOfCfj-ve-nxx~>d$$iAO&(9{MO{I+@2ofn;A`BO z?gEfnF-btbO&nxx{Unqq@zD0Sm^L(Bf62J4a+LR5h#An+Kt4XJ{-N@+S(`G0T$o|$ znq2B%#u!8d;#+yCyaTvnvjn?I4Sc>j{{(o8ngGN2d?2+8UF;Qw)~}yi!2z^es5d1+ zOKcDyaNv`&&8X46XNI4^-Z57YA6E2{OH8K8A-X0##eVIPYTXC@E->7m>!k?i zln0N<6_F-|5Fk%-Nz?LgIS{)aL!AE0>GoGXo87Or?J&=@eCk{ABS4^^-;anIL~7{4 zqEeB62E2lG%78rgbvfTqab)g79zalWd<9xSRrVCfUc7=Pi@^fJ0fJtehY9m`C6OH! 
z5>PJ*`EoAcj$x*LTEd&b_|dGtr~C`zKHdgn%YKn@6<|bdtn)%Wu$Y7KiSrEMh8P2z zSi(*khrnhZ-KY*_3CKsKx>(a543;6y=2^n)-rCX+GARNY^9SN-gj5Z~0Z5D&7*AqQ zk$F(Chh)5Enm5zoNn&AY#_T3cc=f})036|95p|voWKi%n+;-{oXObOI16@gJV_?zP zB!hEE03pUgU%IZ!kJ+3Emf%sE`GlUD-6Sv@f&iAQwEanXfG+MHr`5X`0~2rq%PV05 z+@(blH1feYK?!JA8edN=K+Vm6mF-=Zuax__h2^Dj#?#PsZI}kBzSwce!_W^3fW$x5 zy01pjmz=jKBc0sqw_Uz_GB3^v?lc)~g{b%fK(EcLtYegRakCWs!AB@=TQBgrY#|B> zR8GV(=!Dw?cw{J9S~+6dWDz8Da&1DWoajfSdY(pKP!{5IS-Khm$fk5{oI+b>+_p3$ zLipl)&wB?V0!I#uI@apf8hv^Jf(!oAXj(6}r=juBVKV;Sx z$+i1yV41x7m5-7`P`V-j1b=6JwwVvW$cpy&U0M%bb`@KaO|X;_m{6?#*x=<4JrV8M zng1j}H@6*B70`Raiyt(~(O~@j8LN}qJ=69;M3SD$jIjV{1006th}J51fHY<3QP5j% zKRUsm!@rqg71c9$S7H6PhAZ}rOJIA-{*Fdi--XR;Ej6~OEYJMG_4rG&Us`-a8mZ4x z|8FdLY<7^XRmui+GgRRJhWpX3 zfMU5{hXl7gL}(WJuWWOC-5pG_kUC#5fw1}J!d9gbps^t+Z043cV;{36)16(#Z?PG| zELFbS5OpMh>iFylkoc~Y2((HKc2Za#yIa?=gKMwZg^)xVm0W6A*N&hCFp?j z+%;POnI#iEk#S`vI490K67yoP75H=KSK-X4jqxO^8|Ybp#ILpK-ZqB4n|a$j^doWM z1%IO$&!V5d@%S^U-@2%Pog6J1 zLv_RU8(aqcrZ8UDe&8Pyb}2#*{@1K8K{PzAaKgixvRX@%iPw>7>CYR~LK3CbjrmK|JUL zj1(GB(xl~K+9Ciss62aeWvh+lbh8h}s$;jwlOMu73O>%pRmaW`FF}Aa&i(WhKz6hd z1wCFS)ryY`7O1Q(?jpdL$CUPtPqqkw zloH^99?E`vU*CnL4=jWEqXUL3(RIsCMM*8lrw<>ZdjMFYa{>mUYv)cImjMku*qj}# z<3LXbMfL;Zod#VNk8jTrr|odm2WGDf z1A;zICIOqpdbD`^3tJlseq&{};qeypx-}YcK*|vfkumfi0gh|rA-z#Or5BT;{7O169BRt*U z3DV7@tHoZ@=zq(+ADW{J=QKKf16)tRWG+K-ceJ9nxUW0h*c25;J_`%aG8y8q07<-w*Gx5SPcrC8y+wbnXu_>V1I2rEN6SCsbe0q$fz$HuV1 zO=qO?KJs`!!PW2H-NM1ZIBqR#hX-a_0JwY%-XjG!>xnvnV-K_NH8$b52#WpcR5jH3 z^2}IzfZQCSo`Cm&a*te||3M2wh<#sGKX!H7socwrz`s=pKzT%+`6(=yN>5N)vUfq? 
zdHZ{o7pA@NQ5LJ4pDSh~09MpBJzE)L`24UmTg{1EJ(v-k$c+^9S5H{c)V9R3T zd-D>pNRvsy6u00Nhf$+TuFf+KETNtfrY{W&U@#)+n@m80oTq9y2{yKO)d9=URbITM z@R1{B;v;tlIOiow2oB{2gA_#^JpQr^E;Mwoh0(56a!L`?evsE!^@p#W~M-SaT!}Rt>U$E)czxHAvUFx(p zRcZJD+HS6r5xZ#SZrPt1FzlXkqdS@q$G5Lib??+b`KDKJb=4p$0?s`&|W!KdrS-*^oGs(rtk zM;CT$Wva%FBPoVHfb0dLVmN4A*keR#i8(UBbb>8Ymj8B^>GIHB_ui?gPQlVHr}6&z zbKl0^>^=f`EC=^R3o933xD!xp zRy8DED;l1x!w&wF(UM^)PCE*i%EQzkZUAf-^~jXrV+2}*H< zl41@fV%k04cPNDN z(sqD0zqOiWf%LX7xp~j8k0X&SB(Ic@Utn5?moS$Mh=~8K;R?5i=aWSGcOK^I@20z8 z&u`zd(e8%#sV*)%geo~Bb8XM%)=Y@8deRd$OyL@qvjzWexc|A~wB7&4+2@twtjr1Nhey;{L3N1mh8Y3L_$97u1v z&_yvw%^z9Aq;I1f7K2@Whs>{^+tCF}EAxw9<#|XT&mE$i@jqY3flp!L- zklB0)=Rpi~*UxZNt+v%MY&Z?QCS}_v(mtNq00*NBff4t(;89qxW`wax$)TwF=@f^p zrbSFmYf#hCL$cJW=`;F!sflo;Xa- zMQmKqfsF9Ny@^}KCHEm5eUVbayc_tdQGdmYna+Y>xpO?4PuCCPpm2J0YmaAL_9e(h zpR+Bx>)axvjBXj;YN4`nAMw*kidY!rskM~X*-$EBP9Anc1=OYKs~ z`{ehfc(aP`&@S@tls%Xv0$R6cq}YG=OV8{ps#3miIKxKbUg)aenkw{VCEsrm>b#(k zI34^*%{7pBMb^FJf5DsO;G0|o5E`SEU68t0fqHLhOagw;N>n+6Qk17HsFNdKM|;(x z_&{3(8B0O;{w(IQUCwhocLu3nFKZ^Ci$&(IkncaOOSSl8Q!>GxpZ%mM%IZagsMAtb z5P{#NuZz{|)=qp(DN|Rb9_6^6<;7+|JY(_ck?qr}y}<20G2`D>9QEESjGn?}@3XbycB5&w0JPm`pa%hvCxj6b>M?DW=J z$e|U!3OF@G__#W+MM-hDqa>T-w!#Ev`J_!Ws?rY64SUhnSd@Rkgb|#n^+h=ud7yR7 zE+jxUb(1YQ=aZMDdkjM*04!|x9>Nx~H{nyqLlUJ8IagI9FfX9orKSMMP9{o?(sHQ- zg|V?bPu*-y{}Fi@>b~~}_Fo+o5Ugb)1Xm~mjBJ(j06LGrPcyxfX4zd$xwXbqt%;Mq zM;3rZ=0%7N;Vvn9NH~i!z87l@uBbImW8Gp#*2B;&SQxS*q95?9-xF$f6nVJgo*WOQ zKgFgeODE^ACr@x{zcj}gsDFG1GA+z4GjbtLhqIMQbILKHhg;t>R2>Zsi#SN9lsIs#rl8@Bm zopyfxjr~S{fPGV}an)7m zP!5jAoe*vseD*ykF_0kGyoS$m;G*CpK1kgH zo8#;S?^m>rAeiUCj2@FHMF>~;sfcTD7K|ZIaRcLe^T{||WfJ#%51%(*8naUl(g6CM zUIP@#HvFd}vTF&v1;mTTx}*8e6=i|EW!xA_o+SoewzuL2kHkzujc1;Oa5W;#^@$_V zOc9ldXOfeL%O%1}dQ}Ay!oNH!$it5tu^N?oU(}u+?Sy$!A?K9_A21g=e2*Z**k;l7 zt#LZ71pyY%8ojO5y9JiUspT72tX1J7}=w=p|Bb1E+bC#PE{ zjGQrMJ9RbBg|;lCPkQ+;S6(-s#y~)LCR_c#lidk|H1=#Ur$Ul4ATSGaT@CJUuZnI@ z{n3r?{M{BvS6ht#B4GFi{v6a@FND1_caaG`Pg_*lk=kDlRP(fNOU=UqZly_&2i3B5e>Zlx7=PBpN 
zc!yp;%~ZlaXu~^i@DZ$^ABw7-)dy}l*gFE8rL=(QNM;2+yu;t?q4l!9j=Q4iLbId+ z8~OQA-l^%z@V(RyU0z|0RyE*NqWNc-8azCA;E@NFfO!E#{J>#>Axeb1xUH*<-|P>i z5X6^gK)P*X zYOYwtUW;G&SlSGvt)FZ!>M29^*jSOcCM!9Y(%TObS<7`OzpR8ryJiI8JK~g*`nvND7 zm2}#_h5ke8N3fQK$Ah?w#X&CE{`+t}K=&uhhl)*&thHIy=p=*6DRa2MK4F6gS@H}$ zUNQnhX*^-0F$+cca|I}}ZIIWWUagQME=-30Xfla}0whSijm@>VNnDo!2$WnRtgj5Y z%7S%$cbp1u{0Z&9HC(Jhl4#Mkg=t^W+IUL3+G?H7y!3*~aJ_EGRiPtE+|`kthw4zD zvw)WaPKV{>C#2B-4fj7c9Qu}rPprpS0}m+?dr`l*6Ui&asUg3BgVQ!>%oVF?HbZvO zkf$emx%Sdq1m^E{!$w6Qa%z^s4gQ{mR5ei8g6r1b@Y_y`)fi{ zpk<=S`*a{+PpCFRq|}dR9SDLKAE_uXDor@_6Lr5Usg?!2ef@5=mA`-LgvF48Q;3%% z6ZkrxB=}lv@EX8-qHX2sImmV@5b2*AMlCadwp(}dkwfIC3)Erb`w|jtj-m=ZV4J+y z)v3mJS4jG@V=y4klE%s6%V{lcY}(Nml<)X}l#@G}A06YLF1o0j&ZPx_lvA?i+UTI` zCK{|*SprZs&oXq)K40xvo!V@Z+uAD%fM~I=-t|myOBh+A;mt$CI*nLsv&7gcp!E4{ zP+ML9GHyZBH(GtUXmH*YXtT^ry$Nn6aA@gLY&fjPwU)kB-S<+xluHdV>l6z_o--QA z<{GC3--$XZTN07d+o190w>7r&X}OGDpu^@l|i$Y|Wbd74Sn4c}Im>qlTz? z)5#qc&US_t&a8xKOg`;VZI>0I8Hs~ZLt|s>x3p{k zaV*Iwd;uFe-k$Gm{=SdZatMDeG&hzu;hAoha($^25OK)9G74V&c|C32=ko+1FrH-K zn!YJ96w-+(VrogP#+bESz^i|e{ZpijT9S)Pa-klAQSZH|?OFcF_R%?6*aZjoghm@Q z=n4X^!giEE^;JH|p8EwmcMUmBCl@1?LE&&7H;lb%3uSQh3k5ppJbN2eA6NF`uEKr> zf)3`wdtWNMx89-M(hG)?BCr?EkI{kSPXAA?b0bc6h0ge1e@97Fp1e$J^r@O03vP5< zDU&3P&1}2nl`wMDA^pGp8-FX}R8lVD&9)#d4Fm&6jUB&#c@sUb8n(Pznm#0vU0A&GgzUtCxT#CijAOV! 
zDxSb{l!+F8HS!n2)-Mg7<8oevYxazmzaxgpUJr;;q>i^KKy(vA^hC(!(z_IK>y7?f ze>b4smp7jXwyLfgIq&0?7-H9TO(X~!kp*ZZXN7h3vee9WNpLgyG{o}j(;Pv8n)SjM zeM}s<^e$mAe3nOgxc=N$&161wRMQ&IA^A2O0F#qpFF1T396nJ-RuJAO;o<$M-ZnZe zboOs9j#EO{E;_`)PxN#uYbAtv-I@fpP+Vo-q4!x3_h&rX*u6qh^Tg8TL3*w7?n6iW z6$8xe{l59#RNB4zR}V6ml=vQ|$Cmj6P`BY2^d+o@ldBiOKK=()z#9X*FPK?7H9#aS zd_05B#9~^u0zr^qRvceJMcB3*M+r)l~CFxvG*K zi`*)uVy7!~$mP(++WQFZm9;=*eYh+6nrF11uUYDiuoVjKE03`u;MOLnKb89Rk1~`Z zaN=2JY6~Q98hDL#H;$-beYqQVxn56mm|*U_$@kx z$sy=G;;$Fkp`OK$UuP`at){6Cmdvgct$`r|6V=cgTX(3AwEwVKM79mOx#qPLBrPUw7RJ4HsN<3aa1Bv*@7brp5jo{SL z#iOu@q*w>VK~6(nKb0VS!qr)}VWwnQT|4Y=cVe>vcP-Xs5~Gve5nW0;HwPZh3PUKQ zUa6ZuM_ma>N6T^31bOOISf|RueN_AkUY(?IvqKOP;K|(2y}vPGEWPtu(hqOV(altJ zK&b$PP&}UbSc5)Cnf>4wwJk{vVQM6E54KYyPoZTcnV=E3gPlP>s`vy4=c-I~b`lDd zsTU61WMPf~qxkX}m=jl{aO;=Pkdai=O_WV8pp!I!W16z>1hlF$a7f7jY|M|I*2msn zK6mWAbE(_WG;;4SqNK7JKaQ~-pDgl}Vxew8XjSkoFf;ZB-SXhkQzM)^&8`TQGfe1e z#v+YeM=<4GS5!UTr+R#ELR$0pZW)75-~3>gi@(j@cF28xz3499e{DFN-LQ`cwX}6e zODv%&zUi=mT{J0zwNzlZmMqm+L`VJ`wTRN>7q=GsbZll$K#B1G4fj7coI=*T5qcI= zz@U_ZaW7=@uXHrt*p;c;EnMhLwpediE0`MnejpdT%ZUy^#H( zL*Ny~M7*^DJ88dZnrN^+1t0R=rDWo~eG>9bOh({;uP{e05<(acF*7<0N||oR9OQ7b zsJcyIa%pD+k;!ySgUVn0SUim>veNwj7Ck|V6GNRIY|EV)hvv7nr#4R*@iE#>kv zIz-#0YNR9NIL50;HCSo3>F@vB9n6+9P=E1fZmEBcXPHK58G2~%ZpjP5;tjFxv=l1_ z^%c0UG7)y{X>?w_=5V*RI5_Nn7(4<4L&Q1(WHw;VwUO~%f|!fnd0r&S8E5sZlVEey zQhL%pRd!!4Y5rj6PJ}}Cs_RYLVvJBliL=xw{FLcM;p#p)ty$)c8~fuj5e&40vnm7` z{JTK56y9>Av{#sUUMY^tm#vrVX#MY>ZaExUUr@>+)w#Vz7yXSpwvC7BVTCpr&eD%Af8Mj)@YBfc?nY$+6va*Y17CKv6c{N=I`6%x zU|yL!hryazI!A-@tfqE-D1rLQ)-ijYM1@G7)C=nY7(JNSiITmI&dE}$ zR|!%|l%>StMIayvRSq3Q?+2FXIDbV+ladwfOQcHeyeb3vaKI?SeCdApt=D%0lx;P5 z$(EVDCVYtBH!E-Um?DUO4c(iQz|Hl?o!e@$uC*~7XyWNIaJ6Y&K@xxpD_Ovgj-C_* z7vpW3D9$CL5!S7@p5*@Py>D+2++t#WgMq~7H;1FhNlz$b^r@$e{HfJHB2AW2>Vxo1 zyMRT?LLx%{By792j&4mprpFx)L%C!!?c7X5?rR3N25qJf z+$#4$Pey^MsQI|%_+ZN3R!bY{IYc!Z?~|~X_!F4+z%CXbwNo6>t63cnO&iT$_H=OZXv^3pSD!V+?U7j1H!jCkJQFe{(3Hsjlg>O zkokTXXW4q`1CRVsa9gp@t1^qWLrT+&Mdj*HcNG@})7bH?4YztnOJeNX0 
zVJ~=j30YUH2-J;6NWcOY&8nF0kuOWpSikYQOF?ngnVyYUmPQog8R{OL-`$Ndg}JFnkS9l{OB7pw zqf<2yX#9Q-d~3bkPtwAoP<`CA%b@a|CWV8K_1=-l)fN~MdY?jm7<^moGx|Y13M;Rxjl`mYq zE_(?26HM7oHqpU~vC`VfJw)tThYigSP3r@04W(V^uT@Gae183nZX@~xJRYIsX~&}z zOqCMDu+J6(GVwgL;>*&tLN%$)YzcFP-SvB+c?;Gpmi^Jf`>o55<41N~(c9Cgyb|C4 zpT)KKfMKR648O^MmwWlxt5ok&k9AQw`L3GSm}Tw~>!?0xph|@dDLdrhv`tXMTc?j} zW~2sRf^r@@?$%(vY^8AcgI-7|XZa2kVX!14FYZQWQS?(S*jq5!RWk{BPzxuN643wz@ zoKmT;oW72_>5LV$nb~7zK|AjE?*oiv0LRX|$G9{PdrkQ8Q}>VGANM(nDjWxxU-N!N z$_;A4w;!QJZOl_(RKClRa0?dJ9m-;62)xTs5Am+nnm}*Ty}uz7E*Yyg#|9fU*9&IP z{6}$Of;U|Ty^5jsG&+gCYpv!?`FJec?S+LcH=T%g8KA@W>LZ2;-tozgEll+P9y;wf zXg#xLOyTfe?K_(65rOuQ9$MJU^?(;Dk*D?u7rPRN_{63a5CIX<@rRp7PY4&oaDBCf zn_-l#@+GsIrHh^kIHfEmG=(kyJ37HS8|9T{#(PP8+SF!yGmb~SLaAUwuE~J320lj*A{JsSAPcEH6_SG zg@nDXuKO}+$~A*dWveoY{NHf@bHjlhoLq*#N4>g1-4nyBSu{4Xua{xNGc%Bn^i0M_ zan>-W(-Lz>9PFS;RJzI$#y!%L3YeEW|Z}aK=GRyV(6M{+v4taHfD;DRT+qW zGL4^Gt=XnC+6_Kidn)(fbL)b+!n008N}O&`#LuK*?a^?;mBprfsS8yuJHa4ua;a%Z zw}Tmn6gRIF*mRyk`r{hq8Ub?JQe_(CEM0mz#jKOCH?qBmQJG2?7jh(kMYyXMf#k=1 zq!li=eI%N|ZHh6J6k$!XcJ((tj1hq?@9A%oWrgeq_?b>g$VY#5=5q~l);;%B9B@X{ zRWc(RrEhokp2?70xevn3Xq80>SD=n%>cl`w(s5`iH$PpiQ%NgT7Camm$y`BeJb)}! zRjy!z@Xl*CV`3Zg(6%Q#=^ZgpJsB7mQnxji3)Zc6s7MLq{GH-xE&9C2&4iu{D{fy{ za<-DVEEisX)Iq&QmJip61#t|pf>Di)ek(8V5bSu!j?)p>kKoh&|IcSnwLPQq zW5g+?)uRlD4dL_vp}x?b#l%-y{-w(wTv_-{v1YJ|-1-FCHzG^{)TvQ($QuW;XGK|h zC7$<^O{I%V34S<3vltiL{v@%b#pgJ1so@EzMkCQ{&wN&d^K;x&qwQjz zXNv}9N2$%bvt~W%3xAclN5+h8rY?i&njTW+xLpz<+(rR(8~^n~+^3E97~Eb^;LSz8 z=29RTvFvUzxGQoVjg&W7X58}VEte5WlcNZnSj_r&GUg^yN=+(Fwu%zvPrQ>U`!(a` zbC!GNP_nYr`sM75h7%euT1ynM8G#Di8Rg2P>B_@z{7iiwE@J6rP@QgNjRBM=9$wsu zC&X^gdnpGR^NX8rgEfvUErS&0$K*|eQn=OWLbWaAwSp|b# ztXx|xVI10@$hN$dZD}8R?fQ_4)fNxu6IL?BPD_PQb2kd2)ute6tx*~**kyn;?uRc? 
z0Yq6dO-VQp;T{fy_>{fw)n5468;MK@W>g;zPhjuGiDhcuq1wXT$M>R*v3##)yaD0G zNB{=Q=qMIjBCDhd{5$I{+7Ak%VcO>bo80Wi4 z+SiIyvq72sehv7<0h$J^(Gvnj(ef-05)aSma1hPa4*t>VZVS+V$phzQp1Jc|s9TDM zMH4^$8${v(PGot%iSmFz!NPAK|He@C+IH}TmHtggMWPZVNdZfq55oy3lmyR)9SmPi z8#~*vRlG_1xei`g-by96&R5Rs8ShvMyqNtQrIjEUzYGipr}gs^Qwst8!&*j< z=4FbDB;#OIYrseMWgbH@3e!IQyn{t1_X~eUfzUZ@apCIpdi2#pqXSwc@y?_}Y0!Sr z+BO6n*UjCprMAe^@kqrYu!4k#?dVd4T;g0P%b;Cm6ERd}PRVPhagqLogYS~bNk9h3 zF{nP8*pcWB^{%SoA^HIXGlkX01ib4=Y(VlDS7BoZfPZ2m}dE10aly~S{QCb-9xr((D-m7Z_>}-xt z1&YTL!iOsfF^rpR2wZIzHt{27Z>p_O;zo_V+x`Wb(we{(bnz5Tem=|JUBt+ylri#y zO5cKXG#!-==zHfoUq(HOifCf{KNB5=kO{d39-wY0QMkd6YxM%O3`wFBKB_0p52}O4 z?lDe1VvDR?)yz?+-#4azKV&8JB4o%t!sip+00Mv$8DHEeqtPCEUeoa(@Domd5jW1e<1Wg560KeG@8-AP^FcJJa^Ev8jh@*{UN*rp?Nyh zV4pLvEmoCuIDywuW$Fb&2O5)sSMSXoqx9onI2;H4stDk-KK4!l2x9n7*d9@PrFJi0 z=%lEfDKNn5wA1`vSWud!Xeymb8?CT=FcPLvMIpy)RZ!Ocvg=2a%i-C^Q7S#)r!i0; zsJYWZ%Yy__?(eLSXkuTCfYk<@(14$xc&Dz$WGD8Hr!85$Oh^}i>G%wC-D4>njV&`x6?^c4^>M^dye2lHxuR*~-W@tKnY3)8d}YF&sa!2DTykf5ZLH4Tqd! 
zhvdaWiq~@cWK0TP-|;9+EU#(zw5lSqNb?$t+5(G6>fNPse>Rm}8w{C4q9rS)giG$K z)l$+~Uy!lS>Bn>aHbP(NU&XQ$Idb~JW%on}JTqH={@kVqv%TM5qa>ya-O7gl z1s@HFjW&0Av6c#Z@wajV_iOt`H%Ph%KU6j&S625cFO?V+htt)dmx{2g#!z3;mRh8& zk|AOcOuMjR(|V-ZO$+ixE+t*IXvEL;`|>daV~r|tp+Gd9tpD10*AQ|maGv%-uZq4G zv1G#&DSQr=kY)?MNLG4lQ?EmZ9^PCXs*6kfJX?)1>YH)aM3?Sf9{YPpXuuPJQLne_ z^&LNJgd#fjUnFzamih(=PgK7XdFCh_OB!BHv$hP*HOKQ{Cq{9(50t+n1L~K9FiB#& zBWYdTm~eT|uDF}v4iY9^GBO1Gf}nHUb~6pe-49E+PdZe;6{7XPp*19vfp4sTg@+q@ ze~U(9qjHd+h%ak5CyK0832alFq}#-|n3~HH*#AqhiQx-fX|*L?N|7cq+P;qmFu@{D z-Y*`vq^KbtCPtBzE#_X-B@EPLgtR@ih7P$R$6(V+a#0){JCNvWIT#t%qAc(VF!2&UqhZxkv`+!>3o7u2R&jEu@ct5vbu8{Dj6{hQ~ z`}-hX72rI7I`Ws-Mrj2_Q@j(@n^MaamNe5L|^dYeh@#1`%yKwv57M7AR;Ts|x@av(>QPl8}%f=uGjV zu&^$tDc8KhDrnt|y5CFQuipI@HPFeg;m%f;AnMbzK7Ted$is%PKn>G=>9ru6g49a` z!EEX*24`44m|Hj}g*nP2>P{`}QuLumP?6Y3RXYJWi1MqUbC;(7nOCQ}l!TMHxD(zD_AT==0?9Xtf%q%f#67D!QRy;~ zsKU?avf|YsJp}|#_N(fIJ0u~~MSw&>f*LqKdBBK*1ei-<}YOIEr6CIV-=t} z2!9wDV3Op;Hx($0dligypq0Xq)CpklfPdCIxD}O$omRGYaXA!eoR5c{&oVw&@u<#a z&2IA-Z863KgZF$x?znF^2-Vj4cC49D=joouO~O7g2Q$fuHiRPUmqVFHUQX82?&>#! 
zGLKzHL?c~$iRSE9)zuW=ZGFFrA5!JKl*3KeRy%tJ+QfNIl#lppn13n*MUoSQ4vzeSQNUbaQGIFzCp znO>oFZg?QFJMSl5C#{m15(Nd_otzV15X&I2JI%^{nzoQp_?Ugmhd{cg561k&d>cYxlp8*$gHI$xvNZ_EknsMXLW8}iBHK2gOo0CStI+(Z7 zWLU-aC4!I-l{A6b`4d?qs6~^yPrOf5PNZg#4?9U=CC?<2k92nyXEzdj<|n7Urz*N& zCCn52PRJe9ik|ce$wct^QK!N<11Q~*Tw7i!tzGL z*Q_C+PyWQeJ+}=;5>BL`I9J+njk0!L)7qY5WdMDsZ(rPIIE6|x$NVr)e4_-%r7 z>KsD^%nUZ(@gdWBsq^Au;$)Sh^ep&v3!4UiW3k(e@F%&}ZrasaE)GsX|Cw>I*TS)R zZcY5OJK%--{V|y#Do)OaNVdzB=`qJ4#Aec2%>G(B5upe=6BE^{A_)+fo{P2@F1zpS znbYM2hyhX4loMiGwEpXN8iz^Ac4pC{3P_NH)lj?x!dA1vvDaVcH}Z`gz9KdjCRB5S z5p`GkO*2LqhYIPxOPA?`;)qiCy?V)<0wLi9FANU2{H3RV2pnW%)qEmXqay>ZU&T?6 z)h_SVAU09?-?j0H=fFR zu5(NKSOnek+l|O^ySZ2(pRF*obV#HXT- z|7!w*IA5Ul+>n*SmzH`5Coe7L=bmplQGws@o*mS&2_udgf;w{*kesbr231jEgjF;E0SDiia|uqXn8qZQ7!l`o+2 z%4861SBIOZSD}k1gg)au{V=td(xvqK>j9f@PBts~d@-z`C6)9xA#xe-_H^-6F1peD z?-v?9Z2m>O)frM%SOXbN3y*vU2ruz7TASwqV>U+OeCW7|F%V&?UX?IR-%9Ic*B#u5 zvzA7Yb(DIDZsrfFcdEtvJgFrn8<42*?v4;j)g(0RySg^s}DHow#L@v1-V#d&;1ez<<<~NBvF$ZK=gyc zO9BhlcXvE*n9@XEO$yRTSAM#yN{M!*x(;^acD5s;a;Ot9B@k|XtKqBvFrHJ3_6Q9D zu8$ST*-;i8&DUwJu4F`!JX#uJ7KWnp?g;>{0pP=<5z@Z5Y@+~QA(75% zoM;(Itsa#jfX4X^4HZMc^lcx%;$6!M$Fgx1&mY-7w(drFm)Jh{EC9QvwTb~US*0#! 
zuLF&X5zG#Yn+lGm3>g@j0zG(u%tdX4F7#hL?#u0SeXYc#)5{T#pAx4iknffd?i)P= z;|utV(Ua}v2lM%tH48H3{*LtivP$s(+Qpc15xfz{z0Z;ar^(t{^{fKPP$C}T;Nx=Ud9 zCiAlQ#RMJA^f})&$fJw7@wA8#Fr#ByyngXcMXIpEcBrVYRl!mw9o(r#}Z%fwLB(rHAqZ4D3Ncuk+H1ze(5!9mT++u|f=d9=Fe&MD5xpu;Sxt{!Gq72b+d$NjR9IC>Rcua zvaGj-rsld5DZ3xEK7=4hq6$W1f>~edXWrQfT2aPtH0hmlGL=*k$NY4t1FHpymd3xi zq{`eTalTDvqJC67UzRJocx|5pSLp~77FWcxv4L+dz`zw z=C?4g7Wh?eYZ5FyxYpXY(WGGPtTtfGhh7kt4rF9$iivN;sNL+(bc}i-AE~Kvao)*Sug=Oj;oB;f6)~)#?S3_wX?8_2F>Q;_ z^xpxnJJ-&;vtdo79@F;vXsQ4x*=A`xRVlcyeTQ@ko8c-SmE`!13fQoHwbd^eD3Q3K6&E(^4o>FL2&AE8CZkJ;&)%*m zVuBXGfUz>5M^q^c_icV&C1cv#*NIR(f%zv-DLEEtxE6Eg%|{k(d(^|_f2bDZ+KIJ; z*!1=U4%sXVuVWX`GIt(IP2g=9gFNF}!#PPKynTOhZQ-jM8z9i>JCecvP8F# zzkY#TTAMeg`PrwJq~x)2qpzlKlrpVBQRcXkS>2MXLEF0K8;evO{#D2!Zbost^klZ( zt9_IuG6?9~rI8lF1A{ERs@Hjy3_s!qcX)I_b3nZO6q3I|B|yX#7MmERelNbn>J`~N zKMMvsqHOn-EuVB1(N7juWK68X=9HZTCkCutTBJnXR0uFXor~DccXXi8B@r!P^QYgD z;}d=7;4 z!UWv5ZQHhO+qSLKwr$(CZM*xlZJTqFnLluox!>+e_7A9}vg@f;d%yL?CcB%61H1If zzOqM}4B`B6$m^{&%`oOt%FP(A{S0E8&BhHjrJm&3tB*r94Q!$QR z7Ui~(Yy3lYqm?^1&{Cf}9a$!md`U%lr;+XkgsCUBPS?P#bk~WT`;Z|ON?Z_<`*8*z zOYdUe6g8k~?-wqw?q{}*5$CWmhL`bvIo^y>sO zJlyo%dSY0C5$`siFeg2H7OU)Iqvw|bPp|nNKfbEds*}<1d5fUwa=LBz@@kreBjV5M zUr6uhzvZOG`ee2eMY!U_nAG~!`%P9PYYWUJVOGZ@P15lpB0_O=Y1*|KK3{Matmk<0 zcTr{dd7Qms8;l7f(hRJyR9b#^F3F#qkqhoQ2*&XExn`qmr%rGr8g2p})RxS%j5bewWnFe zCv7wfW|d$g8mi(?z8Wwwg;}+T;2AOKOJQWxFb8{$1H1T+Gj$wA!FPib1yQRSe12^S zFqDC1SpU!x12PXT2rGP;O<)1v1Ysdd2$j_g?N!%do@nyQ*gvlrM^gJOK!>?@gg%6C zV^Te3`DQOX?Yg`&A&{dwbNc{`S@q`;=+n&H&MJO>Wp@e zZ%zpQYR{8hE}yE4k^%I6GIwWkoZI#Z@zJiyJ*ikNSF>z^D&zUpj9NBDY|1#XkHlQN zj7|aTq|P)YE|Z*GThOPDVArUAECU6gUN_4*{t;dwjIr#+A$)MJ=m6lpsGk5XKu_f< zkTGI51O>%!e9J_Sit3`*5s_2eSxVt%`_45>vBYMBayIr5y)pw^0`CEtE>+QCb?N8o zWElY#l4=g-D+l7Lv!rj?iu;u}HSG~R?vWYBRbK-+(NaTA)`psq&qS%1Axdq;cWIBY z-kI!>j4$XhbmT`jrAo?a^K%zVJk)|XB!YpZ2K+nJ?^vYNu_BXAJ>@efg>CSew0qHPl>qF@(DAKQk zI%g70fTDa{|1AMTagVT?{WX9^lK->Md* z_a`xqs`B~a+(6u`zN!p=s=Vv?76OG+JjthzXDlv|+d)0@O=7e=Yco=f;5g--(OqRH 
z?j!)g$|3$8DU@`^hYav91S!?JnK$Sqnu%FzUJ2{txW9sx)f5YcFeVNPNUcR+d&^FJ zJu;ZuJ8&d%8ZbY*PYH?~n$40j2*#x5oc@N9q7VdcP#SB;q^BgXS9sh@;Ep*NuH0X{ zdI}ma!!Z-NsSO^6oT{Pfm&@px+O227mUMy%PyqvjFydo`8lXR77aMrg+k+5I`t)Xq zA&xhZvbs!i5-%H|9GYB&MO`+K$r^R9lhaEy!!*xZ>Sf%omSSyo#(|5w$rg~Qz!Fw% zIpjX8HkQX!28wT~;x>wQZ$h*@|Eb|>FMR=UW|7W=Wy5V&5qkXeNsEt(#({A*;^$?#6d($+-|V0bggYrjW;H+7V)j@RT%NM{#F1ZCltvw+CxhQ+HiP-P9|BE_w%9@0`=Cm4Fe; zpQNJ>hseK*jdq&lL~}Y#gqHFvus}7j6IJ^QnbMTk96~Ew4kp|UuS+#b0lF>BwW|c2 zg&+(u$i_;qVrfsbbmXQToIpI%YmjdRis0ZZj95s3x)bvK`0XJ2cb{B zW&07F-OJ^M36+vMo!`!T{K;?aV8j>Hs8+X+FhqlabtvMjd;P51?{u~pH-P~Bq>SEj{@`9 zeey~sG?8egW2j=u@j`)b@4UB>(F~%0B_woWoGkO%KPvlP^DM88dO){z*DmG|optrD zhKqy`j?vplX7*=g>!fK@3XNa2k%^NlWY{fK`!$C*9{c?a+ z_+PNv1+RI2LKpy~0tKB1Y)!*ySe4vs7{AnYk%zhAl4*ph9=+!!GZEF?ZB4|n*yQHu zI_PVo>O`8#K2np2K6&ei)dxay$DtwEa0=Tz0?N>dsF34Ssa2Zb(Y<6_*77zf zz3)Yr%`7mW*J6aOU>-j0Hn=KKlRzjVxXad5!W`Z7y-cqT4TiS0Je}AF4}Brp_kD!z;o;D6dc|qB_aWnnsy${g2SBbLv_!b> zM~>#{E`uCGuKUcjU%$(v)iY55?Gx=2DdX;f0G-_7P=3i9DD5S;{E3p?3Op6D*RQiB z#f_+ch=*sy#-db2qW(T9)lQe~(-A%O`WdvfOf0|LzyD;fkUwH35%o(JYV6Cp(+KPrPHH`N6=o3FGlk4+2*)fIM}MAeS#KGx3*rK#Xto6 z(}7Z9L^NyX$cTLLBk_yG`zE@>YzRvWsesd#OrcMFSxxup!fWyK9XhMAOy0F|En91L zHKHPlB&x0?1M$ya?Ipj|zFWOTru}s7O&_?8eUqkC(6kJa*0%&S;DwsfV@CfBaJM}b zm#~c??A@b}Q<|7551O1bkNy}h9ntV0_bC87U(#&V9#@kIDU;cv8ZrT+N2i=I87rJ< zI^7#E{=_zMU&slvSH>`54@-3!bnfIzKh~8c|1@!ipOGj0;qZD;+EO0U2tPryh6~fB z20o%^Dd#wWAZas;b?!z1Df<9%jJx!iJa$wtDpJBydAXNjG@0(4i>k=`E>p$8EPr;W zoT7vw5w%?gD5|UfjtV7+;rERrm(fP_glTdhO0619jXFd<0$J1MqyH9K_2pB}!f0_9 z)leE$&XHKJ5F0?EhbM+&Cr~odh7y}Ppohhwye~Lg{)`Oj&RAb zVq?;F_Jq|+EeI(lh|$O}eK52-Yr{D6TM26!ppI0~g4UedZrq@!kQPAxNXIMdv>GfOTGDZNIP)tQVHI z4F7kO8rLs#@6QLB@V8kRI69e4bxoFboxRP+H5Xw`6yA4$aKsDhsetXUYLYRz`jEA% zz(ESC1-A7jL_zZ*NE;KU!fOR@yljelWT$qm5(}#o>D0Q{EE~!e+?oVUO8WKVRKvsZdsv>^ugKs4ymJA>{W>1M4Sf=|beqS75 zc*a5g-wCx_T)A44<|njx@aw26qc5$?Xi7T?!`#G1oi@H-7YDoc3a)lJ{L@hi1ypA! zM4n}8q;IQXKAu~TB}{`W3(FO_-SA*fp!+X+Y$^J8EgI5fk_z9@Alja(tgcNg2y@zp z!_~SV9+5d}f9tEKMf3&H*R|2_(-p&gk{Oqv`)! 
zCV&6{CXfIC$^V}J-<5I}xuFsKapT;VtZW{V$0AzgwP^ z3|%Xo)xzCl={J&<0U@_;UECs_Mjg1JwQ9N2E&p}H;cdOk*IiMq+7?iq)Z+vcnfhA1 zRp6^j!shuWePz<-lE~t!o3azhW3n!H?5@Fv*&NMRp06bmI7yPjd^l~I+M7VVxV}}i zRI(952+NRMVZKqR1aHe9>k`p{?O3(uLc0kasXYuj6~G%*c7}WLIzUYwP0fg0tY)W5 zzbW5JVc-lCcm+V?2ImgrtvgLZUKT0djz8A?@(OX;8b;oImg>-Qei%;_B%`rw-Ae5V zo&fZZe2)_qQ@UY;NPtT3%-h3q29LvORZK&HAs^70p!FF%kW6)xsmN1cU1ig-zI+x5 zJXcc6u#%B$mxyA*%Ej*7qUuDj%Iqw}oJ?V^vmdJcp0o@eMqv9| zl6#ot4)OMNg+FMGJPYtg_tThkG5|@uk>heY0DU|%X|`8$hX=QK+dPx7D&@De1rCDf zj}kW0$45bdsnIfJAZTD0$ZBT7c#1rZQIK2J<8kOEd2Zvf)4sWllCn($(a}0OJ$Yw4 zb*;IGUT4`1Y%H1+mO}yK7;=qNd$|c*+1dbUl2w)p!Sg9uQV0QkP1I%!iIDID%u`tQxr>1}_@Q-o|b+N0K9c$D;_FgQ62 z26}|@Bqv3!jpfyC1J7pHDvNe}FTAPke|-9ISaToG^}UKzK_G_HDBlphKTC#f!!7O3 zscK&tP%pfy;h)egVfxAkRI5->iqC~Xt|YMMG1T0zAT02NCqvehcWktYod?cs2iy|z=e>dfe??3J(pZc?C}D}ZWWwV-aSNddto zrMIe(bW$evSrN0BvRkMT-l{4xq1#1N9*D?u!`6}_!-cd-nV;y6rPGVA6X9oeul9A- zaEyaqwMjgx!v#KNV76!)Bz~OZSs+g-M*;CE-A1{22$9(xYW&bq5+(<{ayLX1DbTqc zkXhkCa`ftWTg*BZp{y#(A?3|>%m;UA*t)IDMeK2?J~|xP`qs1GHtxlpIkY?HKTbIW zW9bAVZbYP4_%Y?vAstD-BpL}HlHF|aQ~AA3Gh##2xGG+&X%6kP)A}_oci#F_5eP9+ z6<3oylgOp0z88FeV?qeCstL$*jkWk0Y~ZnsSfHCB{8iXf*5xUa>O_3Yf(?jnWX@xX z!(w}v+-GpXpRmQh2a1e?z^L|P73(+7nnUp0DJduLV1OjlCl8kbJum^Q6J%v;QNNa) zB3~7=lV)3^_}*aM5k?=vdKe_e%@8nV7EQ(hQ?s6%W076?+nKX1wivR?i(8QFOl^N2 z7qAQebZY0~qk18ID1-jyf(QH8^d2Ky9a&WvqYU^ks~X#{tTV^xnxfBlRF6QF`SHh` zQer}>vz6QV28rV4WSh2rm{zmyLRv;u6e8_e$DY*ZK99fBa@1yB zf3JcPV6Ut|Yu3H*+|gLg@@|*q0KR1y(mbH=2(`co!>j>^#ONZcOd>2*9)XB&QUBhC zL%(c-$7`(#Ci7GrEYx85-n7*;uHj8F8FxXz`!F}P*x_J(K=UEdAV*p=i zK$zW6L+@q{o8gTgf+MYDiJ6gr<^|eu8JLe#bNUrk{wC7R4s6%U-5l@l^^?UZ*EnElz)O8zBLSwDGUh> z(}78N27v(kbxNzm<`>{jsx&&c%ms~b@c2gXn(YOmfzn?i^qDq&&g{Dv?oX%k6`C9# zM$~e;v?=!6*-OA5geZn(H7oU3yTp;hs!{DuJWtfx`lgdpx2gn|>ijotUO@P|RT#F- zFGU163U-OE!e|BnRtUy1nGS0&P%8-=o<8=ge=CUNqT+(3%M(o&ng@qgvE0Zbxx*Yt z?$S&!$!q~^6+qqLWU_Q=fxXDDPatU32)~I=^F=sw>!!su349ixfa6d$dh$Y$-yi?U zEZrYl^WVlBp9tmDIpQBq%pY&|f~x#<^?%t3)EI`a97N=62f}v=sJd#Enl?V$c%s-} 
zTti2@`~^KF>hGp02yl~1tLFQ=c9L7%IH)U0EU59=UA?T*dJdEtzh%L)g?PKfZjG%Q zqU9TMHU6%yupCO5GAS$8lp0I^m&3e^GPJ8DU2W+I@|LqaH|Zg*Ff&skDVl9-c=UhG zucs>1Shop;<<&G~Zt%Y#^5u3wP-h#jBo#M5`eU%RKuVD9Y%AkQKqnUu0KS(8-gF|UAGIdR6C?gb zFkQ}9*z6csxkTwYPPdX6SW@p@1#qkNroBW4&Z$=o03CQg<@O22{}SD7Q_MZbhTHLx z#xLcMi1g7gslkgK$rA8g{u%XVk-64^z_&^aV@3Gu{4E;;3oj!W^LL(*_ltj{F<0R? zXp{5ak03ZGmcpDqTIf(F)|;k|XbyLl(&T4vh;RAmuhejB3}ou$3UrsM(hxA%SwLXJ z(q)FE=bC4F9T#a6KA}b_m-tyI1&sJ|ELp34FlvDPXwVrU)w1#tBXeuvy-4m=iW3UbM4M;ibdM8B`k@zBu=aYt*pO*AJ}D)HXn@H+Q3s1mtB32Y!Kk4pH1Y) zUrGElRCQL2lxE}!!@qp}NV-sNUZ$cquC7}c$v*L;aIz32N^D*zAJLlxGywm4f)az} z;xoi!zNxYJxjIlM2^^nkjyZqpt-5(Efg01qZN!1m|MvfxrH<{jXydYK#1&5^FK$|W z&KSXH$C-U&cjP7_(*i)=c!;;}wkWQuX(hKHIghIx6sDmhIiv@aP`fhTp%bs9JQ&qM zdOK_YcF=BycWPqJ0g?>bmV7;9?>|92$vp{Gcn%slKz)%rkOfjktn-$uJ5ivP>I5>h zPQOk)K@~Pv(1{Q!D=0#ip=e6MYKrAN2OOMFTg zjy=At&U(DAoQorrC$!XotV_XjJI6&_P-tpDOW>KDlS21RsHB)+-9-b`SZ7|GQ#K;K z=LqiD5q&atLt@g(Hx9eK78hKfh*X@qf_~rOKTv=lWPq$ZO_O$yy+OBXK*|vOa_fMF z0UvT{m!bByw4{V9ho!sM4AN%xiye>XM9PjwVN{lyKr=u%JGM_rxVU1f8Tlq#n z)pm-9i{+8af9-Ur{%gPeOAOc%S7Ty`;04*=@5PqiwvEivqfUqf5Nen-TpN)=1Ju{% z;3{p#o5Nc)Q8Gw#XhY5A7{uZhuMpao~wAg^f30>VQh32R)DeUfL0N7@?;L1k+ zwgceFn5;h2!w}dPPnoEnAWxy`reUW+?FdJXxWuMP$}Q4jPtT`Mu$A%gAmky1imjp% zpr`*)eT6n!r1gzIGcI=qM2_3#Wpbg>ueGajt*ro*p0>`C&9TsDYN8qU>_Qsnx56Kr z6&PUR;Oo)kbcCx?>30Fu^1DX3#Z&nFf|%_Gr>C_kfa<;i&1tRP(#O0lxH-ynSx9v5 zFBn)&?B9gjzhO~|h@dS&ZF+uAm?7k>+qwlg>Crl_VUx=(q_Qy~XdeX0HCXAgGLKL9 z)CHm1Gz#Mle#!(rq8oVwu}k zrFug-FDOD;*TPG@ah>O_IK%upz*Pj%<%tg)1xO2SE z0cC{uh5)d`b)iu|(<___?X;DcliXX-T14f|)0@yX@@v!^&jUD1Y|=SpZ5Gy-{H8xV^ns~=P{nXKb5y7VO^q<^_X zJ7kn!z&X0e`(0e3jOi3Et~6IxRuL&L{{@(Vg|)?C;VL{e z2t&0k2Vqhy00-q#9$;{==0=|JvdTbsJviz$|AmH(tdtq%&k2T6ot^`@^LD<8Kp4rU z(iRX%*F*yN26z$eUh;peVJct(_p|_4wMpg2?r2Y+6YS-JT=FZ#d4=1dBQm7FjnM*6 zWMLT0h6olq+g)i8cayV`V7w7J7Vnc0qsxw(pbLM6wQ*X#iK-#O+HqYn?O&7JDE?^J zNzRrr_~Tr7#+<58RlolhI`?a(N@|bfj;AW+9YCa-8t8`=gSO0@vlO{PWK|gpmd#+~ zCr@U!Nv&BKG+<^}>YIV2(fkwXmwB4Srqyn_>9!AeS_@Yb@NcaA4sLY1Rv 
ztx0QCBgGdeyx03>aV0>AXP3Bo4hkRwpxgdTy-2s1c$K5q<5$$3h71_Jq92WP4EJB}!)Y1Vt5cX~p$!>k$qx@5^n}-?S9_D2423{O?{I1V zfJ!lr;WOwO`9Ap4KT3K4>s5X=71GD-PFMBYT+DAqLedN}z2YuYNV3VfD?3=4O1wK* zX_e0>8PqfdINsy25Od7?zEztmEaT*lvO4I7>SLGGVO@UE5>tC-5y5=))j$#Wg+ZJY zUBAB=WX38vZBBl%N6I7s0t&y2*8Wa%6mdn+Yq=qKKh;J-QA3&|m45=4bKZ_LZd{Qd zCrh27LGeflYGaSXJb~ewnI=nSDgDiU#>J#PsUan!p$`Veg$J9F6c_Ecjrj$~)GG8` zp~b(@ImnK+2f)sC()Yn0dKdW0!|ED<-2U#@yZQZY20!?BiMs>KXN!S6kDBoG0)!EO zES<<^?3kAokMtfv*ZtZ4%dPPl;?PT}(FFy{iZa5;IOq+tzf|yv4&vWEt7EBP_{4fJ z+8RDNH5$jT0Y;u=*Zi$^iIzE+e~B3;5O@+f{XqmP8-c|j_ERG`^3(dE=~tzfeXow2 zxCTS!qG}^%*cp0B&}~zlZ)?}8N%`Qhs+F_2`Si7#-1JaX-;TGWoaDJi34(B+Q# z^WlE*w2sSP3YW=FNH!dY3twQug{yY3S20PuzBmmnoPTtPvd3`4`%DESS`aYKi?d%M zeoUSXdek{2`>?82DG}4&FF*?!u1iJw0I3{`1Ke>i$-M$Zx=wYZiq0Q=&4qP7>0Cn~ zBe~0MA;FZXN?*Ar48K{S<>AI~7kD$u#HJvTdB-jeX+CbJ3q3`RvlqTv0tS!KC5v@R zh3<|u-vo;^NVAiDIdS2sr97M$7%GnE1=4Jl;Z?ctGcR&FbH&ySoLR?#)zhYfU_s=> zh8Fx-|=Q)F|>igq; zOllZ?qlW^@WckNIKN-n3Y74sT6?K1c%@r0?)KVzZVh>MbhCu-SS)$dE_@z{bMye4# z$#?macvpZlydK60wE@ySTeQ2d4TRyImf9Z*mz#xF8@cx_0S2e+4!Zb#qh3k!WtYXcwivGBU*w*8N0Hzec6H!pbN6b{o9&+zQ8JYY`W1zCtf-v zwUwhVbgrSdE-JK+wy4+Crzj49NU(~Lhmn}H#$zv$sypoWdV@5|scfcER}GCFcofwg z$lL8a0St`%h>(2Zk;k7pNP6d&epg|>Zfh8^JM_<79iY(pp(ibHw8wh;E8AE+qD-o^ z>LFPqES7lEv`J4X_PBYc7_s?|Sb1no3$Tl8J_z1UiN77XTdKVPOWwo&FL0j)7FK+@ zqAlrGTg=r_XYBXxqiqIRtoG=sYUrvrqzCM?q2}tC9vctbBvA)y=p-;7X5xYib7_~f z1gpyz;ADMWGzE=c+2{CZuPi*iiaQ9vd02i1vd>Mlkk>#qt<#Rm8;Pi)9+m^pdGLBd zC=64dgAI%qhALaDK5NFk%(=)8nWR#M){1x*iwEc^D}aHJ@e}H0P5I8N2cAv>slkb+ z?keBlM8oEIgNoq>77r?C>`wQpDxm|J=H$Ji{KK+R@944?t2+r=`gfy!+__oGVuCyR z`#=e!TBoqZBsdjtexhuk&EdoPj+;(WA-4_O{X#HJ+3#V4e6BT3w*^BHCl0%g zD^1}-S0dN$A)YjZ7Od|~!B^4h@|yRyOX{JD*9|IyjWektyI54FJ|3{DEk|p|efr#& z`veowWEZ`7GI{w#YziTno?U4*a==FGm57Wy%x}7HNL|VLe0KC3e+cyJcQiQ>F56m| z*d0pIK3JMv>l^7E+aZVARx(2q%eX0BciXG62k}@RYFbn+1Lk%0(vEQZK%TDt?5WCi zZH~J+n}i#x(p>za=P{OC|K#%SGGo^J!)i*aPh_JsZWB#BBtyY8 zz6J}`fZwM@-$)LfzGADFoy-AZ8odEtr-7ZKrkyAM+3(Y0M@j0I1cba41r=+0R(o}Z 
z;Z{~rAIQ8`Pkt#e*H;~fv9vF3{+KhwjYEvyS32#~wF^Ga|G=poGU>|(3W%7+7EZoB zieRXc;-HOXDKY$I!93eYFOfGSzJb=NkcR~7OoGO0c6~^<8~N@}k1)FWPYqYKkG@PP zIbSpQtZBJ~n+m7C2ox(1U|08?#7W=p=E>`+YC|NOK8mg&bC(P(4NfinKg0dk4Tl%n zJP*NkzPY$v5sJrd#OS1nWRbYjfF*$B1)9oH7Yo+}1JoP_d*ylkV@-hm-A%>9juZi>cQep4>NPGvLbJR-&fq8Z(qkb=i1P$h&r^6eD&qS1!o@c^;-jKX`R^20I3214>X`oivYJBw%UJ)sXKA}?Q(+~Nlh zQsqS0PW>Jw=BwH;&PM6;{u55~FoF|Uf-}n~l_ag1#LK$6i>&aCi6g{kkTXqCdUK%C zF19t!_NM6JoaY>WAtdpV{O5%at!r3R10_EnH~a8yG^z#ycuB zvgP}hKr&I&@sY7*IDi98i`E-C>Ub-xz>-0u^DO|nG4mFQa-&?q!xKMq0iL^y;woD^ z*O=C^c7)7~NQVwQmNP1;v~}!=SD!y^Lti)QjW2)=s=Hwbg`Sc!cSV!V48gER5Pi*Z zh8_xGEMv@6g|C_sqJJx?X~_aPt#vM+WMUHY#PF(K~37m|~ zUb*ZC5Bi<+;GhW_tThbpL_fAXzC8ty5`{e7t^WBph=Qtqd+34k#N?O#r^IyaAg?QP ztC(qdY{iG~-_eOYCR4lqFYTek0J1lnl+G+{a03HzIcAn9tRMqW^+OVms+rbL9g{Z$ z*F;?kIYcFOkGs*A8w(?{2Vi@KeIe;!6QbP64R&UPl49*~{0Rpp*d@0H(nF+(QCaqa zMu;Is@WS)`o6joa^zUYfOIOa%gIu3Cc%#@pQPo)?8HKFUY+hf{=gx2z0WFsLHobmlXLKs7Bznvyj+KR9dn*4ow>d=5(V zE7R(*t*7jE2EC^est$fhP}E;QVHR&VJe06UXK(WmD;U8tsdjzU4{({&=(Ld9QOG=c zN6(DZ`&}D1AR4Azt#}GY(4;UOw%M2*c$IzSBj)Grd2E0txco_8aU2L)O1dA}`ABm; zI!9wH9$Qzr?L#B;a>eS+;*C~u8ClS!If1L+5y4^lz@*tB~>pf`=sSz3Y@xAFVH=@I+XJ8W{R_?nJi;wz<8>u&g7L z9DWuRzVJ4MqF9N)L9>RbZ`!vrMAR%EW9G;M-C5aB{UTiMpEUU!OZO5TGBr9lvW82% zJtUwbs)@F)a5q$?<{_I@fqj{z`Zru&^&xjE8F)10%(O+Rj}ZW&A$-fP3q z33BvM?lA;?L22Te)!G{2@+#8wo4eljEFd({``<^M7Em<{yR-O-1Sy4_krdZkK@2Sx z`ZgxfxIEm%iQel^q9T5=KfooaGzc_p0@vTxBOBDyvV~D4b2N-z;18FNMJZHX1fzcD z5j86TUcPl7>v{8C7Z^_j;zMAaDUG>0EwAH}^49N$HqCg8d=IdzXE!A!m-e;v`kIFC zI<^T?sTiQvURUuPA4CbyTg?{&&k_{n!JUs>WRE)RHl4yO;vPkqM@9VYVasZPzUZWJ zoli)#v~hoDCoA!k2<07~Un@2B2eg&?+^9yo?EbWBDUWdE8{z&c-IhL2&+?aeZJE>S z2XZe*q!8?4p}RS+)Jd@;mX#}0OUajYBkQWcT6QWXxJiqB2 zwr}TtO;0!Yvdi>j1&#E8^DJXjYe{_2Y41xC9DN5g{cOvitGTAgf zOoO*CQfUME`GK?Hx5df??ShHBdHfhpr7fzh)|3-m*-cA`Wx;aPj&@ohhs za&t;$SO@>7LVE|u{ZC~9bd}8i4EJ9*9MUi}P%zolK1wv;C$HFi34=`a1l9WXl4&=% z4ot5H0H;mBBSmE%4$fR$Vb?F4l{4w9rl)khv60)!o{B#@ z3rk4}FW%LLhHw{el^HI3gbuqxBhFy+xV4CL{x{X=RdhPbENWx>>%n(Hnd251DoAra 
zb)BbuPNP=^3ajQ{{OR2L`7UI*BNh2Y0<=FTC#YL6W-%BE`TG7oTV4Qu9M*^y`8eQ+ z$%l5WuoS1VZcIBKlDpwN*IVk%d8fDyWSQf~R<}^8Tlb<6G0&e_=A;;F8x+iK1@rMb z-)%O(7$F?9e2sgx*K%z3b_C$ajf7~;Xk&-G2Tt*!*y6Wm#Tw4?3my^)_JF#Hihu-dKIYSBd)O;m;oAa5%m>0*AYRt=VTX-S0|~MYKUopnl4o?-5LH)KpCEhqD@GhqKgOX`3{OM# zWZ)7%@8nR$c=|vzgL>Zx;_J2k#7xYGBW)@Pcr&eUSvZ-Yjb4i(^_Y zjUO)k+u5HLUHC#UIb^-5wL;H7%)T_!BV+sHm&n65GG@k{qX^gT+}aBh#Yvlr3%y#p z50hTK1hBqDj0By0+bP0{-9?y%9Y<22Y#Fy9Q6`ggQ5EO{xX(qDnsf$%G* z7N}f4d`N%(K(#5+D$Pu82i6E4GMt%WAPX9)kn6j7m0P&t8PG{u^y60|DJl)%NQt@h z-(vCm?xO@e3Ti-oY8~Prg20!zY~Q{@DJnNfzInk*v7qxkf4~HS8JZ4zw)+Jh1=|wY zn~zNyFzzx^6wGC6i`32Z=zdVu_t@=&v`bi;3gO?*n&nRw7$_j<@OF+}V$&a*Vg3{s z;og*ov@5nSm|8kYo$E1>+Mv&OcB`QIzCa2wJp;qEqZ+sxge7XUgAnka=IX1~@Vw>c z>C0gIPe>i9+%Y?AcoVUG z{FHV6WV3-^QnNB{<$`X;zi z7XIK#M&^=v0n= zmWjRD{mbow-@IM>02eoi{k(i|lT1b$WS~uxqhPvFTT++Yz`Gd8~qgUm|7M8_C zvm?k7%ZlA|`IIQruG;ICbFmrvnoF@}^S}lp48dSX2;P_TK*^#Fqs2fJfs^VRvGbQ9dKI z9lc1BT})yg)&6+L#PO>=y50%(=+Um%U%Buop>oet*)6g&Xs+n`?OI$qanHXkev#Qm zT}{m~1a$iz1R(yUWXU&r->AVs09(b}?9Y_l28ysTi!Un&|FATxsAJBel(osf8o43OUMd6(wW zqg~mwDoVca3&Wxf+b~O=&YuMr&5`0Vg*xw#Rl&#pm@aL)^vA^QB>)Il(&gcZOOsX= zNU`a>CdOJ#a`lxEvTjw;LEGQwSm_~<$pbUw#Hrq9LJ1l$jhztgP~k~Qvt(vZSM-(T z?E=O2O4f%~U<#Ln!@?wziVH?zoW|xZ8ASwNE>w}3^k2NuUd%qiCdSEkR+C$A#yCDp|d02@Z8Tk4v7R6(*8xtX=A#FAp1YV{nrf# zLhdN|S^g545B~OVn%UW%&!*pfH~GWam(WnH`0l=HnuigzM#o^kyOq28fI@ra%8dT0 zM7u!jYL=dVY*v;SSZ`@!y#S&_m;!~N&9$?Dn=zDvxlb@Uh1_LAftcYt&FnsG<&Hc_hkrIH6)O| zkiT&i3DBcM0YB@gnz3ohgSK6-)V~ua#l2|_XNNUe+JJYgtFRn8p_>fLc6{ZvbkL}y zW@NO~zi=M;M6s217^L>>P#pNrG&Ps!yw@52$MFi(dYJ?ga-hZOolIE`Y^|g+I1?MK zQc^5dJk)j*Gd&ujNC5IJ+=YaAsT_pKcqPW&-lOfl#ks@H1gtY!R}r$m3vH3}7fHP2 zf;TDg;3_Fa^|uK#Fq09_>PPB#GA5ZdI4h;Y7nl|M*iX&`G)Y0fcxHkPkfmMnE-<=H z%1PX4JHKk?^9>Kq4idD4$EQgh5`m%+lwU)csXQmX_9+ev=#VR5 zUr$3+RDtn)X+5fM6XQs&365R9^$5O)m`(mS0t&D$uk zy8-rb7tGht8CzRD8PO4+V2csV)o`}x-@6t@qeBIk_A*gVl)!2^u$hRAiGyl{x-OuI zMcl^5%a%hF%yQSj!7pLtm=oFl&YjvrUpjAZ#yw+Ez&6Dx@sdRH(#TAbyy38O3{o7U zqI!{|Az*`RvHpcY&-<;tSi{X9BCf|24@emC9&bP>C)Z|mm0`>~77`(Fq@CRGm9>Sb 
ztUzoZ|4tCp4n|(0*nJnO@_Rc@Z5M28tea-9MG+m#+rE1X=nT8-)K2I+Os#X3ReuRT z^#w^I*vy(mGAh#V3JZG>Pne;dh4&RM87znD3brptD-LNcBYrLoW}{Z8z6}UjxNU2J z)?6D+%p?l84h?aQHS&0u3!_@@XWb^ouztkLeRFHbL)+ouCm}V4T(zwx;0x1FL6pcv z1OR7v?hVQGj{Q_9LDOINKyKig@^22z^#^sI0|w0yl;{euo9y#RwPXPxH{T$(PlOyH0CE+kN(s`aQ3ccub5vT0dohsq{Xa4_$R4n!S&lA&_23(8AC-pj`Jh{li-@ zgPaP9h7c-uj)3(CTFi}d)aN+uvCAuW*%6pu(^!&b{lEL4o9GKC2f{j2xTcRfx%@39 zm1R`-lTE#bu9mwAd{mEU7uC4zoWhA)lwRn~GSRY489;+1r>9qx?>3Q3f}Q02XypDB zKL_36sicI6F;COVHSu<4c!09cO5dvlVWUVbhr&p9-HG+rplS3?tt5&g-sJr9Pg5S? zkk~Qt!K%H&&!9zIaq}a$C>19~`1M@~OG6SIp?zdgL4edyq~OFKfbQ`aXto{DNB+)pR#zxmL;0h@JWFf4uA4m{Yv0D2BejWyU+8} zf|G68&qDf|DUGD*&s)0FxL?vLnwWV!nP{B9^^v`G?#r;BU?2RHjsdwq+|kXA3{FKy zPu_Ms8ld^n0leAR$y(BwH7OY9O<(bvAahTyu9O8jqEnt2pN$Z&C4Yr&!lLMgyH+^bbt9b`P6p<90gSEzdfF$POVs2>-sS#GuWeN3Y zN2X@Yv3kv})1_og-3Lr1s>m{_1#?@BshdrJFB{Z0K- zz6IB@JfRS~`OPMLHm_He{v5WrKwTU#cvy6I$hN(b`)3?5Nmk&d9TOc`qiLoQ!a!1* z`UCy{uy+p&f<##mK+Cpm+qP}nwr$(hW!tuG+qTu^>0g+|?B*{{=8L!yWZ9U-P^5w| zkb`wK#99MS!qD^I8m@e3AkE|eMj#gT)=Cb40lxuuF$2)gf$U_<3&jPxKVF}HAd3u| z38sI_Gx;4bgIezYhWnoz4yFA)m+SLPwVs23Zb=4RbHW=vnYCf7*vr%~%n= zNZXYvQUPp!_e1hodWqs|KTTmD2BfGVf?n0GShWL#+}~0f)G=z(C#6Q*5{9W-LRzj} z9;kh^zMt@@2~aN)oSP1%*WWeuEokmlc#W9ejx@n2Ss`dT9B*EL+7en02o}|pP>h@0 zTVR*3T07B;7}8eP7*+Z1d<(D#`(z+^$i)ye`E1-gm;z+zCY(gsp-u1cMivIK=j&>UyFK~f7U6Xw@jNgs` z8XFC(TDkLHl$2(Hl#T87$755_F0N3|6&0x^fU>;!+K2Qj+6ifz2SvvfamyG_OB9Rs z;mkpxS_PPUH4R<^Wn-W^O$`69l@8mVj$%mzRinq3t4w(VUvBfA5wZS}t7>im!-oFt znL!}Z>EZ?{_d;%Pi0*=MKFw+eR?RN%hiPBh)M5A&s_bCApc`mYuh92Uoq0N;U7}?V*YTeqeFJv_!uc|UoQ%KaMtAEXq=S}pO!T4mHeS7x6RyiCac8f+?n`xAW5L7m^9cHP_-3bhs2qx z*m?qyWD3A>nLqV@XGzoWySii`%9r_fW66)SxYgANQ6M7w4ac5nVdi$h8fgzN3V}um z{1KT2d;(oF5mY&F2;eYlXSWc1#>s9X?EGh0uh!4$1v>Z6U2)o8Jmo6Mx3(PBRUi_Gbv#5vtNTI34IKlox@@+6%N0F2#4BWnsu`UQ2kijaLz z!snbbe7_&2-hz4-u6e)lXwg99aMYT`hH^jK^y1*ffXTbAGMH{|XNJO7BP3 z#%HeTUC?e(944V}I#?kq@+{V&mx>2Pfy}XQ4Wdfe?8&y$WEoEGdT_x;>4cngI?A7_ zj!R_LVWfV{7u8ADD~z!5016KFa13~xneNAl{eYRE`^r(bwR#c_4xI`YTBkM)0G;2z 
zd{_%yaE`y=LQy3LN|knVP(&hWn90r~YXF3N4Wtn=h?szia#H53T(>$Hzi=~Ve}wN_ z`C!5zb?X`1nOq?FQZ1J zN)B;`uh_5#?H~@_A&M&Gp(|kP&F^cL=(IuY=YtmzFrxS1(;Tiu1Ow6TT1C=Ay?RZ6 z%JpS)sEGHwO`au1Dk*uvk8sum*4)!>@i8j&s`Lg}@0xPS>ZP03LZv*yS-SjPJYE?h zPeMd`YrA5+y|^HUcVbLG8Ac z?g#D5mnH}k@VJtVZZsWRd&u*SqDhdgG;l)I#f24R!t&b&bfC@kDTr9G-_vxQ&Rd?9 zSMWF4yF=3=>(CXwi9dU-cQ;JLGx**2)4W2A!Icax$v{)r^nid!7^FKZFn#~*r7*id z^1wSI%3d*?#!!WJ!Lc>zWbJiy(Cs;_bw*^cX7@cT-~BCTy4MJeO*oI-0+rC@Vg@c% z%CkyalGGVv{+TjIj8wgYP~XW>4lOMFz!b5^*zHh9hBV@kFRj2}sH>4rv<*9PE2^VFPUk0>;0G8Um_hLrfBg0JiSmyr|@pA_}v6OvjY9%TX}kxL~g<5}E6-fd_k%N*k>9brox*d=~xBfj*89xT;adoOyUK^}*0bzu(1 z=SBv&ByF3)cugCJxFJ=Eqw0ii+3rFD6pJGQbTd`R!ycS$OK37_VLQ7WB_Cr`F9 z2)tsl9&S9kh@Mbv8E7PdgxAWj+f~jxxcs)~FSr17HW}FUh0y=jaKVu8L*?(<^E<@> z@6=Q+BXkhy0$wn&?S5#kCCSx5*^L5zTG$`2ZB?ONP{+aCK*~r#z+8ez67aD`1i`@g)K)vfE zOwYzrRJ6bDl3Pf%F5Sz0jA4xOkbEkK1^y#DGB(iEMXMo8xD$O9H z{8))o2fh&oMnp(Quyu7R)YTG>-iO+D0BB2V9sVnJq}}BOE%-UJdU*&Y2Z@u=^$zy2nQ-eG?%|oduB42s?~j@ZL@Ef9^8LPo&NqzPbykLB zw>GxoQ>nstL+QVLjMDH-7|rwfa*W03S1wkJ*4IG0k*J}lputqzp<5Y3cK2G>OW_!q za=^3uPlQP04bH6|8LLMz1Jqm^nhQuEyp@z#+s5y?>YNCw71!5O7k#M7o9Ww3BuDLt zQnh1%wOTV(c-+mKI0X+XgH`^r4l_-HkCI_9GT8fiNMu)L6$Jjie?r!X+TP-la(@>` zJL2w#*t!#HMSs8AaNIc}?^O^u{>)-Z>WgFJx{DB6E{)^nEk4(|&ifKI65A&CIoxe? 
z+KO6E!0!|DwLq(8r}i0atRk~?F3I{RaC*wysP4tzM+1cdDA%lTsi?>yBb^dS~wBvzg90 zYH`0wKU61Eg;fJeNd`&&Jal2curk;KlEz>HN*}T#b@5|sxbcvDH#>18G~Pm=(t1qFUAPq~P>dQ!DG_d()s!vd zve~{0w9^83t>&T?m|iW%OI&vX^34LlZQuzXCkne=bz(d{jhrCzc$&YoxqF&80?qJH)zP)Hd09cjQww5s%Vw;Xx@LZt(IOR}6>`tq zg1&sGY(L_B_^BfGWVG_iU_)?JXzK%J!N%+jQq03=a+Sx{gG=~Q@A7+23*!nXs^BJJT)yLiS~mbSQ9< zwDPjUM;V1>Ln^tK{B$==c6tbm;azW&36j$GjXGGVI*Y(8vyLlo>u4IFP^e=|%aD`l zW(S4_&Lo1f@@bczcDZ8n2k7fJW2t%r=FvUEz8&yMISk}Uw(J%?3=dY7Lzb<1u!OIC z6(@72@?$w{c>I^asvdOQvJGT{nH%xoSjx(5{y2QvLG4(|T~Rns5EXxD^byJ>=w0l(O?JcY?r|b9Ojk=Vl(Qe!d9=HCfYy+3o~h3#tt=r7twMUk9=F0&Wk zx|=8ZJe<26WW@O1n+noZ1s&n9cwLB)dMRfaF;Yqp%KaVW_1b6AfP1}-c64r5=8QKZ z%j5=QbtK`O>PvXC+ZR}AoG3RRMcOfY@<@~Or8EjS{#bW|?eTB@Zu5V<^amk*Modc9 z;ZodG>CoF)#w%raz3D;phGUd1%Nt8v(A=vX>K(R(!%2SDi1cJ&`2DI@w{RC*-l6;f znO}LwxwZKtZ5e!}I5({)1efRNZDM#Vba5qe-eOM)nea$*==abm{O#gAu$n~SG`|70 zcfC*=8I6pu4_8hVv$dLGmB8P(7pSkDWjCA=LNdk;PUp-h3pBAH5yklZ0_gdhSz}$#_}oEgkgb&`?7LFy z8Od;w)FIz@oxQqa;{3OUE7J*9Htp<+Le=RvuF4?L>qq9t4i)m`MgNnQ5b(rB%;y%t zwy!Q;<0bSh6BP`9tnh!s{m%`@)XX2uNLR(x^E+Yp+Fc=>d;O-h3y1bGDZK%+{W6|ffdcWY7dvtWgUqWkL2mtG+kqSQRB5Pf+N_o(@PV&X{N@mUrGJSn~8TI`e* zxEM$GrkgMNFITLt1lOGHrz-n9pVrPa5LEZZW5v~7Zd%W$3l=p#x=~_3)p+K1`_k!q zc#B5;*L-yGh5fM9v=h;B%1u}+Cwy190qs1)p&=ZmNYsHr-SzX4%C{IMtOWq!Go^yL zAIkPM)ZSmc#GYb1oi2&PNE?kjo*guoF^!!()F2-Iup>@wA!>vvCl@uylmuUYS<2St@mS~wt$-2(l19K1B!kP>MJ*w6id0)0{=q)pxBFY6qx4m zLH%d`2VK!t>xyFdPUq8(BlbsF&)x6UBW%CDF=o}a<77Ky2;FioHx!C zq~>d$3W^vIYZZ$}!ArVWZ=V9j#++JIpY!`*L)2dq2_22XMk)e-f-BKHzLrWnICTRk z7Z3YO{5$DegI8-R_|IFu4>=|#q~DX@*Lr;KWVZca3F-W^^`s#{PdDfaI>+7Nk@iL= znM=oyn*e7LfG<}9e*BcEFl4(UirQVLO2QjYr zVzPI6*hom>6WSU|J_Bq{O_Z&o$L|&6HoFt@Y}+PRN>x0tBi&^UA0?|(B=u13a#kG{ zF%vSS2x?Oc2z7SeFsG)nIK(8veoGPw?ek<+7ZJUTk1kM=DU-pH0?lX4(wdj)^8g8(!-6%Yxtg@4h&UoNY6Yg+z&=rSY!}bGjE%WW5qH z;y#UTt@fm;P(8uuX=Yd_U%lW0?;NGVmWyW};S}EG?wo zSEr>HvREJnd>YV?>l^_A<=@6ca^W8r)d!A~$F)BFJXLqV7M^nk|LzPtE#F^GTezwb z%`4^W(I~%^?PuMarZY>jcU%LTNJkb+tS)g2XB6XbDj5#c1VL-83R2Tq4uB{SE}wXZ^|&>PtE_A^Lx89$OUPF 
zQL+*d7tJM7PA$Ui0(2DO%^cy;Z{OiW=j`PJ0PzXEVEY7|e^3$Jz^eNXnAGd9oDx#g5f+|bOp zQbRsUdo=dbjiilr5bH&Toa&{^Gn#2f$m4jABwVMLGX!IjXE62;5MDW`@Hvw9#C;Sq`v;qR#zn42sa!;RzVQ1aq$F~pAn^vyjC4# zXjVRXpX$5os^7VVePN>~E(e`H+}z(E9z-$d^8!oJ_flg{H_@C!k&v3RIvE%raFVgtOZO+)b_l@4{sibvY*ZYO z<6e*rKgbko_Pj)-yn82v*` zw33AQ^gt?6;GGm0HvCtaR4zLiQipfzqh%r|!D6S>%Mp^4tUC)ehT#XOaR_f3N27U{ z%1HsRs2ftsb_bH2hi65dcYiYF$FNzEY(P{Vy2VTA><-Su_mDP7z5-YDu+|ZEibRR? zH`g$NbNIMhlR55gh+Q7+=EL~BfBGvv>|7%Wo{_rsF;-y`2niFSiC{VMfx!0Oc%QcO zO!A`z|6-@rX%tF85v-8Vo9-(4!3|<>T^sC%~yEU6tHERWIqibbJ> z!Yd(Q2x(>;ht_6pwKmv!uq4^~j0{@4i(eWbdV*jJCVrXG?NvutvuQj*&md{bw-slI zewVCPI`3|snJ3JqREsKwi^OVaMh<|(zO(CDD+U;Xt6qU#6`s$tYPkdY29kkoT)v~< zvBB%44oo%yK!t`H^GWP^1{egO#_qD%71FvQoUAsKl=5-D&d%8f>EVBCxJdoIk>GL= ze%;4f*@U$C70i8^YvK)R+V!+HEjeUOazimU`u3Mixz<2v`Na@q<_J?2?_J1^K%#aZ_lFl@N*nY6>n>1RwF8%bt zG<;5}fhNr|j06CEa zbPwjnfq)m6=i^D-YDek_v(|}ZhfzWwpaq-YqY6_UZy4D~@{EuR+ zNs&xif`4DltC&4V_*_;%e&!w3UOQV)Mx%%xm(ep+Olg&jKOmvTnSO&x2s6}mkfh~& z`Dn|JNVwuw6c?2}<%I6cMKZ%^Lt;Vq^Qf_^svvUwD8zypu6zHqvYX0EB7}Q(h!m3` z`#=KM;H>YHp2=MbPp70QW|b9l%qHP0F&D9Gcl0LReXBC9ff|N9uD1u&8@jF1!t`Vp zNjR!ED04}BSTE3lbtQ=BC_vxXZ&f^A*n zB${j`aUmytRmPsGUR!PCjyVkT6F|9@)+jxkQjG?QhAEsSHlf2=t8Q|G`E7mOM!c+V zG%?F2eYHpxTe`d@IoSPl6|CRLTWZ4zQc-DG=?^Y_NhgIKkEOu60D@9{D4opf1tBx& zV}TB;p{z!i^yt77?`!`XA1X`7lv1-$M{A8~0aFLxk2|riWyI#gm?Z3cywDHIUIMbz zWp?ZB>@-)ef@5`Jqnu{fFF}I*9%-ng#j=K>*M9*+C%~qh-P`w|eDW60ILiV z!=ImE1ui6*;`q|)Ct5*V!MgE(r%*gb91C;xlb--$lu7-DPw}D4VF=ke2JJ!`YePpz zPq%-(&zE$sho#L(WLh8tlpt<|&`zzIw32b+8PaqMnLgp=o58Kt=rG~P?Y1?s@{H`@ zCH!MH>C(6{V*%Rd$tFT4 zysB>en;q}OWZ)gs0!ApW*FK1G8xckRk|oFe4xIsav9)A~Z?rEuX=3<8b8Kp-`a)UF ziAY|6ZUXyP!W8Tl#71%VJ&n;8hE9a3pJssODq8*H|#R7N<>L*7bdZXO< zT42QY8xzPR$V1JG3?lL<=xz(S%t+;G=B5b*aCax4jHoq#_$zzskhNWZnj1>46X!Oe6( zK)#7X&>lXhADUev)w|WFYXZjNo$ad7?&-mS&6sEo(>pskyDo9b4zNE$_GTN#sAECp z1*yIuv*<6RtUJ~%BD*O~YWsqo@Y%VEMgDV*=8CSsp5eNGmaPw8no2s3Ld zPwK6YUzbHP#UIkl>i!yBpAsUl0%8*|p!}#)1}36Sts9bpMAi<&i0Bo`Z9U0do;@ln 
z8A*gDFq+0nSg*Umu@|_RM?Cs+WvUnXBGxs`_^LGQ-=Gf=UOZ_}t1&tT$jGod8&rxd zuBmA}tM?aZVQP5G>{W9UsK3}Ae#`|xluna5a~&HwdG2LTW0`JVY72wkR5apc%tVyL;CVa_+8S&ehBgj!(WrJ3>5sz)HjY}%PK|KLTd~{p*THT~Cge!X zb5Q=W(5`3N7lqdo)|KbYx+CijmB_BYv8Dvt$5 zZ`lOR$}z%k@$@&=mHIkdOwx~sg6C?A#DhI zb3#k4mQx6{KJxvc4UP%};8amn2=Pf`XGD?IMPRfakly!5*T#B;7_f`G75vp5cL(9} z^XAb!iLdX0;s-TJ!Js5xL-W5iTphxvP66mHDR^pZ%V$hsq^e~&d{*tVgt2{$B2l}^ zu|Y(^u={nIzhQm8CPfBOfYSdB_dhqB_SEFxhaU0xjn^ySOC0q(Kkl99dGD-;^!J7S zbyR;DD~pLjd)Dvn0t3ciyF*cSY}jZN;v#<^1I=m23+LT>Ce-3npCb38vKOay!d(O+ zDLyM7Kr0=nMRW?!TMQLLNLnjfY;_v}seLh6xo)eibN1KT0Hyxib0Z~?D*S^$#LG^g zFs7gI?35@*(LAc3Ffa-#1eFeWt#uXAztDc`^x6T1?OZD2p%MGNjY)+n^czD^c2BZD zClC+jxrmwi!f(*15$=MzhJuM!qV6{~92CKYA0o1O{H!O0H|^K%P(00$-u=1!Jb}hs zCtmr>OWMjQz9`NBCSENsdj*zIHX}>3iu?l!y)>((RgKk`iQq0>VFhq(a4>4H8mgdq zA|?=ndt)EZYv+*ea@jLw-hvpCMARo1;0NFjn~+p&p!=Ig>q@lZTFG&ia&VDXF`poY zs!Gb3Z*$zA>h(XS_>!pZCLu8!9hn=kPNU*d_&?`mX!^-5R(BM!4mO@r%p)nyk$&Hh zsy|R}2cb*%!EJuTA|~s&<%%>71walMGLqZLzV+wXaq<=>`AV~q@R+73>EfP77 zl5-~O!Ph!3;-etIZ5TVO1xw?9kp&qPZ72#^#e*Vh$)62ADB$2YF#lB_3mVMnwp!ha z<3y={*T!u^q+ICYX-(dmR0+KBPepy zDBWJ*f5gtZJV7$#bNw-%Y#{9?kG3b3v_(Y4b<;*x6NQ3QAGlD_AL%R3clTzk; ziD;<$BE#nvsgcS?C?o{fF~xO$%1JF8QPM@&+;P9Mc69_59E>EFM~v_dzc5kAV3du~KGO60PuUO4t=NB+t(T{W{SkE=PoYRbM& zaE%1)<=Dpt0K}-b$b-$kzl4H`iYUA&#GmA<+b)x{TrJ&B&YpPTP+XM=_@5U@T!`Iq zl~5DeJOK>%JcGD)&)e%Kz{b8v;YyZ`9fd-?gdYUSV+zS~mk&M3Op%94vKhI%XQfkG z=C(5d5uKCK??yN|9)jB$TgWvXEujD}16&OnrdUy-BJ?oKY`>s5@bt8J9)f5nGF>@G zft>?1BLJ(;1MZ1%L)15UT{;`&F$EbOP)_6|aDYpdH;8Oi5HWa0=ErUk?l3SIVFJ~B#k>ZA64fR|BOs80f==UPQ zcBe1yK^syIg%aAo_FY}Yd-8H_WS=se4ZMf?^cp2Ee_^&ysG@lsG%;^%8cX@J#MHpf zV@4=rYtW?QA}zo*cQ(CmpP{Rr>&jAM3Ss{s%kfd|Ma&&6)@XODn7Au+;2GoU96Q^2 z@{Hy|{4Phx1(o}^UuNpkylzwPo_z{$rHs`}lzjE(YX-@~JTG6)VGY@hkZ4w)T1y1a zB`&Ft+KruW3DewXoKBJxXxE$0mWJTOte8_%+oO@%NkL%-f3qW@`S_il$CQ!5Xu2lP%BRoW=Mc*ON+2W-bD$z=12_a0YFb@;ys2AXZAxBDN zvF#+xN$KF}j2GG@Oqs9?jJW6!`p)O1zCU8~a2R$r`n`=qV7sW8Z%A4Rbb6M7%Zg2m zJc%;Z%$U8@B|UVT=W!Ur&yCp=AVBPdL@$Z|v#0ZE?5VSB#12cc>?UZbYFu}K@)Us9 
z0bLTonQx35+7Vfbk7Bz#<|(qSK1{;9=>?xg*e*8dsME+`KYG z$X4PIPv|iK;IWcd+G8hswaUAk3m|965}h&T9t>w_OyTj(yX69DrYfLjH-T|#QqR`g zWFDcbzU7;bk_^*k@YMyvu~{YShH8lg>SDAINHQ}Z?T$99t?8hSjU-X}>akKW4$Wk= zb}XL9toid9a&ye2h&Q3zko;X1h@}@+tBbjSG7N&gMtU1(`-B%v&eReLD(!!3xaOWC zn>Ad&3twZ9bk|gEoBHGrxKiJ!{Sq(eg+G0nQ2r?j$u4HT-Gk3GoH{Bz^veG?-2dEg zj!BnSwTlg6Q{(DU%IQDH&J1(L!;7a6p>oByr9$A!aJxwJam?gb(u<~56@cVi(z-z{ z34A1d`Tr(8)8RdnNnIib$xXK~AqH%;!pqqC`|hPFE|rP8w!f$|XyT6H6&jINWTCNr z*q|eqMFiF|q{y&7N^GLg`r#)AjQ?EQarUX*O~k1ymLJvS?sEVS&0qvqL%ZZIBI$2n zL^~v{DD27yoOgnqA#*yQEtTICm>r-8H~L0UW|7!pcI;QF0$dcY>1Hq{e~^w06HM=w zy}6{qM!3eCo(0P(2se*D;b5jG2|nkPYOpiqt;TGcBG;Clb$@-Uv|_T%hI|Io40w21*-l!J#Mv=?28IHRnIj{LO>p6}si=RRm z>Y9DkFx;a!Ep+-mF{Z``WV5mrb%xF}Gp2(RGJaZwG2$H*^C?((XSGv2|tP2?eM-L2w=p_Bu?hN{qJ{ z^ct@4cghq~hUQC%Y)dPhbEbVJ@|c-)rp^5D+Cwa=FM*VM5E|~T{3)!aY5R&PItBw- zDj26pkSR8jGUFa+1K)#n@?00t8WM(gxzvxIx4>wTUb8LQurC-u=H;^XKp74;&m3{X zJ7rdlJP5g1jN*>GPbRjvC|**P6Q- z2fqFpHEXm(nrYF97($12U!+{1oDJvDhPAoQ6hcEhWy8^Szi?A270JNb&5XFyc#E)w zzL&2Qr1p$A0r;^8EFr@PW$OX*;Z5YPr_ENGVlCDr6ot?61yTEATq@=Kw{z4l9vcS< zWJ!LvP=A#)W(!8X8j>f#pvr|SjhKR25AEXUVe&n#4U8-;$6<7A3jk>K3BIiSfwqJ= zoWwuY1Yn;H8Wqmj7JPf>XkHPXs(G1dC{EZ!M!{XUJ_3myH=s3dcEsG-JcT5tTX4St zOo`qY(Xz1nsiZ>o#IsYhcO%B84O>)f-L==2Y6R20Vx9$S{PwY>IP#>51dcU+VsVN{P)7X>|wy_|;@!>~+bS92HN*&TA(|BeE*# z3>ES@W>*?`xt-|CWYBvbC1rOO0>#BB$)A(uwW2;U=i(YvWl-N8Mbw<6r zupdE4Jw8&SHN9A(2T}Y(Z4aQ;s*B>(#Tm5->T{dR@r6t*}W$}&s_;xA0Qnk0)7a6+SUycWuBYa#VA zZFcksI)sRzA}oDNNASNgw^$EK&uYR+r@ewlFEY@JD8mG5zVxnH#pLsk8F1ixGzH4T z-Zmue75g^lEbrfCIqafwbU|2k9a3I*bC(C^9F++l^X69%Gdo(ln6epB_%k&QC+Qf4 zC}vA%t(0f5Zwko`ftnED{v^gpj)O0?L~*&gG;D8#6$95`Tdx{4{COZZHqi{-^`hfP zT`}q3&(Td(HR`Z?Z83EU(-}Xxx#xcV%@!oJDZFeC)AVruzl%{lGcW)$`Gd`U4%Zp- zml1=u2;3lTK=hVhSR285H-zoJ`iPO-&k_1?Qtr<_D$3XNCw?$IdLOEg- zae7eOH8yl;cQ6*L3UALBgXju$1iRx4<{k&IM}p1i)8MD#-{D{U|2}Y2$$~n=jDd)R zPyc+Y^oJ0r<*2<)j_}NvC3;j%R8|7}M13UZ5j*$6wtUMqlZ4cGaUo8unL~bgD#Cd< zUvvXe*RLYtHr%zkU1!C^m7Zw~M=~D}zhc%HCdIY&9bGnu75@2VHQEI=`T#8$?b6;4 
zjAkgGSA;+$Az{z=T*hRQ&lRO)M)N&RuLAb#;IZVy^l}U7mzWt|53NgMm)4= z2Qbl;$s`$sq&8%d+KO zINfPFu}#xb0EH7+qyQv{mSK=@Oy&QE`=1+*G4l8h%LW{f0F+6&AVpDp-?a8}93~h& zof;D))vO6wF4nL7*inp`iA!rELg*bm+(=(mVb*NG3EjJajo`#a*nI256fVLA(x7{* zKr$s>#`4*k`qmfxLbBtDP+_bKlqxB#@L#{P8guP-pFxE>HByAZesJQ%Y`z53sEn6` zNe3Sl$KaEaBQa`fE84=4uviKKX;|Tw{wD5vJmI_$J12ixZrjGCW1IqLaCGdvGAR~8 z+t1~<^-$$bz?uTl76#8(dR&1@N};@MrJR=``UYh=26(VzWPi#_UIVpXXeMWUJjbsVMcZ8iQE zo$ia@A80dCCyNwU;A~n*h)X62mjg?g$|d%wQ>?|tv4LhBlv8Zot@|hT0rA_EBSoW{ zcQ5}CCUL#p!4H0y*`|&Aa|n^2hfNev7#$dv%#tovY^$Fr{{ydFaLQTe>PP}exSND= zkt5E~K6!KtD@qC3UKSY&b(%0M#E}n24+`f`t&5_=af#D52nM?c2)F zSe?G^Ep{A_$8tX?-I`*=x_HF1_tK{@4VZQD&1 z{R<#10uzWv?H)Ne2aXj@5e-$NK(R$1W!YA#Yj%3V`wf~vRAqgqcl8uBMGUpe} zwCN}f4zYtvl=N67o>Tw@#Q?bVRnrA&DAvL@4gMRqI%7!?ftgvmynoguP6N~gwM=>0 z0s9XhXP-?2TRQE;hSfn1wC-~Rk2@X-zrBbIt6;pR+Il-}MEq<_TEC4uRImmmBENS?hL)I> z=XG8;0;1Cu?Z`(8hqs3vz&&6LZSH(Dw5AA%a5PUMSi#&XaydT$HQ^0t*TWsr<&22_ zNw$KkhFXNYD?i?$HPJY0<5exRF+=>8&}=Ff z3V#f09@r>W^p;H$Vq7YI5bw4%gUuYpwa{LW&q%f}PQb(nm^D+bK|Fct4k8KxuGv{al~md85z{iHZ?8`(*FD-Tygc^ z(;H6Ug=FszWi$AlKQ>FWm^Xy|j1jWN76MrlhKJw0rrgC!sdI8jd%vSWI)|d@l<|xiH5`Tszr=1u<)BbOD|O&FJJL9 zPqCkEpNyM4X}?iqGV5s^MnD-~2Ue!iOQW*yWSWKnAe5XghbA|Yq7>P^%nS6HI`~1u zlurOcx5ppt?*!SKOd{l5G^lsr=TGiZDctY~dJJ*gS2mG=QsAkP(}!eHs4KlR_6;@t zm7{1OjZf;Iutv_y4N6tH3-I#d%6}EQ@@byNKdo-*_L-L>TZ|>&=nV`EF@lx+cJ*u0 za3ieq68vtPjG2kDgUwYJD4(!f5n3f!=D1{UQ}Vt7Whf9Hir2-SX_mAeV+q+% zslB_?i%8~ZKD8zKGnomy;7%6d0r&jzIsA=E+iKFLOdC6gb;=~0&F{}N1vw-f28-Eq zTnKgl+F8C_I_o=Fg1XjVr!oWMDNgf0$}OX;L0Y%t7{9p)dXp!%`XlT4Q7bZA_Gz0k zO_~q;Mwl!+hsf$}Uj7EQ`+utU=7(F8j@F9ju+xNqfxE=J2Q z?urt!OR)4&KWDj8_R|oQ@q#VAcqo(1?$1Iy;9BR={x%xTJn~OjZJbknfavYw@7ArB zFl(uxgG;>Y0EpxtR>mJjY3+~J~<5C9l{k)79ybIusG{ZxcU1z~C>B8|brAe6W3fhJ>jkp14_+zUSORR3?d z|GD8v2qONqT7C++)p2%$No_wO%v&%;zndC%F-cV8u9~&imdCVx3wfu^DHT*P)yo?7x~r`US*0wSBz^%ShN4(&OdI`wG{HT5ofWT9)b{$a~3! 
zn;|W}j3MMf&SUkyGJ2R(0!hQ9zN*u`?$MQZsnuNP5ce7}T+S#&5jzX#C)Uc1F(~$| zs4Q5&tIbQaP$@-IfY2Hn5B2nCaJm5UOIjO+ro@SqE{BwhJhV>lVYol!++4ZbbRl*) zG!DU{O<9ryAg~yn&*)5Dj|V5JzhsdQk$LcE7wr;>SRw81L-Vm7G@$xPc%nA}WzIlc z2y`}-A?2f2J}Cy>H)B=lbYWle#bc63TXgXWwN5pu2Nv=(2>7o5gx>9fgl@kAxRoQY z=@JB32TWx2BeIWUlb9xIzEwKMFVn0wD~A|Z7TIkqimcF#dve+5v2)M}rodQkBB!_Z zFW=@)5yP_Txe=Jw;*e(qjC2!X#`tIOnQdUt`v{9bFEt`K7EZ(fL}+zub+kzj3cEY^ z(){0z?*I}2MO*jc%*>3^ufd$0RtzU~#3?QUXIDJvE1AEMLBb0r8R!3&!AUXUq9cp0 zn!l&w3=ftN)T_W!#-TF+dKDhK1^~;&kRz51rs;f;vM(dmJdR1JNyFNZ`9tDWsgJlk z`{l>6{k9ByuL7`$0OQ1TF=Xon%zM3d>a*b{bp|Z*&72F6OOaQg!lNiQOfKw!c|)VP z)tjv3tgAl)w7pUcu3y(Tg(*&mh~$Xv#u0P1!i~5kg<*ny+ET?EQ`RQ!r$;g!Lt*y5 zx6d^DG$Hi;!Wk~c5gLx*(CUwf;49X$a9S9)Pwl%N7IAN%2U-P#Ul*7z6%=c-L;rBO zAq7k+j!*jls4>eL6wZ4X%0BEcrN1m}+H*eIVKR_4O9EisDh@0j5xh*l+#;gvZYljzjZZin@|7o<# z!#~G9^LcxQR&H5hHk$l6Qd|_tGp~OZyT}~9ch-8uuK%gKy`(_qLSwS=_l(B3 z*IJxC#np&X3>)Tm1~%zs40!+f#dJbM%mFJ4+nl zwHq@#XCtTQqHI-qBA}@doH<}1!unw-67>?Ya9u}Nq0OCeTZ)?T@hh2_z)>KvbU5u?pEdWN&c)6mGMr(V9QJfFrh zIXVJM%d~%}gW>CcS`>&nr$pIzq3O&;E1 (1gXhwJqw2)atA8H(vmc8-ne^L5?u^+oPyYDSLlN4Obpmvt_oVV3(F{vg zo`Yn^BaO$4N*^SK?Kv2Z{&n+Ol7Gi2G$xMa<6xaerOl`v7vNHFC%BWxdyJ22l(UIN z3l7c2MpsQeTgsF9CL0pu6^z*Rs7cnId+VRvy#6JuBt!l*d;s@e1jJRuMl2dBn&Wx< zsEr5A&npuI-p9X*)9(pQYOiO*C4M_Lx7(e$(H+X7$F<3SlBDLOKutzcG#n>k>mXy+ zI;LCU3yXWv#~^8o%Y%RRiS^mdcBD~;{@6`ocCz%U!_s(Z%XR8K89fR|1ULpeVs6BD z<{-jz^an-Ice4J@H*(WKMwZishNLLEk>MU^3ngCMK^=>*pg3q|uEVEKqEjsL6NJAf z$+78-4lZ<-I%M@f?A_CLCO{Sj(AelWe{9>f*|BZgwr$(CZ9D0pW822`E6l~*%`?=h zTD#7v?-X4YF45oHzxH;N^+xxs1_B8I6Dh)9RJJV-8ODCCII6l3adtfFWxfr@?O z!~n7b`xWc(yU0R$i+0)*{iz8`ASnabGucdYQ$))_DCRm@(1GAv`K)sV6T0>=i@$rH z@@OPOpOyYXG4|eKLfV>0%IJ6318%1@hSJH!1LX(r^9Z>o@{v=Yb(0%7QiX7RS-C1M zsNcasC=%DlXjg!Na*8^qR?CKo>s)hA5BR^Us&_Q*xZvrb_Vb+DGt$}ueUvf4Qz0|=|j7$*UXI~r9^p}qDs$32N?Ur9wmw^)MpKsH zgUiG`TX9T|jP;RyEy}#;PhimI0 zl=>x%+Dv6gb3~B(30)qyOwI{cIc+R}I{SoZ$CRYTAfO7Df3SuWa>_u+ zN1dB#3Ld;}A83cbM=vL07z8X$n<4GEoqgKFvN%=FbUsmUCK+k&vFXf25wz 
zpv;$Q%Mjf~KC+A0@%i6;mz;%paS&7kN&nNzqwq8&SetQfciYv0&_W0vHNY>@D`zww zNi(Lh(6XV&6+vJ+*N_E@uhvo^mgX)o(2(zE4T^LeU+i2~X#B^rmm0mG*Rqa?(Z&3e zi}wTsdcp6&ob}_y#7a3uxp6_SP7qQLKY(w{<%(Qq6mBiONJowEqvTCp(r3{`c;F2e z7RrS!gz$CanIuFAwS=Zw*f3~ap898-h-Yl0c+Ko9GJ?rp4^zuWmOBS+k_uikoke1kp?dZH=!BFcR(gf=dC z!I*^0lk^>LDM(hk7kMekt*+B!^8Cg*nGBejH&s|_))9HfBXc~tuS6oYl6Q&1Buub) z^~Wm37}tCE`g5C9`A+EuITFn<#>enpZcVm0HUrfm8W6@B@CdaCK)uxxn#0H1!h|f#-XGL9-N9A9 zcy(WhnR8QuYQ;_PD8QO!ET&EVnnFHv!ZBqwRcT{qDAfE?ORq?x&Ck3$3K4R zo@WrO3tILxCa^VBh;IYb{J%1bz87y2Qv=+Q21Qx)^0$PMJ9Tt*1@C1H>#p2`6 zRJ>+^z0BoT2EC<4N4l%<$qtWxcywCctB{~Wa#4PyLI?F?E@KkaZa1hUOA<~)O%B!L zDt+qsdfJZ{#g4L%rb!yVNYboyUe+WayMo+RBOOKE$lOi(t;z(&jLxeP6#|1onfdBPYKzO znz%CFyz2!K8ls{~yzGIvqPW7}&cU4_G?x>qMsb#D4^C^Khc9z_FmZINDJ+Gh${mt= zqZzhn%{T}>?64-?LRL97g-#vGCo=`90o;!y(ggj;+{1{V&Xo$8BOPS|JIGmyN)6qiUX({=s+%T`OTnzMqHW=#S zN*>vI)%Rnf@|&cu?vW`7)Tbk`YwWXu1sW9XC!1T8OwfyZdg9l&oF7Q49yhA}-*EqP z!#VCIo73;9-VgGgl2J>x4sYry7&ukB zGG#<8jFf_x82FIeq}2PAzn-X`6~?e>o`eBLm>!qjvL=T8qdXsTs81_%GK@`Aqf7?S zc#a-&Uq&Od^v^cgjjIs}?xa8N(brzwH!~-c%tA^fw=zi)I*cn*D|K4p?7`Q(X<_>J zLirOn+{w&d?vx`CX-4XP+D+<7xi9)m8v{BOOHBlPQKF!!uun0yi|K414EN)Qtu=Zf zWTSjPB4*8ycwb67NL=Mnv_vD>T!uFspm7}5`t_a#WF`~FlOk2Vl{#{z{5+8ll>!pZ zkS2pUIb~JV_cxZsebx%GQsK6)>6K8{EF{UUCDIz z?!Y4;4?U%={>mQk^8NI4tG3NHe2HnD)>=+lp5bEx5E@FC@!lppdvo%luU0%OW5fK0 z=;Sp7|T;7615<=M~O&GNwK*@qnFl6+#mYutq+j zRuD^#C%@#1lT=&R6PhG-x=hJRLiwV`R>O;$F7R{Yur5!@dX@R^u*kJDr^-^jVHjcm zCgI@}8nu6l;YgH=$;zx#;Q9qma|4Yf)egDZpWP!(lV|Usr>C9@bo(m0#TEtu$2GBU z9`FVJ#Kh1hpbe8ej#2!?HY7MA+XkY(&hox~GY)=N{J^@?DBwnnOt!AV_^Y4x+a3Mt z2YjJhZAIO3&UHc?n9JD_OtZ$cd|SDmH{sX#Arn1hxg~>0L(NSIjL>>+%00#!+gJ=; zLQf8NE`+ncN(Yd6@C}o0l1&Cv-tFZb^t`X0(`{2!Y7;dz16aSh^tqt7@uK;OskO+a zMly=EMKkG9^XykHL-ekqVL~4zR|^wub-{rUuzhw=Qkj47oc&5j4*~Ci{<0j*%@xR; zLNfYO!|X1Fc5?zFkOrnu!S#yAhWjd>`(^`?L_4Wtq#Jl1*&;s4C4C zw9+vvy=)STd{CM`Uwqp}&^xWmL(T@lcy4ior<=82D3}RuAy$aC z=T;z04mb^4Pb7;mbZxWakGmKsi*DJ>kkUHX^Z~TNOL)cv&74qie`={jkeNci4qrFJxh}cR?HHu4jB5&3fPf%S9{C7h7kJ^(3 
zQI`|qP_274SG4Q3G(CIZzh@(&OL?BAYht=Vn|h(@TB{rlJExF@)mNb)sJou1b83rA z{74MSlJ)w+0%B`sEBy}(OcLw*fn%ijrUT@r4b8H46qpLoyX$cph~Rhga|KZkTMs2} z(ji!2pwYx;)>n%RT-y*dk7+AA0*ZZslMl#8Oq1YN6P`Tl#$uzR0dTN=3CAkfQuBsd z$A%(b%>{kFNZPxAyo|}Fxq|pBwo<*h9&&i7v`G!QAgFUE0N5c`X@E6;J~sTe%%97{ zL^fN!*d1RsR&esbO(AbFhUGy~U3+CX?6zvN>`+8& z{}Ho<nNDDaVOgKinV|?eL&<66Z@D zM1hmJXeI{|i4giVfA3k6P?9(jo~vjY1Q!;e)kd-uhC>M*d7(Hq)JMaVfZr2_L+ohO zt|W1`24vtVjKf22?k=~LYQl+J@r~v_@WAd^J6myxnuVG)XGNjTnRni2Kl`S2D7o0? zedWDpr2OI_2ZCnKc%@yQa@2*U$E4W&ky~%qH9IvJt^IB)+2p}8Gsq`C=y2>iiSRAT zcP1Njbj}iU4FuyAXd%2i<%UckAgl~&3gf^E;3kigY1Ui$s?n`Tl?GKrE+y%pF!Gv> z?L%M_%!F(cTogTYYN|L8T$f)q`O5nCV#ai6f{;nw=E`PT*p{ZxU|VS$h}CiK>4hnCn|Yp(dPxau-es8 z2X%8X#HwQWBZb`471M()FaJtAF5O4{|AzaY8%}`L)I_7v~UGvwn`2}wfrxr|bXVk1Of6iV01WY494`*iPG`3AePQTL}n zf)GGoT_5`tnjYZak0fBu8TFdOcgWkWN(hZmN#t!*@6cb`9`R|}YVX5DhibU-h53j- zt2=}IyHHFV^Wx;}Fdc38%MVZ!#&DVO`o}ILAN6^T-V2 z;ZU6MY#S*{8Y_^HE8w{=IkHsZ@ffoO0>XEgNRx{b8gJe~a zWVb4@TH}@m@hw82-Z5`4CBsaEYI*7!vOW^Ae~k>z?t`|$X5Q!}>aJ*su?Y6R-f!TI zA2G9&&I5_=@l*yU=(L3#lYLi4jIa`MHD_@7p$vNP(cpDikTDgB{9T(`UFwTgcW3*Pd)gkL;fYDJ(yYsu4{=RXJ!<1Ze;0aTLAIwuB0XZ!xPL zSH_9Ms4x2oE@alkqcbE;k>-hwf)za;Gc3~GJE7&Y(`S5k4xs@LddscrSoE0VA@QqG zFL_Q{oB_cP7r2RN*{6=9k!!!9%s^=&EFRrD1ARpq*{ z4$Oe_q7bKZ5levZSKc{>Yvos-&Ngo@O0#rX!mc<9U=BCwHv%zqIaLAuuVs@(>OPQK zNDto#waD&DugwEc{zMvDIOjOcTpgh$fFtl#e5$$>;#A2{=?gfUtsn^Fg`#|q+R~rn z{_b#n_KTLS1#mj*zQ=BC@||<^uD3{25_T%i^6`r{9Q6Vrl^~m;qgze1ORlVIYdRH9 zN;5X9mT!~ z9U$sn18GGFw=rA{CwB27jiJDQ6SW<3()LZ&0khLdgz{Dy`Mwv0UMjxH^Do5Am8;on zo(E{~Hotv!!oX5}s}T@JNC5FRV718WIUD3&)N@t`Oyw5%O40WiUb8bzvwt+6plm$& zZ1EEon9lg4hv7ps%pFdbR<^GK8OP6BD&e?8SG=8Uf@&xNcAKR6fnJUkF7QUF;8Pw( zs7x^sLv8*z+US8&Y%|5r39+s$>Jcs3=qo>V&t(cWEHti8DJUr_P&S;B+;=3mCpu*uJ z_P3@H$RUf8{~#B=5?7d)l;Eyjx9*ok^rfc-H=K3n_k`?T@K?Y(54i;v%a}B=(@Rtz z1-Il@jz1R%|ma2qzt3de7%6($y`JHpTwDY^i`9zh+37v~7xzkQ}qf`?N1# zDYDC`fz-{`MQw$eGS+-|G?EE@uyeZBq?h+}_&LuZpgs;Fd~dx_a5D&Qn^;ck*(rc2 z77uK+t5Y;jsxgVt_(L*+Us4brn!7?6am;Y85>u))T1>CUI5%e6mLaAb=b9fl#x-Yz 
zZfqq#&$Vv#LynC$vK+r0DO?h23D4p;ZEYLn6$w!4WN6W&KaU|3rFDQ~lQl;|@IAb9 z_moVdG=o*6s}VCSgt8wq;B;_yVl)L43b{5;->SG-{v@V6|2`Mid7+IATvG8-X3V$K z6sZxvohYNwZojr0KR!Pfhx|C5Kh-jcL@3_e&Fxj@pl!C&r8}HCxsNF6%M#tf_5Uj~ zbHYpz+&qB{m0N4_dpj7Q@my;p@1H0M{K2@%XKr|)D)m`yg?i6RAoCF6v{yK~chdW3OhRddUeo%V~S4Vj1?R9d|G0q9Q26t!lR9qiXDQ*{CZiTqy7$kAVU+XPppuG09w z;r{1_(>a;cSueqN&9O<%XPh@txV%F|*fogcs&ZV)aM5O6-~d5J7!v!lN|R}!ne0|s z9+b|}>OqxYqxyj&Ax{RVvU}BzPOd%DM@A_~w&j2kZ1o>Vis>a&`Cb@fhLZ5JMfase z?r&%(MXms-+){?N9+?()t`5-@XA+~`mBm`9^4#9$@1QwlTCT=}V=*)*&S>uy24%@H zNQWGA0iQ=zLX^jH=Mf4mU-^OMMIVm-N)LsTYYT^m6xUJyssDVp;g0t7#Zgo?HogR! z>@iODg!w2N_Fo!ey)9U$E2glJGxaPW;Xh0v2AFB#m>t=d9l5UxyYWMfuDzV$kk;FQ zX;qOw_Y2#=skgeBbCcpb?#lU^My*h_z8te^RHR#iJKh@u_!$zhsK-rNFASh9Q)XFM zqw~~WAU4|Fg8_Kn}n?)5_B?&}mo0N5g(dqMaPU*@O8o2sD?W7j(DJSI{Yko4b60vM~;ww;+_ zt;3vjMbbKgS<5DkuWy!sUbQ|M>$%VJ8O9$0aJKZdqD#L9 zO^4vq=0yE|MHu0Fz@pJ>uEruCd};Z{n`f!c3ENllU$b`+M5?0;tH%P;7@ox6val9E z(Nll)odtTx@6ZtR12vU^Ywh@~QARh3y=UD>S-saptlF|W%!6ob*Yy#d>)gWmR3?;2 ztnE8jg%C^B^~Ug$<{6bLvo+51Uq;q#S^FZM{IB|-7hi6>7Dvt_3iv;z^U@W4uPqc>6&P-K|Ahv3*0hDB!aD z8F7D(Y4A!J!)VXJo5Be|`hEkT6r=uCT@N(O=t}`H+R)(Cx|G%;c?>?(IHAz)5aU)+ zHe^Z3zqoq-#B(c_Ma+gKYLmJ8wQ0o9og@u=c`WUcn@qj)0FTz?B>P>tGZW)YE@A*J_O z5^cEkKxrSNINEu5^XucnAfGoe3Y{B^vy+Il_$HzT$X_{~cVe!+aRyc;7!Z0UEypdB z#muofkV3xg>Cex&!L8>wDPR0SqYV(8u?I~MlwTeMF4PAF{XO!^(=*hzD73_6{)&54 zVP2s(C(v)sd?a=VM|XGtUnl5&gs9k(i?s+TmtyzLXc+|Jw65vblVkLnLu;1Z1vhmQ zr+D*};QxhjR+EsaV6-|5h>Z)0+Yl2SK-Hq^TK+JGB{OCn)^LrOlSanX(=2MsqOmpt znI5{DieT(=W${0o#B6*fQ$(bv=%$R%BRf{iCe;$LMstFgqsQw2-zGrCEN?-m*x}V>P^5nX|>qG|*bqHM7L3RTOW!f`?%c{B(Ly z)SaAXB-@XVcUC7hQ(0n-0pFtlbW~!uOOX7SDWf5!091s+kMe1O=Kp(;{ggDdfjD&8Lax!4N@-$l;KdK zW;C%wL_{w8MxO68t*+cyFcLUb6JoBxCjY9Se^cR)(%93`Tj7?dKOP6@QU^EQH#5Id zl`4mIM{t9O9N?<>k1cUJ2DrJvyOwtBX^qc_3t%(%4|51uUbU$lIf2%I)=nj1NjpWA zNDeB{aVp05jxo4sZ%|nF6TO-j4ODQ*tM#|w zc&gc5yl?$029`&)CmtXvtZIv_~Y_(=M)dKX1lmzd6p|KVV@sK!2llR%f#Vz2BPiHEfG%z*-K+NZ91<_M3es$Wy$emyWkq?wPPi>b^mGhaO}!)4)5HsXABJ*@Bcs;~)6 
zQ7bN1nYIEoT3}I{lY*n}^OwsE&x)|t%7sU00}IR2!s+8!ShZlPZH^%{Gn%W`FG3*edCM zXskOGx8s@hZ}W#q_WT2Fw}d!ycaAx=i64EVgsq^7$JzUSaXj3!@A{5u%VLi4*kEAx zKBlFUq3}i~G(tU^95xRq^dpA#8G~wEpV4CVg2k-pWC0=XL z+m`^g6GS6(7i0wo>kFqXvKwuNEpT)VD5< zlK0h|U8PXSLGXWE?{w|cH-)V@bYkkP-wNV->_cpW{-xs@OcAx>QQTF~p6D{8y|AcN z;-f$VR_l}0e*^5_N|siZS~ zH8>O-8jkYYPL0VFDy7PbJ*jMd-6V7;61AwG1upP9Elh;uh;cFELqaa2AcYeRG&VNUai>1 zxzNVZ+oW7LH?5c$0?3}C>j{?t7SVqwkGnmMjXw7GO3EzWX>%G0@IhFXpS{Os7>OA+ zswdG=QtsAi2dBCGV);Gj0dsRYELJbo$QpgEHMD3NX~0C&b#33$(o|5|?QC*Nl%_D} zI(OlRdhO*d6Q(~9L_NjeUp`EXVm>&T^?H#@@pv6?bo8Wo(AG-tyhMq0#PXnM!k?DV z4X6M#3WIhU@- z^pfJ911k1h@i;&oERP5&fKvW4%0;o#h-9D-p|WsvHbBHtVax&kx2@GU$;?p!rhEIt zml{Q}wJZIcoF9(*Ez-26Q{{=gla{?$#<*UGBmqC_P&6++i2%!!L3c_~T4}*UVBr~}kX&%OWSA;>v6KavicLEm^kj<= ze#Id~>%n^Ixz^t|K7hMU`5k4`0rN+;Z@Px^THy960=uxZ!i4)8vG8`&{?J!LYQ7k9 zqDer7TC!w77%eEiRGx&oHw0{gQ*&H3E4DPAjk)Ca(+vO*JP>S(Nq<`#>p4TfS^IFa zz%eRSFFzo(ur5q;KR79jj!2__{v+=!X4;$dIUU;!tFM77_aC;rr@gdgj%TzVQ$cp z?PUuvZyJ#-uEXAWKSRBfjI&@zO?PCFamz1elW$8)Qy)*y%hNWVbx-!n40cdx#U2M_ zUIbUdO{^_9Y#5J?_}hC`7@d?=7QKEd9zMB@+XB`v0Z%tN1(RQAkt9nokRcNK|29GI zv`La9FQ@~-_IFS&@kE8m4neHFDkP^KQO+QTC!%N@BsXkId{BazF%O8belWRjH6!6q zMa+fi%qpd_Om&JALNA%#{<27g1$#R^0<0GGl?gc!w>0QLjG5>|rt3AhX^ZuyMX8LQ z^oLsw+KyM_!qr~$QJz|H$t{8?Afua0LdLY2NY##{Ufnk~pzOP9&B2 zio2=+-I{`3=K|Qyt*3#|p!yZ%rnn#>=!uX6klvy4ejCQSOj7;>i(UgFv`y$j>`oqE zzMor$mKE;C-7I!$o^cKOot7*-J-`EOv6B+b$mamGcMpQF$oB7)adiNNJ-E^srL@Mxs+*Rn%&;eUXo#}x3 zZw;4jI7!{Q!mht(95_>G2!or}x?Ryp1nI?w=gTS4m{8}BNVPmEj0?cmA^Mk+ z%xUnKFJh#K<>mrQg!wFx6{oyn~%+nOIgzIF5L$NZ893?`mU4 z+PdJ9S}d^v`1Q%JNYQH;ja_R(9?pU*svDE zzh1|CLf9TcDF8>YLq-QKg?p%xy}G_YW0`aVas2f@&~}>|@??D0v%kF;vp7c zBD3P)jK)|S%oc8pr|HwvyXw|sL(S9g%bef#^D$-KO+oWZfju<73uQD@Xbm9A-w*IseK-CPn$C6N&D zX3PXBI7g;3$oGGp)JoMfeNU)o@X=p7sE|tJR+Pi0g=kbbTA{A6@g2yK`MOxm@G}*{ zKlNh7WlM9+Se_{zazb44WvI{HRjCQT^ysh_#1f&SM!;Bq48$FO3=3WJi7AawrnD5dbv?y`rd&~yea1AzVn?-)fRl&G1**`KIpk=NF z(ZVu+2atASiH&U9y#29Ky}bUJ@M^oKp=CPteM2z)SW)qXdPq+9Kp18S>H 
zv+X1{TWa{9(rOiorm=9PP@?0o=fnb&4q-V!;{|8xw*_}5W2caUo+?W<5W8S?%(p^Rx48^G%3P7y2#>_Y#HNiWo#z zHTIKAdah3mny@95ALj81q8} zi**Y8e1w7ZX0;&^F8sSVe18;@~jUvmNLyIPrcEmK`L> zq>zc2nyChU_QP2>+%ZRh%J4nN8kK2UGQp6t>OjqYFDalJ>6w*9PC~*4Ip9H)U?NM^ z8~OBzqqVvTK6rSX0xKk4kc&DFo4lZtMnv)aZMPHyN zMlbwYkK0q`m=pHq!z@)fcc*zwMNWc^A+}C3$@zG$>vB^NUqqN8l+ArCqj>|%F0HDV zh?YilFs28;`IxB!H_PdjX1uh%&k5u)7mOu5p8o|xvYe0#^g8L|fwp^9E-=#)2g$z@ z?hg4pL?J2*t!j*F6c!)#du{}NA7lf!=~%XEJcdA0v|I4ajMYY@b*Z5n>Z|UPYkQj` zuaQ}njtRw~HN;d74_*Y3wjG8noxf9_R%Igs2SqI2=>bY=^cekZUEz3d;K@-Rl-dg0 zG65b)g!<3l&=zfQcg$o)`fQ~Gqv-rWsN#)WG@~n-n{=qiO0O2~N8+lO#1_hk77VBu zt%DtZ^n@X070MtyBODD9*ShsCaS40TUiaG4P`11Zj1Lugo`%b{N1s+Xou|DlW3`qv zLRvu|U$OKI*B_WkK4muapprhqKcr_wc9IzF{7L?i)siENsr0suomlX$ zseCP3w|oSU;XYSGuKGYl7GGyc_drO+vIbp)MGU^-MAKvTV84qxpKBUIkL@PXKHp!K z`I9jdYXvgYV%3sY;T>C%UbMNyf}=-ScqQ zWC!ulB2y79$D58=j>wxs3Acg_R&j^5h$pIqh7v!na43*TF~>cJajucYn@I$ri|_!<=2W}rC1yqB1~FdN#S8DU8FA6)wJGqH3DHvxJ@ig)y%xnLr+q81aX$0-Wi?R z(q8k70{TH@!XAu25Zgg)R>&%F4e!fa@EZuYh%}gSY=8y&3<2f^bd(U7p}y|i$E}sL zwCtG)z*Mg2h%s)xI+FDb)^2TtL88h2bs;&xs0v>gdKx&ESQRq}bk6}`7zW!XV^}8_ zxyH+tprD2!DxbeN$-UJsWAt{N6s{E3*d(Ys05OS!)pEt_^nP4fXbbpsud|;oCvkZ+ zkU@u}@dOEqU!gpPsA^gumkf4tOU^e`Rr)dj@wZKwIR+n-v!cV1Nn!W$Tlu09#YUa> zr6zcEbPLT!(`V-gviliQ>L5q!Q&>p$DBwdlZOKV=EZ7Z}`7;I=1m!IDH-9CT!V*0` zKX$J2bgRDCPwoI)PNfruFgnRIH9l9Gjwn${>fMwD?V++{uHf{_l*h80-v_M%MR;yp z;m|}=nHiGym7%5F@ivziH2B)1SkX!PHd^zlqZM=%{8_EZ@B-t;izlX_5meLs-gHn^6tV(wK=83 z45>D}^PLW8`K0huZ<2S4LrMjG-ufRoVC3IT@Y#@pQ6KTq^2f|{4#%DH0ts%*k%P%_I1YMKyjDAkGD z)L>NeJk8zKRqlEVcO-wZ@(h-z8POaF?bm2qHdQ7T z>@p4rIHnjIG6&w3%8;e=ZXPpGBLuSMAOcYDHd#Mvp|4O%xr24IP zDx$)*lKXW%QzMh>h)Rkp*)gcqJBeYfdYX;#`j^SvVnFG3gA`$8ox%bKDra6Pe^?IF zl@|0xS%d#?G-Hcn>Z}+UoYS7&(wg||e6D!g;&*nge=>gZ&!s_YLAE@n<~QVTUj_df zoMcP8Al}?qW&t@n6FD`d0LiPNFvg}4g<^bATT;3ha7wd66h%l}n+o|3>WT{H*z;lm ziJx_`yn6;C@Sx)NH*kG$GOXj*Y+6f22km7LS#;67g}hdh3lHo8@!5?7d)~^EKqyfx z@n_o3@_+fOG&0h#Qb;$ChsNKrG-X0vqx(G4KykEdZl9hCEXL;-5V3joFjb#K849R( 
z7{tJIRRHRrYI1=iuwh!EmVn*Qkn&N9Gz*SvYa&eij(ecgIpA{)`J^YX^J~a0z=0zc z<#9@KHi>`Or7NcKw99*fXiE@!M>priOS2#~?(fX}Qu70G$J6gnml$8ApC} zZH=FblMv;7+f3O`h0Gje4cV^eGb29bhtwyofFGoF250+rZ$m<%)4}tTOchE;=A6@} z`DBSBb>cfzCBpF6HORMC)GlbLDGC`weoc4NA^2OOf=ws*DiW_F={{92NBNNt?MY@D zQ`I4&C#2yv<_hkC)(xm!4Ttwf4$CsVN-Q?+Z_hH1Nx3;Ot>m?n3;RwhN@qbPFJG6* z`swQ5e;rL9UtgdQ?cbp({jn|r{A%I9Dh$+2b9uOhXfCQ@%yxxhyR)*h{g7E1a4>`Y@`))YY* z+J>JsI<>^F+U4Vb5$z8u>`LNLs0s%Thav??ELGBFZ={wz@$z3#e;UT10hCg6fS1Kt zvPo8OQV@)q>4+Ih8XWOLusTDzkX*(mPxL>(hC;;&hyYbXQtRhdHrZ#@sWvU!^YE^y zhucp;*B&*4Ot&JFtN7eiL?|t9AU>qrh$y42M>D53O zk4d)4oJGjp4))NiOur1Sk{bq+!5Qu7=~PX5(4OGoEvU)RDo_n1My%l<_TD7-Pf+pe z7LaW&>2A4#D6^E%k=PminT_KeucC%Wygp@QO~A7k(;tuf@7W>FzC((+vAI~&Q7PwB zy_Z=&ie$Ts5jmDnuXYdkeFRlZ1V3(?Zu5;U%9*F~T>aiOZFaTpr-?X8=#lWVd9&21 z3Bp@f)hH#x1ri>3>N$`MafaKZn|puoP+%%)Tj8TxL7=B@219vA!##v|1c$EQidDmq z4`wfv7m1a1?j74EJZUlN4mvb;pcNF8+IlnH6+$jL+4z#G zcu%lrjh?g%WvriL5{(ZuT8;u&CYEBOB5=`s!h7O=pKxMRl%<5C^Hw=5Os&`7awv+! zP+b}XxEA*Ya-tM{$!IQE`MD*HG5U6m@|KbnzZM)70}xaVT#BUzzHw-7;oa(s{NX#D zL`~32&kAE_cmlB;b;5pQ7peep`F#j$YnYfy?+VeQDSqjPVnvWfeOIWIEpaq&dVUz0 z_0LNO6J4R35Hd{)J3PoNd%lb2E=ZUHC(SiG(-KjARxDuRX@-*X#{3>*kVyiQrw8Io zG@gYG;sPnZhnyQho-Z(?pO^yOth7T!L?NR|gt|$&f}G zDk@&cFy<=XTo$eRVaKw?Z$FT2#(A+xTL88dz^fvqxFs~60=#gcEZ+yK^s5`VmnZ&o zQBig2Yk&cvcXCWg_>><}m~bk?5h41aSi=4BgQOjp%+pS065wU-2Wu-^<^+br53oX! zs2#iZM;p__bNw{|V)gFeB`$%RD8MZ|6Mk?tEx@Rfh6w0mfE~y-aw4EN z&Sd#-4cDBpsAyU)xQ8e2@9{5BtXT5P1)na}3AsuUY49Ml4s|N*^-NzaLZ3 z=4isqh!4E7F(_A1E%E5vJNx?gOz*wUtTEd{fWrfs`a!AK1DK>24{vq^ey3Z9$A7z( zVcCx1m4($R$4ER>ObM@xe#;@oQX0$jh~Z;=9O@5$c9j|TYGJStpo%hJ*A|!k! 
zAth;<8Fvo-7KeALd-(uccjKE7iFt?GJd$;d)TG$D8^uPO;3YBw^Usa5h2dcw$Y@@( z!(X-TdV*zWrY_5{-o7N({Hgmi+$8|kdQl7Grg|)7^bcwvMby&lJ`ADgu1uBSxABay zSG5y5a@3Kt5@sd7qDBqHb>Q7l;v90wsZ!C3CI-tDi<(E1wh$I0X&s?Uy(BqP{J=(k zrNj9~^H`+Nl`1kz<@25x7e#tS;wk^JvSsSD@m01&;0XBhH@fmEF6X#lV4}F>!MF=& z0=krpd_T&$^B3waC{ELh6|5L27rw#I?7Vz&+O&XAwX3n8^TnG-@e4Q0m%3-o;F5E;Sh)YYi0?X5mScVoxZfh-#$z7As*U|)a1#{JH}L5#kRNj&?o0He z)&0mHDsz=`-Uw=8&3CFxQq%{^nv|V?mCNftr92Ear6Bp_a|>#LlM;#{D~uX`2&#IG zzXtAQYiola(->=vvM3or^Ma+eUBWHJ&-r*V8qyIon!%_qluD1lIX*l(cvGEd=dQHgdLo!Tp}J6VSL#OQx_-oo!=`2X69C!YlyimG#zOBfNhE72HU6>kmPf{hqQPq#k6O}q@q8Co*Jez&Flek< z4l>Ri^rZF{x4KrHJu1AT3Oigj7(A!nlFV&m1;~PBcBhiPr@1T7o_~3#>+SAyP}$0UQ#*fi zU;rGrUb9e&t&A2=$)s|Jgre43a!|R`jrp!E^EY$t9IS%l&(gyHSY%=pmp-hV^KErE09)N(-vtB}K9$)3|^ zwv(Du+pU&a%t-cDQ;iKp7yqlJtUSw+I%&s>de9)R3O{OX55&kFF-*S*TZj|p?l0rO z)c+OVI{EjqpL~@Ftol;LD2guW*K|bqG#vMM@0^ISMOUGU9OLOplQwda-RL#@K!f`9 zd1no=U)48mW496XYQUSTSeH1+qcHNsGvjzQ-$_a~AwLGK1V=M_-2Ev6oqZ(K3NsX`AFgLP4alx~qQFuvD^v;tA{QeX#*a?>h3>W{ix++^lr(B;kf z&tW_h??!(1oPGnPXGv;<>hE-K4_1w?l|q!@!$mCSn_GPT870xT^#N)r&mS^n_>{?l zooKY9veKa-I;%_tuo8lLjVlW_QMz+tfoN(L9QeC+j}AN!RS<}^WG6!whemv;9GPd= z3%k3u7O);}Y*Gyq>H=9AuHnVwh0QT8YGyBBlV({ZbmVxYo!Ro9I}Ed2K(ApNPA*MaE*Dld4T?o@Mqx4#IBKrXBYHJ zGFX?g%If!pBz5CMXyKeqe|GO%`J z#c6C%;D(L1_n@WQ$7q8Xc~KFi;I+y=quc*orY(nk}w!3=Rb)=q@B&he#b zs76xfWSrMb)i}%Sbo;#of&my2?)dA!jx=5vFWJ)?v!N@f{$c!g5op$N5g5#ufjOAOR!+85ZujH!w#$+C;I}AG#wy(3xf@PKtbHaYf{%Ll=*RvS*488Y z+8t^g#QKivbrBZ*?rHLejR{>B>Ay8x+G^CiL!7(-VeH>tHX3M21X>e<^cClV6Up0X zu=9iW-s~ODm`m^j1Nqf-Tb!E=-Txcze{MK1k0_vKoFDVpdFu-L)dJ_g;}+yX1)px1 zP$RV7+bl{FpBx;iZWl+;KkT6()b@C`prAU?Mx`=I_5_GUvsCW-6+_O&6uW+kyO z<%_bWhdbyeB-^ILiL|-tp-4Gk`s99yb14Zn1Hkfug9G|HH{+?m!s7Rm(E}=HNUV>H zk&9n@@uNgzb}Z9=yi;r0(I#-W4VL5rpZ;#q7#TnTB1ZKv?MSqleI3wpgqu?y%VUYlsh~2|3)Ei7DX^N zPrMst(bWEU-H~o02AWtH$rLTTo+S;$nJow(@U22q+?AA39hF>c7k*QO`V*kub6 zeS2OG%2JD0pMz69dD>*ewg8*AgQ0;8ZN3HfuPbC6mfsDdwxR4I_c9rYM=*%wZ?ySR zkkXcut={rZ*RnbUp(TU$=U-?&u@(NnI8eEomV`N%C>{OXKM?_ltquP+>|d(`zO)vs 
z0Y>GpP~(>!9_wmL)8v>qNPechGE90B?6vXTh7nmqxA;O0X z0;$K?uTXN;JdHQR3k`i=hIYdAB%r`NATjhb?B&jFr#hia9a~*uduR(%-Q^p2$_?5k zl^jk_h%vu|1TRK{(hjcqGBYk zWD)L*;$Pb1EQ|Z+jnZkSrq_Le8kEa~-L>!(pP1q+7YQ(?c z4?@MZWG#5Y*4__W<7P>`1yAB8Eqrt%ufF)|+wboXfQp4^|;6>T{hdG3I&tZcZMk84$VW$fqq;09knz12fac2K# z5Ihu(;!r1rGt;xPENiwZcl#D(9L;ATvukL4(oH}R@)|D_m$I$Ug5Ai8LLPDf?=?N7h=p#TW(t_*bA*-d0G{@urNUQ_)VdU5`@Fh=wkiy_s`D^xp>O%3-bGqmi_|yG>Y`5 zJu_ACD##fgW$&%%qn@a+F31eoNQUYip|Rlg61>0AT}!5MiLZ&{3`{Jjp*N=G7QyO3 zGMX#iko(e+kamD_Hm0COo{0a!`YIttgTxVP{lyu(&BDjqw3X5W0naQd?`Uu=hDi8A z@w_Pmhnh#M>!*7ekh}Z+wzhcM9S((WD$@`K6BSLB68QSqVJIW6eEb@+oFv0taw+>SFvtL zW~1ucS!SN_bgSV7*6n#!pz3Up#c|1pf?Ao5E8_n9Kys}mE_A6 z>wO-)9{dp7Urvm~2YNJ3=SIBVRQnIR`X{UPOC)LU3>pFHOdhxc}C0 z?b#Vi7A;Q$0Ae%0b_ycqH&UWvDivREe1$Par4VjiZwHy3H7WFE4n+D7#t}5;djB`v z|J-mul-uKi85kc4HV6jeexKBFA_h z->HFd=Np+K3$m`KTbhb!rjmd|K1d^ogQ?2CZsZvo==$#~mxG)y=$CMq3Uq`T!{w95 zeGzqFwCMldFM$fgx-P8wG)p(S_KgZ^SYK77j*?CmxV$=pb?yt$q9ef(lmSO)1cH0* z7VOf(3YD6QO9O#I#~c|RcE)B>H~w0!-{Y1`?n>72ZoM@|(9EC0)=-L{1sneSi8dkWx;-W!lv9HCG5-YKzx;HuD=2#ENn5nEAjb`nlyh=g3P8 zHQC!Uj)iDxGC7yYXBV0{`kgOcWBVJiCe7@5&@?L?OAd^wx*L=M+wLGw)dHQDAC)Cr z>yW&`?OXfnpk0$VvsKqlN$H&+-wlcR!0v-fVVXF~bUw0T;I#PxqGU-)(ghG@urC=h zvitbhH3;kY6wZ8HRFt;gBXI+h_(YOnTcKobk@GDm{Lw5_{A-M7`#ivTHL%a{@xu0< zr^K9DlGbgW;1l#@cIH`>DKuvYfY5F5KJSftAzk-}nC9uE`r1;o(ur^%c zqu-^UVQ4E!iMLtR5jogNGjg!m0)PWVl7uZ9^{Jpu540feZX+kuig>EcsY>a7%~9WC zk_3iqxVW*#)I_u`GS;X)f(*}G_g5;`Vv$Z)W8rfv+o3Y|q2<(lv9~A_zsA1uRg3Bl zyh7Ipr_O_?m6o`>?Nhi|_bjEk9R*h_!e!RV#$bXj zEn67&aJ$fk2|GS12|8Ph)b^2wR)zM^+*=+&s2FD0ED3GUswGlRR8my`l@b)uVA}V* zAsAgJm-p8F^>7DcNQR*GH;lZ(>VasO)DC{2K!Zlg8?P&uIf+BhO%$T_SE50vPx&&d zUrN3a6^R3myBC7PH&hO%X{M=@<9}aa5(vL}LA64W)NUeLw{_!xu=mPNQF6u-3T%I< z-7_Rfy13zi+KNqn}t;bLMDmM_1Xa$QTk-` zC0zf>yB}{wbl_nY#sNT-cp69qI}y*iLCn5R90eTj^)>y1$OGc|TsC7j8F$Wl?)0t+ zEQt!_J96JhA!UO*Zc47``?u~~Lj(!AlAbHV~WX~X>hPm=M{-I>+%KlRSq@dK%c}qJ?^3!8aVE?G`>NZ8=g=3N}jtR zo@jD7h!TCHm0SCB)CWbrF_IVp{~phS6w-j6jhu~PINBw4Ohj@{=x6C|kGIoWN1y6R 
z4}>pA$#676$fO?zN}q@!?AQ`F28G?NDm@^X-seoSQqjlHfIhTuVf0;Tw%3y~7wvCH zOqY0%(BE}6xT3s|rRpR!bUIFU4&T*rGC5~BI>2CdvP9xVl9?^4( zpW=#gPvdnXfs8TH1jA875E)p&zW3PFj_qnRB;%Vp%Qd0nA9dIDoUGw_ZDg}C4L?Ws zianhPLR|y~NhCxc%(ndor?|O%x13@KcZ)s1Drx`ad+~RtQ(K@&IW6GxnE5JBi;+-t zKoB+tZ)Tj)W~%t%hCJI~czV5cvC$l}3G#ol!Omt%WO*~BgF-VIKj3W>oZEfEgBvZ)fYAZ_GWxwwd>}gBdgfl9 ztwL9PCbu&4o1N|(&L+@{<3{=DV2jvtr?F%E^fqCiXt8}jre~DyM~K3Bx@GTaW2t+S zl`joJiOompe%q~>>~kIBw`}rIOjDC<;hA^C!e41SH65G;8-0}ZaWjNs|%?@zc$e+i4PUi)rG0Mm%%i%yR%jU zYyK@tXfNrP7B*Nx6&!;YLUf_~LsEkjSD;?fiiAp6T{vsxPRIA)tOaPe9&5R92#{}- z?R)<1B&70V55f=Hp%#Z*6;2 za#8XhQW)hND9ezHUU>2uS3@mv*~QKD>kFW~pPk^UzGA)Bwb)=nQuFeRC$5wPg=@pG zZk}k(whgT!VtHDj$~P3+2GTAqmOhykD`M2j#|E_=V1j}~hz3Zqd=;l-kpvJu;d zOLyu#zXsD^lg`c?WMWK$8Xf~f*-T_~$sZfeX95U;7Mad786e}7MX5=SvUF_3r?nm~ zD4gkdO-U=bjI^zTnA_WexPE|?$Dz88|E=N55uSHIGfRdP(4qeLS}^}G2GHab2*=gU zDHv;uRp5xm$gf|DmPGeVs7=?jP3zd}|KD)`bHgc5Kbi0V6{8MyNb1ZYOwkxoy}=K0 z=+I+E>cl-xlLUfls6m>LzPEo+2~%pcsOka2#quP0Fo0?%Jb>xZ4Nv3C4L1bs&N-mo zjOCKJn%0Sr&YNPLZIOk3=6F}8F}mSgtq8ROUcCmz6M;TWto8@A$rx{1hr5DI+Zi;0(qKjr|-9&w3y zy^|D@W|9Zh2v1umyv=9h{5uI$!x5t0(A$CWs=G^6{ieoDTB%yzI1ea_krN(1h<{}b z<^@{2c3MD6A0wf@ezOw7`mW?(SgpD*e_#kRc~(ycg)7y$ac%vJ)~pp%;X#p&ytgQ< z<)jpsWV!i*PE*Ig^{dYcw^8ZPpdseoPPBWVfY^;@oERjsW?8CT_w_=7y|xJE6}+f? 
zT?-_0Vlfa2#0Fk5uH z^|*`R%Gx+5PjrG~`S7Z3suX@!Ve`vuW?diks4~Q?%N9&Y=$flN^@L+xJvAc+%xOJE zEE)O%)kX(6AXuk7eVD>o1=+AYp+>D|?L!w{#I*HM44^i?hUg^jjUt@lq+i2ne!R;u zz~KPQHY+iKn*-5^+%l8@s@3KlVunhvf-WJP!qcelki83_mytF$j~Twdt#hFO(^kl`9S<+ro;yTZl-%`>`z?5OgUYm-w@_T75{hTF&9h5sO8%BTS6LA+H3bc4Jug>u`}^`L)|4*JKOMTO3OlT=2x;N zc$u5~;nNh2Z?SIfWCpprl=np{{%2?%A}kqk&Vib}XBKn-`lAgLl~n>%9AAL>dqNw( z1cd{(akiB+riXbVJkH`!<-@AXTw#l>pXa^1RRoc@W6kq>zRn7gQzdAYuB+85cA4go znKhQO|F=vS(6LB(g?>g;oK73Fd0((sPgu?qL(9;|M9|MFBKR_0F@{3@tUIJ`FkA$P ztWO`f-X}^U84|#7bVLbm+aH3+$kx@*`lrWjz?htLq%`%;dgPLeC+Q5ov;xd=Yzm}F zLA)93K=C4{tB7C4vaQ1}W_p0@>ek0wn;h6E#uabT;N%K!x_x9TFq=-(?|kshP`H8a zWP-}|Tfd!k_PO)fQ~N74Xn(J1o|;cN+P>I+IReN1YUrz(AiC8uy=81(1W}h!`m3vO z2X6})%!K=NV%AL6?7LU))YvJyZnGxj6;GfKk?KJDd}N82%TixDb6k)-MV@JvA{~VJ+348Va@SN{wCG_H-Ixn{V(2 z?p6>z{c!4Yb|TIDSDg2VYallI;32gO9PdY?b;ivyXN=Cbv_l zc_R4Cgg^7^o3kTgMKb{ft;>po+5*Y;59V8y&}EOh(k|jV=ANYJSb9ELb;Es;D!uLQ zXOH9L`+Iv{XpFj;;9%4XfRB|~bzz??0!>&R719yRS5uuz1bNG=i`9%2{pDRoP_b=4 zKCR!1p*->fc+5(%5+TVO%k$JmDW3>;J`Uy2}ME)uwF>r1Ou_xzn zAh?fm`9qFY_SjKrve%p{M8b~-F%dcCJo;9bFKJ;H;*gj?_wzJNRI)B2RapF4Jb_hc zu0!rR(E8Gh^~~%amgj{K@`~o%mprU>`EdyyveGoCD(n7=;mpzi>i%p#2N(c&bD7X< zJ;Jv_g8hRb%oy=6%gG&Gkc&~)0W3UpCd3i=Hy+OGPx1aQ&XZ>#a}1P%jS>?s0jGqh zF#f`ZviHlZUEE$l$**?>?)9OgSQMC8g*yNiEIM0pgb4?BChL2$Gt1o7!sE*9{EDNL zbrkqm?jp;U7f-=;KJPPHao4}n$OuBqE}Sy%vDWaW-bB0kW}-ZrU%4XTgqsJ+`>X^1 zt>G$y)1?F|_MqQly06v$3Xx0IiZ#xMCh+)|BUS-Dh2LVuPP0mGwz^`92pBpsNwFFH z-*EqP!~G$QP}55C)-^UZS);3)-YI$>WWf@FCUwmYwHazAapYBv!i>##{P^I~U<*$) zjcyy5HU~0?L$&0= zst!8#2L9dl;(1!3*Am&9IAc8}mOUr}qD;$f0%2^z@BXE)&71&Axrl$xX_0Txj+<~- zg*8$Y>blS}>>#fd6znVU2L*x&{0OB>Pva7t7uT~Kp78$%+&~rK+@o!)_ps6Rp1Zi$%zjCHgA2WUJ_6N z_4fEyueeyFUULWariBXwl1GiJB^JF8r$NEWu>?kG%ZhiflTROWIe1hIrDAuq@r&Y% z#zzC_wMPo7v<@XtFogD>gdz}((|lJ$P46HKHZX4zsZSZvzw`F$;FD^~cQaszK?@NQ zhZ})WCf8wfMY63&cAUtS`qk*IF+GIid^N-rwtvrmVNJE-k}+zLCBatN(N*xcV(VpR zizR(o{t=L@HjtL3Ze*n~O;j)_ZyEnKtxeALhhaU|XzP`H?(J>5q^6w%N>3mQuTT&IG78Z>XIy67#qmd|1I#D#ac7j|-u5olxdKH6T$Ag4nTHT3AAg=`52N5V5=fw>X 
zz)>lk38IVjnPm5;T<+1yG6bK4*mdGqfl!O+~CN5e)9;B4{eN- zr=u!Xq-Mmg5=SS>2Pca4s|H>h_qYH?d&HU}Ahtnr>>wi;P9z#kb640I*9q?)c(_7M zOA->JHd*2FR7LO2Z}9!6;Gt|ICKZZzFuw4V4hH_UagIVRKY!l@V%1;t)F@L#XkYy) z<7^Loxq*E*dK|D;^PZjjYl-hbZB1&eeZckRKDyZ({&T#G{>^u4J;A`$?si_~0#7XOOh8pNF4J3<7BZZr4GCX|5iFRh!{aqb>`LY1L z?xPm*WJ&gEVrjo5O`|iM3L2Epxts^%zKQ02cKpatZEB%^lJqpn-U^;knn-DcXP8TL z2!42`;P;_!Yfea^p}n^BMVzvU#+y`3*^b9};>wlnGS(4z`i|2(lr{^TWt14*%tb^9 z1j|w7G}B;}X&R*0+GUgN!FAI=@kmI6f~rW;M|$LVaH4Uz%%w87YX&V=^1+4N%J_LJTL ze#@y~2ofpEVjJhyCg&4`|CxG;)_p3yI%u}W^nPN7BYGg5Wwb>t$v}~J24qdNHCyGP zaslf!0Fnkp+ikrEGo3^>l7`ZZgdvTheFf^jHC*kHCSg}GxV)~CupC+w_-34=VbYAL z)mTld`_3IGy~k}pR_jBm-auVy3i5iCFxlV#8}5H@I0|D&izvg}rhv2g?UBD_nlZP3 zQ18St5BHJSWax;DD8Ygmi57W7bk#C%*&w~vPNe_fJhZqMrx4$`{>S9xrsPLr2H&e z2L=lTY)pRSTm;*EGb-l4Q+XFu3S{__c>Q`x3<9{&_8M(SXscD46VIvpgXt0sub*DM z7hC8#;e#u~iDyK4DMdD)nJ3@FkSsUN8{g$@af}FW-5grY_M6yZj9z|O{I7!e%m%;$ zgh&fPJH&#lv~950oxl|*EH=k<4V#}zOI*hN#}+;box&`3P@-2m977>Y@qjVDdJ;(x zi9dysqfV(RnUVETA1$kPd2bv8~b=S zYKXp`c@Mw%6Dcv=3pq0tToyE;e+N1XVxZ8!25L|)+J+B82?pVW7BwL0&8QD~eGe{C zid>}{TZ-G_w$u;$2J;Yyaqw;p_Rg)sRZswTC5zWqO$$K>&%jg>56{i%bDBL0__h}8 z4R!Inq!3j}D<10v6cZ$-Ns@xMOZGwO3qC z|79FQ|Fl3IBkSu-dICBW<6s(W$TOOW=mC0nq2AE)X$50&-MqM^{+4vj%UJE4I(mtQ z4uyc|+6dFaCT#dhB6jg3_v&`m-ZwnbocY_=jA{D)!U=Ny@?eRBPtTb*z$>TsJ6F;U35`pOJLXaL?g9 zCgD+(-8YZ&?hxwAb>gC%xDhNyl3=x_>e<^)36iT;*eqMo z)E!%HQ1%F46R|o>T~S0p8p+mZ_Du|f7mqUYRuwT=fOAcF(N>6gJ%7-I(Y`g)(nB|E zFBOt7m}r~xJd-WcD5CZpx~TG_BON*Z__6TJ%_$qod+5xD^~uOm*p))zVk@*)WdG4j zm(5iM{v~5~DsUdA&q3IH04*#rM_aWia&6$~E@jA?MvI2^B$Y(94y|qJ8yK%hz;54H zXBHmJ_8I4c?EKaRqT9))b3uTSgZHif>z=;hk=r4qUPbkdLVW~97jL@qniGOE=X0ld zjEbfW4fy9#=uk+hFMTuSU1G##Drvc5W=cZZAMKW8NFr9;H3Y~75nHZHWB|nbgho;= z>&G=dMu}AKIJcRhSGpt^$bK5xitfGkllx!M05gsj!zR-)&fm?Z*KAXW1B}~ z`?03*NI@3wBdirsq8|N zji$>Pi0qvo=uz)o2^q?#4QY- zb(G`aGQ*?ixEOYeWKiT-*PGz}PH+9&_{RDHtFUAC;~vkB8Uorqj~8`0bYyBWmr~{r z=*;Ge8_p0Hxfrb~@Hvz5^pe?j_2~bgpZ!S}XC#vZwwt4p=Cxu3AdJjzD`FPO2lk@k z?OJkYcpU0xm}m#Q-|cHuf(r~#M=}&!ER7lFd@_&W{E4}##(`iwwZw>h9K}eq_(0du 
zh!+MgNg&N!N{d%1)RhLP)apjMzcd&99@? zhk2uMCD&%LOO1BZrLMIoz>wSQR{O(X+c^tUrm0zl%nK-cI6@)zDTD*gS|8)LTU200 zE0C)V{AkB(63RjzPisHwq5#!9qgm2s-8j&`TS`{ReQ9ly=}Fb9j9d1tz!&n&S%vMA zk6G);x=enlehoH$LENy4FMHgkSewxIBtRwBUMtO*jnNrPKtmD-^hQ1p?3+i;n%fN& z){@zt*q=VwL)l+Z^+p2&s`&w)=L-IP$PgpIG{_Xs4UTI;4RyXwWm2kK5OrDQ;21ea zW2%9SImrV-x=Lga#O||v zd_=sHqV~eB;gcn9$&aMDvWHh+;wz@gMAb3E>TpKXT4EHef$=k#Y?jJGcwlfeZ-SB_ z;#9SFOm;C}7<=&NA=<@fslL}b-int}w3kz{Dmlp8Yn=z9(|jl0=!j3iTYpFJ*kaSn z*L0TXzcpNSiz;|-Vo9`>i_|r(k(6WNcgu6#^O~}D>kCSy$n5o(&i6ZT!3~;0`o^^GrCrqqa!4^yeC%Ou zIw{x7Kvp2`li5D<;>VMXY8XZx{h?bDY-gmCUS{ZcA2I55HS8b6a*QN2oycIPFN@)HqpR4saf{k#`JRWLS%Ge+hSp-Z zaaJHvCwgAh$$Zo{;v+TZ^o(?jZ#|LDO87p3X%RouHM6tvxX6@K5k1izVvA6%BwzAK zo}Im-_4&OTQqlL1<4KNo9s6b$iE1{Vkc5Yu#I{zx?jx~>T|%?8ECzYJXnOOe_oC*r z4oZxiP{F)(@jnJxA{2yjiN$$9&956Pgs$>{irj**{Vg4@o+qCheh-=^#zAmt1U(Ur zkVpUwX3E)EaGa&N#Q2*f9~6?j!nRuWoGMqKM0zP(pKpBa9tMxTg2r{@uT1~kTXl{JjC^<=c= zAihd?kEBr^3`x9K#V5>CpEih~U%u)ii-suU)~}B*0py-YWRtTm!{*6GLtb?v$tWRd zk*{_GsUNRSlftz z8h0~)z422Vu~nvjlc#`L64Amoc=1K-b+#GWzB?s;AN}P!tMbcPHvuvU@?%T9R)_)t zTBzfpVa_Y+kY~Rs#iUV$069{jQX>Skf4l>YJfO>k(MG<7XSvO&pb8mW>_kY={EW1` zEb#$?R^)hB0VrL~&AkEF%M36s~bnsM`G zcYl|~bwmWKLRU`{)SpEacO1Uwxg}(TR5}BD=$lr(j=JWmkZ4KbVbDWQuabQu zF|R;3iz7Fgx5eoC1KyIt3B8G|_q)Grfs)}@(p_(CxCr9vf`$Z(ah&{f{yHouicnZr zlb;zo)JY0@jeypGe*&?cFvy`pUYQ2IFOZr|7Xu0y4)g#!shY>hv(M|8x*-@62=S)it#>j5mYIho@0$@Pry!8p0_3QLhqXzYY`uG7Y6& zapWa02GZUCR*KB@j$@43i1z!vrYhw37`?EB5ogoWN8&H=GU z?rF^MT~C_#!wY#Xm7F057}?R<<-`Z&f@(-Wf7N+Z`*}AE+AAAQW7}K?*YeUP=~fWP zwn!SCbl*bFInkI_YGS(I+BKrRGIX>4;9lP&=ymxV;`s$o&LA19{A!t4(WZ%@5q+^` z$@EN^NT+}!jYPVz#;w?5{>hI2c0fcEJVCJcVc>>f>PvG$6aixS<1m?+!MD)Jx9=7A z&;ba#(MqIUSa&CNjB#?n1LA52kWQn^2!4-hm#7}Kr5$SStJGNyRnJ)C5dR{~$2YFY zUhc%Gy1!AFz6AkHfCOdY0UsRQHIcQ~){3g!5(s&d*3u8m%(tCBXoK$WF;X7rjO`o@ zH4dMkZ>d{g#F+nK=Ru5KmcKS3Ypk#UKnzm0S3)#E3rAlurGE@yvdR@8a2QxN=x)oZs}BQ z71p~P7NOPWNGG;t6d$Wv6fo4sme~A0W_p7opn-x~4n(taSF%?rRr~TYT9(g07a}Uh z>jmZjpkuO&EMr=#RRIkoLya1;xMK7-)yrG)8!S`%Zw*)K_PHssod+c;P{kC=_b-66 
zfJx?-{zi8V0U17qYE^q|ZHS-N=~H^#8P^a!cvr^g|AzaY8;&@)^Lp?sM%~U6wsV#X zLdy{L>Y5r^Y|{=EiSH4R?;I6Zu3)c7YHjkXoQLcWFU0&yja=xVKYoW4fzAmyV3+V# zhthRO4#wV$UW5H=aSJRsguQ>eT?fgL8eqw~%xscSAVOu0gjYFEzyl3>HD#F~wiNA@$$@(pl z%Y(#-DMmAOC{d3r%biwC6mbf8_!X*Ec2m0-0LKkxW8v|r-1bfi)t zFO6r=ku48fAs_AKA1Xvav2Ya7cwIkbKmb5Hk<0)D8PiAW3?eKmIeT`hJ9(JU6tCuR zV1WJRY2O@3-LU=u$At{O=D1GdB*=ayQ-ucdh3Wk(d4I7TKuw{j^1hQAx zsUAtvQ3ix-5-WiBb8v04Vb*YD8k3||<$zVPd&rG}YZ+vXpIoYz)lI@ zAHYk+?Xqh;aEJ~o{;cS_-p{geB|7}gVsh(ops5m#7ATrjFTB!PAR;pjWu@v<*X`aO z-2ydbNipnWuNq=+_kKZA0zTYK(E5rl@RBoXU+@6+njXQ$?S|aVSZ3hpx-K@s#$rDFF7I zz#rWGL}=#vNct1ZqH~#E=qH37O9bT6#P3;AY@C=u4b+&W*SIK4AI$Fp#zz5aJV8RJ zFowd3Kne_OfE~t-V22bvf&^AAYZMF6*z_>CYvpE*5SU01Xk1l(htE5+T?2Q{ z9W~Y=XN6oD5__*@Fw2YSwjizbv@beRDCA<)dM7Y3lx14_h!Rx_U@U;}=8?Wk za2@=TbA(lA9Dl);%1Qk#m0KCR7-|F&S20a5<5&PvQ9I5(aE#_AVidUO!fTGvjIxMD z%Q~3BN5#saQhW_ShKb&dbpbQd$1txQgAKbnA;~ss2DKx3%Gl(_SIU>6Yiem~lh%EY-cJLMLC4-42CKRQs8z%YJWIfLYOc2D-u7Y6p2_4pj zojlN)1f@ZkHv(SZ?R(geFJY0V_q|A1+V7pD83$GEo{QS(=sPJu#8E#q+A3h!g`2X0 zmJ|icFl+&A>rQa;Gc#|s@{9ps!f$eR%;3Ib^hO!GRxnprKa|2(SH5j&=99{$lnCPR z^rbO$)9s)U2^-U?dfk%Erb~8jT<{f~aw z<^sMRK|#pHw^N=^wdMI~8VYtHy`;V2pH)VAoS~^72p7?>!6EHGT|(jH+tVZ)e6^G~ z70ObQG=@~blH%I#Y*GMb9$`REfhd(1*ad83x}1GMNDF1UKo4FZA$lVm8TyGMV=zZp zBBw&nPaLhmC*k}0u^PX^_w=-z;2Y;+oXhL>+|;HRr2y-+i9mk~6~epb;rtD7N=p5h zF!fP8TS-@la-dPw3ZQVct8PuM{@CE>SRN!Dl$n`- z{FQ~krS~%azv2GphU35!dbsC#(gNrhhNTKxFT*5b6gytlORtY(^1e{y+Z4nG{mcc= z-5_x!7*=%cXb9AvST=V6#`H{KA#PwK!pFNIC(JF~X(m%=w#GfQ@25V(!0*F!{X5&^ z+n@TmFsk-?^hS-|jaepqEQ9%Uvxt33OG-cfM>Bvlahrd>kWeZQZ1FK*V!FKw?gNEs z#u7Uj5c2fZYd&sT1)J$uNXX~}dM_(TKT>=>*Z@A$(jsZkWRKI@it^NYY11=YB$Va! 
zjBx%~PoyWTIW(#h9=sOS7jKF}u>L8DLT1Ov=Pz80)pwOId?)ICjM!|QMjYviy?3pr zz7%JH1W>*kADC5gfcczkb*&<$f!ne-uP|^^v;iRjVD4P0s*H4zq_;y=;Z0r;X{?t1 zCfUh|FyAXA4cOv1m+r@V(noFtE1hCCWvnj&`{7#*dk*Pp7}mT&C$;Iis#fkbUp787 zycH&J(sGiak+=+3(i2n-xnC2z8`}4Z)`3#C)ilp9wxN+tKs@rv?G9m*u<@iJca}xE z^Le_!dhSJ(F7Kghyq~I~QA|PNT3G+AmHBt#gMMBu!fm2*Oc-gbXCD#Pw3kcJxw++q; zZ8IT7-Y|OVQ*RSi&6niWP(+>~L)K%^`lMJmDWiN4_;+74Cl5!UF_5SpSrj*iO2qdB z$6u>`kt3F)Oq`3YWaHADpI$iXK>Xqe!}L$G%?9GzL$jc-vx@c7*?dfUPX9Vl+gynk z-X9`^z^XKMpo}h|tlN3SvdSbPDdqmay(L^brf&mWm5#9og=X(fvF0M9Q<@%(CgMDoT^b5w+#p&b?og8rMJzR`neIuI_BE^NCR=MIu64r}mG7>zYfY)BS0V$~+3Ou0Mdgkw9h_5NH%qHbSShHc3@3KB@Rgk!P`zmYFGPfCNx zs||Ro&Ol8%A?LDS5NtP;9CgzgpHW9?rCeM_7bjAoR`ov7<~HD1Fhqbo1B6J{8&2_; zV^1ftsg>?eEn)O1!0}{%F?^46$w+YHHNcO4nkMIYyAqVd+^W~GM9*(gVGSmKt)5-& zIc;ZL8qW;(F2<7E)`SI}`&k@4uSCeqItRr<9Uit>989wUKctEH#Wr5Y#osYGnRNZl zwb_g`(uJd4vOkny zjc&sWoph-Pu74T^3gS8_*`6X3i+d5Ui?}*hGGL!Gup$A%t;VCuh_5ys$13-&i->5| z8Q<@KD;c?t0WDm)Z*2kr0Q7;Zf8)m)f+=k_!%F?nJ3S)M1{`&juBqNP@qDgNaI)+K z-ij3gs|JP|uOf>`TIU%XGCtF`lt?KH#-&25*6P51Ik|2AR5TOI zxis1H2(2;IhhfZyd=dvEjIRQPX<9AzP-^4?H$e-#=Ke)iw~eiZlcE-UJmUA6Fz6sD zkCT{x&VK8&!Mz$t6+y#MZ=rE&1{?;ce1wM(0e91A;(_iIG zrSsZfDaENj>xl#>de=0u;Zt?zd%yKO3(2HJk=loj3N;J2R?GX8ZHj6eS66xOOWe4h zAaNN4tFxb~gwuilBH0dEk7bckB?bR)OE)}CaK}w%72_VRNs;`n*lifsNcr1gT#HKo z!4MO;QiZ0rrlB=S4gDuDJa&OYb{!FD2gg{CiYWQsCo3K>#(1hHneht-HB)a2p_466 z>Fkx1UoN!|R1r?xJs>p#LP`DbLbwCl;HMMq9yYu4K-2#l?tgAL`)bvmwt4f;^ajHrsw^*`e)llk z$}2aQtScwJ!1j?z+U{z{mU1c}e{(&xlQ>RPcK|_3D-Zysr-Su z)F5YEPbYi7vyI0c(mI2vh!NVAakUF$-E`hLWF*ZxK;vAI#mV2u%?(Je>!<)!L7I`T z=t`^s;|ccr9`X>1Bq-vc9I68X>Z6a}PwhVT`@9{1+(bc^kqOu^?JCupDOUj5fuN69 ztJClagDjLqdWJ*Xk?0mJ);X=*ug{~3_H{kN5z5Sw(4_BDlhr4jS{%$Bn3#O(Z$PAX zY)P-vJXnG~yTYRVB#JFfi#{6GT;wWyO^E%e&Tp)KSL01-p5q*8B~Xb74qRKMC1O*O zedxR{8c*|B#sMY|Q~@2zOe>1bs;C5EGgBt*dlcE6m(78bIJth0(q@v+>ldB?FrQb` zmwjsI>D(>ajWO4dEQFAW*yTO4IK=rX6lNP~w=hxN9opv86h82F0A(*Y^JpJyQnk=A z*c5-&vo z9B=fcJNcO-&ky8$0jRahMPW5kw+D3x3ESo3A0zsUG;_qe9x}6k%RY zZ20rzb1Ji3@4{7M+hLvNhDA8RriaY5%!!SAkW29Q~9d 
z>teLGtGTXRajn8`e>)~V(z19oW>AwAs$5nRP%!6uLuXf<&_CK=w2gMXjStIy;l3B& z*<+%Rti!SHf�#1xBpIjtg|4A)qc{TJd=@{c++dnU&aO%$ttolQ1_VD1GZw?>$@@ zf{?G49sQv+j7T|&q*gyu*TF1Ewq)*6>M011FAeumd5&&r_~a(CS;=1?20ACHJU6_n zx}a|hj80izrdw3Ysn+a%FF+N|@FmMmuOm8 zc9ir*p-({Ha;8a2#6$(vJN^I?6Wiqvr%%6**0mRDwe8>pjt}ZH0e*Gz0;%f`wF z*Jo#fN@Bwxf5%jcG&cBV|%%M-t&M#%f* z^j9>~R|}r>*N|oLDt41rST6JUf(np&P0DDA-Evk9EEmw$e0Zj7(1}+#o~h~Derr|l z?r({9sw&J9`o~|qI(860vv`on1z?kDGj43o<_E($N_n+ZZys_;PpLSD1bat{2w0k) z$n!m*n7IWVZ-QlxAdqoX(F&!2g-k$DvRQM}6^ZDSD0D3I13a3#Rw2XK6jiRfVT@)VN! z(PK|!K3DfBu_!=QDi~I>Q!5i@(Zu0*`!`}=e)D=a0lu2Q?~1-3U*ZG}3g?n7V%Xq1 z^Qp~}O-*7DURHKZ!H!GRo#`wT%}KKLAA_zWHOtKBtdeY}8Z(2p#MQ2~6z;;Tu0EV^ ze^p7a7EyL(@k*b6|xunLo0Bk3K=u7Yd#lpOFriKYl6&0$jn%u zEo=6F!~M?pq*621 zXxbT5>xDXSv!uCie-k1eNl@3BQ`U^8L@lE$wmMGv%tMyr%t4_-CltyMd-QL(pGaG1 zFH|e-INd}@GPD3SLJO3J)YFJ%r$)QI#yATn38&W=Y}#&L;~m&u>dIOeD7iORmjY)Q z#<69zr{X;fJF&9KCzBeATD49o(eO2fubF>X#0_OwiWS~?yYnx}V%rDILst>wHXetm z-5TqPt8nQS#6>CWaTcCPqyhF*N{#GwtQ;qff3o+sYzYg9%sA$H8as%9?+ereW+dS*Fc(ng68P3J=7XL(#_eiFnCRV}m4C2H|U`q(O7xG>~zR&N~;F zTPYgQXatPZ<=4__e@Jx?8w13 z(u?Lzx?~hfgd%+2pQ8Dv;#=1=m^IjcH`pv zJj35Kd*GScCsYSU-i*?`fS$tVI`RnCGD>Y{&xHrgiX@GZB2P zxiu#@Oe4+jH5|OJu(;%xjOsXO4^3771yFOM!w>J|H{Gg=!DB4 zh%OCYNR?ctM_dhNobs)nOxVQhRIlLjz(m!C}lv_b{)HsT?5wEKHb)gz?{)&?o; z$L*Xm!7uZ$Jh}li^&_0l_H~(s)$R6*c5m)SQ=$t%_35L2MXRXjLrd|QQk+1%ktm-l zwUZ<(k@hqRgSR&+!baelT<4p9i5(`v4;sYrr-}}JLIKb2)KJYxseV|b?>v556$Qbj z460jPXg`1~-c%A>q%1FpDi7K-y%Y7}#2%5fHf|~@mZMODMx5z}P6(R9#(fBcaW@N- zw_g>2=fFB14IM$yG+s@W`7sYSU&2Nr428t4h~a63BxkW%-gk331pEPxrZ(O;oSQTZ z_1<)mTIY}VI4VzHL|*xhsr?))Nm><3A^Mvmv{wCN0TK3)%mSodxEw8tBfi4dNP*)S zwKt#^aVG-Cv-D9RW}9CGe~x3WDW9{Qm-w~}85mR;DH(wC@)iz$a25;J-NB{;j8A2- zBaY^>@>jit4!$xGr49jP@**s_hQ-RYAN*u1et$WffH^c#^9B9An~}4Vg{MJmUZ#Gg z8Ls1D1i9`UmaggWwfwCpGP+*347XvKtEW(6^ooiLXJ(YH4 z586WKOSdJ(=@6rXqjTwiBVItZ&DVKc; zHCT(AH*;))^t>QIJmY%FC(Rp<^}x3AZ4Bz;fCoHDcQ?{i5wrmFSRm+_YI02=!)GFv z0VkZruymt}i=~Ga-qS!Az;ZA-wJ-@3O&OblD?g%XfL>1lk{%bzM#+lGMYD%|yRO0j 
zK(o9G2ZBqBr1+Z*CbA+yC13V_qeGvG6NBNi-1mx7)pYuq%_XZ_DI)v~A zk}hOksCxG)+U!Gp1X218#0+G;gHJnF)ShLOnAGWFV|;$5!kHgAL8e`!%@j9ueS$o` zh50W53yV8T^m;y}I7a~WwdxK4Et>~+v*Y{Tqe!>YB}((*=GNz;l69M)rvgxQMerh% zj?O!vtnp8tA%9F0+Yz3>fHIxy3 zKXw`&yWkZ1DBx1&Wyz|XBu?{42Ln3F;VsxFbQX9jTcnu7xRS2`g9?Zf54fanr)#}3 z|Ek~n`eY~Zs)6r_MhUu5HHxdD<-WF+t%lNqvEWP&r%L5*FJW7ti=RD(SwPCzMOL%n z#Qe;RwX-K$_Hg(#3W68V(19+xE0zb&5rOu_A|RpcZUsm&h?^K}*R}A{Nn2_Vs8#8w zNl-@O3MHkw1EU;H12Gwz$AnS3UWJ^1q^F5fP+pk)0dvBNKCrT)2kl>?+NOb5GI$Td zs}K1}QOILlBwnnFUoc_F-oVtY>~-JT8_3dg%%NKd;gTy0j%s|>c?{3%#QWMDWOeSk zXI5uX{8Fq7&jXpCar1;V!nto$au^j`%*~1iIIxF5;Sh;Jb1!4}!LE6N^14G}x?RA( z7xr*bE;R}o^Tj?-!6 z0xnmj&eYm*8X%@W4cs zkc=#OcJpXG1~b#<9kmrJ|DfzoTs`cP7b#$niGfq>XwYnad_!)(bzQQm(l0@Q;IbuB z`G%vAJaQ7;&zpbYpd+@d1IGOd^@(yHHJE3jOZ?@*l&^w_)v^rE-}m-u>^AgaxvX^1 zVDwnl{)NJ{*>5h{$UAL#e>VmzzHL0GFjYfgJl3_^-b13%5Rpm@dSRIDOq3;k$WghswE zP-VXCTCOaE@7M=6m0&k;8#0wi;pI_5WuJOr%7%UlS$E>UepX8*Z$p$P#6SQ-41<0x z5HcWbC^b+DAragZm_>zd>@GO*W`SdhmAfOxjABhz^txq#YpB35WJ#o80%FIKqUQSyYx0R@LjR8r0pvm%e6WcZYZ1+s(_2uk=b>m3sND^~LCeXc}UV~u~pYm1-4?Xf+%HU(5M z|7c!%^V(>{ofCoc%-6$Q)3psw(eWBs(WL=)Y`((+J&C*96pwf54L%HAK|?(!SQ@HG z7Y0+bb$TBuvLcrj%X4!~G}QV9MVP>DT~%hd+9@%N zx@*P4g#2#}S7d|jeNCE_U1ArS9(#CJyy;E_(v18O*eIY5gho|D=?AYk5-(VTaY)Sp zNNKQ+XYqf-{m%`j4!l{ncQ~Ada69e**I>`9QrRf`#Jaza{>goXXZgs>MU4OcjD7L% zI~t##fjMeLbF~&;pHI@}LSkfR?7j{;s%~K7{#8<6c!tDg3g9eq=OcaNcRt9Seg7qn zg*fXwt0PX6cjtOH4LspD-=$m-VA{iiXf#R;x6J`XrklB`7SBU(&34=DVB+ zVSf+x1?Z>gM7!vAAnIC`;ZlUQr<9%TP%_i`*4ahsv%YQrMn3`w zNupyis@y=m;~i0T>~MrN{HyahOW|CpsHaMm++^{e^_NpPk#O2xE2qKo0am6s}xHlsC=?_3@Z7bpvZsHnIzq5d#W$=;e7V zZ9Q(0g^n4dGH6w^XsV_G6_>js9Pu<(xGX4Qi$l=tuoZPEs>tg;=2Z!5EcWqoVHi!_ z#6)#wB|0vD9Ig`z4JJl5`nU6qJ4DG?gq^zX^O%<9MX1bK|IVR;PuynWV%nIU$OKNZ zDC^(|Q^@Ld7bWx9>(Q<8~3ytrYFZ|IgO!yKA ztj#HKjfO+;;|LMinLKdJpVeBDXWV6x(^PIcn05johzxyah2t3K4Yvz5qV*}S1NL(wl^LoAk_ z%Ftxdo)zHs-X!Z-pp`f$DvQ<>tou8o|{<3W5}$hKzAIP>8i;zEYRfX z;@_g1;m(=evQt75S#e3gg@yMkayvDjg}juqrnXi_pv3Sj{`3J&ZC7MXSAxxgtmeVQ 
z=(;xlwhZ0Hch~SuUuxsX#auqHH#zFxYG#_mUmRRm93B_+P;gvUx?mGhz+z4!`8jR_ z5|_b63P{itcQ&6W(Ev1Ug@uf|vGhXuc%2_fA8*GX>gc_ZlfxoGO>kjHe&m+Df2Y$& zrOIQEYUe249rzHu$(24OijBH9KeL$4HQ=4KkuB2Ov=n%+1y0lwGE0%X!y9wD2xscGol>@#hPJ~gWzjZ-ZOo6&QBpuwpv3=~$ zFe{IeVdjp@E>g^tXDEs!noe-!6TfpXhc)obfd=OO&FddmwQDr&W} z3SuO;dX{1x4cYD zKg1it!rF$Em){gt-B!aZ1}7ZesZu@z@;EVObU@)XEK$Mlq7BRER;GvypG*+>4zo*g z?wE2ofE}Xhbl3{g&L~yn?n%g*5Ppo_SC-t6ZI$o>Fj{hyLM`nJ#%Ew|Ayk)%bEkYO z$|T|Rlwgj|!zmLEN8vV93Ib(nrw(A!+pNzb*{aU6K*!BRI=s}JL>5xA?woA0l z^xUJskMff#BN_p4iFI&sb8^OanbyOiLa%qQZ}1KOepIGl4B!|0WYAuS8gMj;pO@c0 z?G`*lnoly06%}Qwnlqa)mYE&Z3@yQ%%1%Gg_#?pNGr!YYy@E z5g~Q}G43BN;64uhsEFpEE*;2AJ@=MjSaAa!g}d2^XVERC*QQ|(aL?=!y4z6PdKnp%owh**8`EISpsOpg;$@)8p!_&&9;Hz-xo>&%=aFM2T{B- zLP&&ScjeX^f)812h6fDQ;WC7>?DgC_m8A`Y%Zfqo(N7ExvM7!Zwn^oppi6E*=hc?Q z{5yHM1Ww=O-@#SVbG6>dnRWy+R7=4QkS9b1`H?i#lM_5;)aGxo-KAQrby>c11vz0P zvnEN*8j3<+RsL%W4klNoHlLUDDF7o!a}_U%*f$q#aGjvOTK#d;gnNm)25kvTp9fOo z&6{^Q(1WBVQ11qPBQ52+0%~QbZ{E-{L0?h;t+<+61sMSHBpBc)FwO1hk)`%vIi6J?DhN) zbr$Vfk{9DAl>qEt#pz*Tc)mpKywisSh|R~Sp%W5;g{-3RW#!@$;z(gC_!nmnK6FD$ zz!P3vVmDH%kEa^vMGQ{tc|qL{UHs{Ik}iiy!V9?$Ykgl;q0jxZb?yw!Xu8CagmB|R z(jp>ExIvSkr3*RO$5pUdh!eOv96sQvQ(5Q4Xw1s-$}Q`tM}93U_Cg8eqp0-%1wt zBu})DX1i=UQO-_FrpKR8D=$+uEXG(A!{XQ}e8iqCqr8b6Q0hS^dN8xF3qs5DYeb}59EdE($JmQyx{m>Z~BmIuHg~EbCBxI zXgHclT~W58!+Y0b9v`Um-JbBaa;HUR3K&f2mv6H8B2V1?Wv9O`IIusPsA!;`+y-Fz z&#BsgkICxY;*^-h*^}hE`7K$p0o>tT-Z10%Z*a$cRUc z(NSzX3CQw>zWlN&m;@t7mz*4jVe*Z>mX|AS@hwFv9ViCQ^$@(|YZS-6!%ftNl9R?U zkR<&lIEsBaRBIm&!miU>;M@uFuvw@RzHp1rFIyXg48va}(D%yr2^t8;G^$pbIE=#F zlMi=^nA=$JtZR-9B1s_PLLi{?v|w*L6Dx3p}H_oh;Ol@-V4+Z9>*0d*A3c% z-}zM{WJo_ta1@a7jOkuwjN2)heOg0_B1wf#2}&^}6qnRfCrO%%)f<_@pZbFw;M&Dh zI4*!xH(?9+n4|!)B#8@SeFvxTN-TDGx*4qv?j>H3xYB}laY%IvpU6lkz22d^i!EXm zz$t)z@a(zAL}=k=ix=AH#G2v_3f^cQt_ByrcfvZ!K|D_3`++;bB#&{W+5fhq!D0OdFYbRUg|6qZhd84qBKBgtdp=fJ}@Q}WR`TmDva zy(NR{7Q76&=WVbAhrBfnV){NkRV-u|$%wbx>B_I8AMwLO zW9i?!g=|%gvj37l6v4ohrmj@CVtU1b$obrNVsB(a@^^UOCWuCIsMCj;_)Mi03+SyG 
z*m+6e7m)o}q8xfO16Jj4r~!-tS7e;~$PHb|o%{nnA!|T~IjWANsRkM0Dqa;I8Kgj$ z*!TWJ-K3e#NsMHk(haHnWy)KoF>K1AaVJEF0fFd&ry^(0h!tLtge?xdx^H+Xj35fG zK6$3`*IecdR7r3y*lJ+l%uNumeEaSvTgqal7>;iH{AgZfWMYzB*iSO5|IlogTYAPj zRjhr2n6{m!gKns;7cTZL7|&4yL$K4r7l?T(V|=5@j$&;C-TiPzKNAdlJniB^1IXK- z!XVwSl3Hl&)uz}jS>P{5RcHv%pkvM40ap>!G<81N-G?^Xz}ZlMljepS=Wv_>gyO&K zn}x|!zydg_lvsksOSCtZ#S`ZXJT9EW`?yt__JrVYASJ&RHuUW8W=?2!zLFbsu1y|bxE%VHZE#A^7} z{4#$rRSKG60C$7#xF zuWK<2NY17t+d}z_>291H4kt~LX0@Gf0xlM#3|2`OsjZVEy;3T$JUU|@X*r*+INrM4 zBwL0x#{qEcE?S{OLo=X$jIv?T4SqK!3pIxLcv@hM)TX>-bN>MCL~OQ^@T7vtmT%CT z2j48NLW_`tV4i=&atJ8@t@Lcdv*G@*h_(&bCvgnh-W06Xh|6GmnGr#x_7z>ABtkrz z|B{t94Ea5i1vFK%uO?iVuk8J-V}{0V2k!@``uz~Jvgeo-`s14hZWV_{WoE0k2Tj|| z>S9n>fz)U!v?v+teOc3C_s1%NP{XuiffChVEk|0*;Z1Fks>H7@sHp{3W)n?z;KeD{ zN5b-0SZ^u`6E6{8Q^e0qZpwp#0zvF>-U%86!PmLkOttskLEI3js*xQ*z_gCKaT*Nv z2WVu6EPOi(Tf+)AXAn=Re&Ta@%MYE4dejwd=M=~T@!hk`F_QYX{Tny+DI1-mwKA0| z^VHsyCSYu`vT!VCq-AVR#U7}u-~*Vhm#E)dgyB`-PY0wp$Ds*j<{cRSt6wGoDltj^ zU(IPD6eL@>q_6~-sgmMIh%hyK&;54zrk5H9>Z4U7&{DDb7pVw+B&(82<5###M+V&f z2v}THZZ$Q%d3G)%3N6YF1$nY!QC7J$t)}!<-q8E>{M->sY${hf&Sk?<^TMLQH4=51 z64)55ZN?z&QB11e>mSN~bZG``RZ#2|D`AB=WTn!uxY)Wi_>ucc7aM;x7*6`U&KCpUxOHmx^`_sLQI$ zgnJ$R6X#ba>vm#*ZZxk`uzA3>f^u zfgo{M8}dG(CX~p|D=fml?}k&_bvA++5kt)NE@(PfwYiY5SXHxt0$<#QlXYb z`{-85*sOus&V{r3o?vOs2cVJWU2DFDZ9JEhA_Ertkz3F!^AautR`i10My+?T4m(nVGLrs0yG&8VHNAy!>3_PA)W^r z9^>rT@K~rOFmXRiV#t2hg-5BK!C76JVms2x)v9tCQL(L7SG#qPjayzNY0jlAwT9D1 z{DqU9MJj;)0tSITHgP_*l1pZCXexl_K9-M(1ora7DuWQ0I&V7 z(N6(CVQBWAtE`r_P8P<8h_O7rCS4?I-~>yVX8(2pK8GTdT`>+8?$H6@|AnR12j35) zO-8{E1t{*tu!f_sA~?vRPrxyf3|*l)R__TyCpnmK*jW+TV{e*a-&YEXinshFSws805VF08oBr<@)JSC@fj&OUFggLws;NcGkI0?kcx%C|l1r!oqK z7^0x43q+w%WY8Ed?|WaEWnE(_%FXN(>OpW@-&{;B21IC<>rd6P?ndwfpnKXIJgBOs zXTXqR5aVDDTUpG3Y@&fFIC1l5e@_^UY@+E?Ss?*-vfDQ$MVj3K1327>tkXB{!1BirX4h9+dKDubRj_VC#J*t|AzaY8xDsEzBEzPQcmR4 z=V-9?T3jwhO^yy6Oql8Oxw@@X``@KVL+>GDH}q=^o$YA`>b1HjOUU|g|HCWYZX6y1 zd7PcCbiI+i41%QS3vOdj8pC%6@suawz~RgmAj6h|8#mA8+JzS0i7>E0_%}1D>f)?I 
zDCns%5HMyJkF;}nzb|A_S(gKJff_Wubfnw4G6aMICf+p(65i8Pt1zd@C?=jG@0M)^ z5}HJI*7-Zqn9~t9a_Fxy(4E;h$oQ+3`kQV9TyWGPEz))~$v#hi&YUtDm-?=&Orc;w*D#bGfxaSbZsK{9qiY=8_iuiWC~V z>M@w>;Gnxxc4J}5<~3_)8}CEA6Bt6sW59A>Ur!ct70yYvV3p7d*CQjbyqXHCQ%j%k z%xU!RrSHZkivvk1i<}&k@-HHX@-)-$GN)&a?KQab!5BqNQf9TeSTr1pHeWrE3FEuM zpI!7LWd^^xqKZW4L+=p^kcQ5@Ryp5tdn8YIa zSD%L%V8>ZAGR^;izgZVt_%O;YZ!6*y>ewf~@c6@DrYILHmU})q>LRr)CqVFARs$G|M)|2FRbW@S8ynGu41CLcU#Z2y`U7z8%!5 zo!&fOfE23zq8xw`PRTguU@fxRx?BO>-XhnOp-@E+0)PF93Ous@vo^TnCuq)Rp2pyY z&v4QUaBfWtK15XSLbX%#Ljn^8o?D?tQ`BWtSIRMa%m9X78okBH^hQH^Y|h=YuVq%K z;vs~}x<D3lgLQA|L8NZzO;Rm5w znzeP@ECjag9;Z^gG(Mu(QPdAMc&^=F57N+So1(vAFO_PKHgC1yi-#3PG4QXI0iSzzT(bp0R0>K$YgK)91Ir`ABybHVbnfd@kI zm17WXuZiCefCWX}kP5V=r=(TE&~swUVT&vec+2Be6u%}xiPhiDwJ$iIUMS*d3z1B3 zEXmy(JS^5~3X0)SLkG+bRW$j$3SGdP9!`TMj$jyJU-U_>`Zs$tNB}8G@^?uswIVhzhV+ zXFoV%zy)8Gtu#PWv*YZAj)w~vs{hgp&krA{lPUg%9uYwhE|djgUkB0l56ia z^Td0g4pYtG>EsUR^hVBaIEeg;1CmmyX}!nqnFbM#POo%*)%|t5prBLRrR^uL7;B$b z-RGM)a*c`p;=nhv-r>L0j1QyJ27{j0!xw~T4;<;Qj(B}E4q-3w?`5ubd}pE#fq=?a z@FvVvcME-p(x8<1tg;z%0u{)w;j+dUr}#}ES+#ncawt?q5HpJf7c+ysGZO9X*A2S4 zd(*oMCv(xE<<1??3`qs&aWd090iY)i)|;MSyua#P1}K0X{4dOwlN^51Q_!2$xOru~ zcHtMNf8{v-kjvtt>+wv@68ocBz%X;Y+W5RVm6EGF%Z*1=wuccGQj}AQeKjVX=#eG+ zT!pc0G)gMfQmm_d*?>>}D4X+-4)Rwul5fz2;^Jcs&TUw5>ilJvV)`nsyo#cdV_AOHB{u&0e=IYqb#I*5mmc*s2X;^ z_XE9>3@JbiiqjfmPPc{|W%4ll0NFc8ww`(?Cu8h+!1HX8BYEPb=19^ay9p}I`Ys1_ zvtR9CYcevGhGSeW{!_pu`YJ_NyAtb^mu6O#Jax)cjs}nrA$y5`$Az=_-k<2WJ^kGD z-opbL*?GnCdt|N`_Fo&0STvBac%t)1LTj#UtynKPj*b z+oH$sJ!q*+s%_Qg|AzaY8x93{(CghMAJc#K8P5+l_}#n6nccmC`x`UL|LOxZ7}1VZ z24Jk$Q>yXs+|${)h4n7eYX`oy{=n3JcF38ta9IvdAPB}z_nlt2ljThlfwOkSMtp!O5SCdtCno*MO1BQ&;2OZsRe0-uh{kp0J0!IT3(wahh7looxu zMS^NgD4t7`B#|Wf*_4Lg<}`=Q?z`<)s~?ORIbLGG!A@l)cf$mIH?xjXh>Y`64>{nY z%Mx`qQ;R3W1Cm=Ocnx=#k6|~uv*hSCtxw7t!%R!B%W74KIAhf#xY*1`9D=QLGipig z=akCvql~jn;C7%e-Ep|ev)_y*n^FF0p$#|fK^Fa743ca+Tv%n?yUuy@YQn8x`Z5%& zaq9EDGDlG~0K=vJ?p5L>@{V9a<;4DT2S4acdwX<_<8;>vfsm0N);RIO@nJR+ews{M=r{fQE9}SDHg@}sbRV#8tXlqH=Ljs#9 
zI~ouewPI}xrr5t5!=0MhUX#^BfjgF2pm5aY@J=FIV$oj0e3joI^pWEpdcY3_=w;o@ z%KMJn^u*)_OnO?K2kW`U%*~1GoNp6*(y9)9yPiE{fK0=O#m1Itrrw(19386#klrlR z;eWZeIX#?)hDmSv2g+#~!}fB7(6T(h7GuNz3jCDKu{<3gKH7}}=IocsTPA2Y?p_c6 zoAb+2KODGmwIrd(*@y<-CPuJ`+b+~45zA@I(MhvnRa{(wuM5`}!q=U;KZTM|e)nf$@e6dlGvxndU(7fJ-ee>vSCJHN>{W7mPHn{*zMOpq+95s zm)YXRUW?O+3vhFhLN1g<5bsC8hmeK(LpAKytLkuSDqw_7f>8&hCiJJ;s=e0Y2oE>O z6bqF==k#Jf&RdBkLE1&U6o8`|1zrD%>SgyR7J^*TTIr#FFfV?DgfHDdo-K97LTMl$?Rafkm6o`)HGIMWD91?dQ5>WEMV&7Gqsy z6RWLu+sevEuL#<`v9u#MYp=4mt8RYh$ow;wARYW|t-9b5L|gO4y!c}Fqu<~UD*M1T zs#tC_(HzYdiz8qbrW9H9m@XVzjoncS5fOvUi%wC)pKD{b=*Lu}oqaYZZ*0mwE@tS7 zdN2QtTEo=x`+SVODCeMqFY=={A-#qB6vA}eTLA|dU~2znh|)(a=SY<=y<{d%U4}~c zg(TIeBUmAYX4x99`>&C70`Cjs_7q_C(`G?8JMyaQVxE2BLb(A?&5q&lWRY6(_8z*K2EB5Y_0yIL!j4^N;ula&`G&kFZ0-sGvEmuxO7l$i= zOAxb3xTNzC2$9@9ND?F=m=zCwRzVFn1VyiO@(sXG>`%BY?1c zFE6SEX1mCvmgsEh+j{gtcgVFqsx-`@Suu*iTrUeqoV%7Zg!i|q^_xAa>IuVbGVr); zcXT44C)S@#w4!a34`cbLKc?}co+GVZ5UUZb`On+uF2`%ghKP5NSTN?uUwdhJnTZ|R zJMw+Q5N=VcfiKKpVP0y)2$+u!{|c-|{)Ko|BEb$y9_U=X+fjCr^=VNea!AeIX2j%j z0%1@+iIh$OOdL-J0zN1RNNbSIDl$53~3AQI~5XG_|d!{lKREzGJZYkH7=B=4~yA&NXw?n=+5 zq4f9bdr2gt^LPE~*}QcgehF$H#cD0qwl5#D(B{BWjBHn~tkxUcLfInrO3QL&haDBd z1cAxdos_dcfz??k+7hs;;o7`v2Fo^f9{Br-kd_U zMT2^xNLD;3$s18ju#31=x%xHcQ;v5#Uz`DuV%2-D9gl@aiRvWu7k_fP9=<;ds8s*Z znJnx{(O`Ly+7mYK0x<#GGdmk=Jh;KRzZ#4p2chO8yXlSTt56@0oiTp;wP9XRdG;pI zgM5NoGqa@31ECJE#3DI4FIpx%6n7JsoxbtsaIK#uNtjL7@etVD9&SQyW%d6|8cWT$ zL5|%>JrBm3Gy5oaGS<;@-hE8Jh`3ajk!&e`{MY+%fyP7zOiD!Gf1|GX5Lg$n9&}E5 z=Um|>p1@q!&6YGymAX%^x~h=Yl!UP{4|y}Ke>}R@NmqJxmu~XuYQY>W z15WJjrE%BbMyRjof0EPoDuKEmQ{#q7J=X*~x@qwoU~z+A5^KRuTffmGx;`MoBLpzi z&@(u0dV6T%=?kPzxX$?|b^6!<*xX^?qfRGl0@&I#2X-*%Rls*KKU@(qEu|OS1_cD@^NtNt zN3_`G?h!XvsY}4*&`9J(;b>ZZzzgCJfLykFGYD=E$cZVJ)3D7L?3;Twf>%v<^6 zU0cJYpSh5uKxAZbkX7Wz)nR3s>PMXUXnuc5={9J9pfM4V_>o@H&vUk0|L{_)&}Ty) ze|XH|Q~MT5Tc%ht(I*G?7N_)UJgfg0he)7vyfKQ{edTY?4UoX!M)@uRcy6a z298Cfmir3St<`4F9y}Ui#eq^E6QBRkE0tHa3yr8B4^h8ODR>yW5Es)7M1&b2(WCfz 
zAuHN#Ipts^x=0KUrPAI=Vq{1ca?KCvfn6op{bU$%U#6So4?5*|A5*gh#-cPJCwckd z{-J1l3B(lHP8*V;s|(6H@t=#sVF4IaM`zGJW&4kgYP?jzHS)tamXD)!L=C5v6JlV( z9f@8LZ04rGGd|%GuDw1BC#c;iern&qWuo|)XW;{~$0qi1jXvtNowALM1U%w2G|!{= z=8a!uL;3)3@Am_!N<={pC}p3C-cDi#9a#WzLk}G_xunvKvdf!J6z+D#gLn=Pd`IuE zR-mQ@MyFV+&I*BmoMd_QHnA+yBNWNQZ>hC6HN5(U_ib;*F^9TWyY?kUN-Ay6xY~l> zzTa}+e3}6ODLFjN1LmL9@N#7&%}FY6=%LEo2XM)K=i3)E!>{2hWGK~em0y4!UQ@3V z8k+ao*8>oWOAbU?-69ssaEd+$3&mfh;Ox0IaQVGTn>t1R- z>15DWB_7LV<-?4t!QmWpLS7$DNGJD5L!Iz^>BMsn$GAM!)&MDG{;4l_8Nm>^QQ>~$ zKGU=JK9-REGv?JpO7fX)g{(OvaeA{oi0v&Nq?C#_$BI61@8!?1)ona-F)=~303QcV zXTIr&mx?S47VNcU-)-U7VFulcRDNtBL}7~!z^G|_akd~vWk`%9Zs%7U_ZR7SMq0&?L=iB$s z^ufgj$cDLs1QQdxrjvqSMD`o%kSTI5QdiT0tswDmut=t>ziwtOjqzM{^5SY?Tu}9_ zyvdmyt^qy_YmmpG4ym1mToJlaK-|;?=d){vrKgyiCl&}`lPm*$1>-7AKqx1W3`NS{ zZ&qY*=cvr*Fm25(WX4+219<^zKky(+e-MDo_67D8w z7Yp~)3{V#yN_%ibCAuG)!TzCU6tC|^xIrYj!sE%5RthF`tA+u}-(vLAbr7#hj_=FX zVK4T)9&p+7o=p4l`1F4)cZBk{-&rkvKYuOAfx@)93ZNl}8>z)X6u4Ys{z@({VPZ3ee{yfDXz z6-mr1Y0=xt@!#PgiJT6=PY;p4SO2&-wsA7cL3l}}5G$Odqvz9`&xNvA#oG9rFg7JS zKdJJG;sXeHxdED$^a|G&9hwDA9uortBVR~vclO(_WXwDe;Re@?Q?y_g^r)z;Chi<6 zC^Z6bWNgT*yX%*F#jbH+OYA3)QG2(U+!4q05OaUwM`IMavw)rNi^q8aPJv-myt;l3 zH6D+W1pc?@aBaZSYYRO4MqZ_e9884zIgfUDYxMEG!&-OCPiAUs5a)Vq=1WXuzIgl{ zBN@k>TK4}p-2dEgLPr4E<#F8>QysJ<6(BuXo>IQjrF<%r?F@NKf(3l5az2BP`U`8) zPgsLu`6ev_;iFZY0riW@6GiW6d@kcN2CP5w(A!_syAWw~2SWqG$E{Z>+Aw$M)7}01 zA{NZVMr~uqp|T{sehH8I28(6Nalz>~?QF+}sWHh)07OGhOa#>uiS(3R>BmOgFJWNk zW1n}tK4D-029zF*MR^kjhNQmyo{!nfp&dAGbJr$1hgpd z>SsM8{-{&SqgJYYg^8ab;G#z^kASSb!9zkME zy%qwue--tkG1s5x;P=t9+PBRX%qBEsj`?Rct8;>@&7tImz;TuoI-Rp$X!HiWA@rxr z&s52riVs!1kZ^|BXhP48kmk*wO6)_Y0hNh}N|1gUP+MYQgTx+_Y<9Z))6LIJWfJVw zO*#wDc_}^@&pX_i#G6yXS|UOqI1P}N9x)~DP6u9;ROmu4(SN88QhWd>nkw9X!;avf zcSP*Uw>8v3oA3ufM(4(YK--615gOS%Jd_)%=c1nKH>J0q8<%Gnd)xsGhWy_Tq1t_X z1j!COHEO1>;4c*wE)^ z0I^EIEZhaR6)AQdb_rF3t%j;tauM5-S{o%s6jo*NJ$^Kk%fvs?6LrS=_a~Uz)y90s|j?jss|)r z+emCTL&|(V;&sG+xbe5$lpdr}RCvBLispI{03+6q#-CeD*TOFny3*vWZCQecT?e_;Dzjj~~S-oU62&25CB?&)5sqi*~kczJdHOVKfD3&e# 
zfO{u9Idae1Om@F@T{jGl5Qf>D>aLsT>dKrXE^VZS0Ygd^$fbQzZK}e%4g6c5Ht`Y7 z9IxzN;2PsXtSK`FP=N~~{dqaeP^ridI{LNNS4!gKYT)O*ngdw3vqKbt1U4;S)4-`T zC|gF3mG3Yru$pA-XDoscB0578vu&u(k5M*48hb=ZkccML4>+BCau!hceOsLvgngaE zMv42HWgiC&g>*w2la(F7`X~aX?RZET2SJEDAIWsHd+`<(+`^QB(PM zO>*i>Aq+ZLl?_*kH~f9Ygr9DlU#9c8?fD4hJj|8AAA+}u4}Za3ReMtg8BHXF1=>M3d+5n6eV?N1A#t+vmIEttBh7#7{1%Gf45;kM0|z5=0F{ zD_J~CDg}}M6#EvMoLu~0aru2`?Z{38pHFEoKsY%yI1%iiUSN>Q^(X23< zI{!8BO75YeCzuk%r-0zkYtRW5b zN0hXYXZqgX|AfoB(=ZUF$kZ8B&RtBy8a+EqKN)EMWaa>KGN(darbb<`ffjWv_}qoZ zllqFthKqVs+QM+ElQtLPl}U{_LjdZe_lYbw9}#dm=QX1lBAyAkuhX>sj8hzK&&rW(QRRNY zX<)?v6lPKHm9?50m20DhpJdQQ%t&ywp@<*SQP0LU8b8pbk*xL$XhHL`NpSMdF9*{I z#+JJD{SwzP-O|_|Py069=qNln9FbPe6ldI7juA?<0WVRLXht zLm_ise2TM3ufj3`KT#P)C4$)yL^NSRpf$M)8fa@Ot5UJ9$zTAoi!1ovm1nazbK!An z;9C~RT&Eo&_x>SF*(AS}>X2Qug*B@2h||L6+fy`oB~Du8M~i;*(!M{9s^HtXsFrQR zb+KV~S#s4Y{?S=_fOj&hAEo6Nj1_uCC!=mX3CcTDUTQdR!25&$Jgzv~cZ;Zun6n1h ztH#4n#R8#1rgp7|D-{b6N;3Ig6vM|eQPE&Z(+xI4DWK}a5(Ab!+LJ!xcmx-PWvE)( z@r{f%*>F)C52TT&Iop%p;rK7Y({~ae1rcuhPO8Y*JN(}oE)+#T8u+j763Ug*@0L~* zb^dhnpNXn3D{>xaABC^udR&RdRs==Kbng^kI#3bRHi!Qk?tgALBw`cW>8Xv4K{xIF z^&*%RHHw_=5-!;H9@^R)_;K*z({p=4~fOxsb~^ zaZJu7Vmk*v#yJg9P;Jnq(rP!|($ZKvx_j(8%SH1mYvQj%&<7+gC*#V}1Ya$X?fO8^ zeE-atVNqF?vq>m+ZtWb}SjF{(;Fq-DSYJjo&rbq_YTZ0=zNyjFl%Ajy0_XW}kEO2I zLrMS%;jA>s9S&TS8-1MU<;4Z}RjLCx0RY)DAZUl?&@;$`#l0nE21qtqMEv;|PEqkUR+;U&RqDqMkv#vWN@rGGt{Ywb)(8n1d3(OLC4a0w zfAElDd6EFWd}Ud4M+JBeMR8C~rgHI#Ne>^jD~;i(wPo*?h(x%K@O4HJx!|iJc07LS z3;@D!X6IN4XD4Uu*Jf*x#VsHJfA$6u4aEv5WbMRBH%JNluMdqPU%|e!~Rwg1NPMSuWPWw8&X^r#RLVlxM;{wq|juc=24-YfZax zx(#UT#&w=i@B7$oBvu|%lBGLPTpIF`EI3f1@I1I`{M;RlX`(OtddB8rGjY5aXt>~L z+Y=2;ge2bE(*&iBK=d0tsN)0Hh*8PYB2^k2V7KX9=gY&SpyE;=Pe5RJJMJj`g;5z1PD}z#=%v;4bjvHP-8q6{CDOAEoX+|%|hzi z(#Y;$9;@zyAl@;)ZuEMp`^}UbwM?lZTou)FH=NGMW#(>Gxm_knsLnS%j-`A`9Dfcv zAp9TZ5fPw_x({Fmqvb?4bw(3KTz$RV$EWU7J?^J4w=g^a{#))V1~1Zq1&KcqFxv0I z7UhX;A`ueLpC;odE4 zIV#;zgWojP@CQj=!3sLPH22v}pZ94UqQI;16{Y@M2N&dxiCjUxj@krY2XKk+0U(Mn 
z5ikXwUy3zioZOyXN6~l8WY2i^N=FNjyr9G-TLCbn&?j%5DK151{VCP&=31^ZZLpJu zh|b1X%3Ch+A_g(?qb8-bL#)}Pdl1QwS2SFhbe;Pmly-!gYd#RGiJIC4b)$nwd}{nq z!4%fJMyKdOI6~>jfN&F@@cPM$J4~18)F$L6ILdAtt>2sB))noJuAD|;;wWdb!)nu< zj3=&>xQ6K<(WP(dah#Yo>WFAu+4tiPJU)lYE6;)z@(jW3WISS5Wpjre(M#v}kd}v& z#=HyC5`8A~0Gj7+dStV6ub@;!dzP=HyXhbCqFH-??ku<-;RR4dJ%-gS32%>czQ2o~ zBA9D)@f{zg!2tPJ_?#7BoyT$=#n@lAP!`Agr?(FlgIxzNYZ?}LzhF&9n4cTAnGuA_ zP0k|DyZmfBdNzV>csVe$QSIHpEQJ=Kppj0wXmJKJe*Huq@IXX8id(U6F*z{yq$ph;O_L1}7=61}MoS_GSH0MwND zbJX9m{agL_Cp=0I-}I3&u@Cd=%uom*5T~JT6r#qd!g07NgG-T1Et6S~og>h}Dsw7U zS7eFW*M&i$b5`E{B;!-srZv`uE`P36h4fXY+l-0lO?OLa_vLukr+MK!GkqPkK0 zoh>#dXIyA2>B$RJUE$D9SGV>uAl8vPp&J(EJKTt(py{9=x~mcP10DD@KU-iZhwd0u z8bCJK3u6+2DWc5FS1*qeV^kxhfG{AHy{h6+1xT0q_O%J=5qP;W9oYH2f{f)gUJlGkIWBaL!rzhsA=><(tOdQE4xepnZ8_aN!tm+ITv&{Zj%B&1I z1LCU}lKFsHjhPdVc}78+V);UUe#9liLET7ZPTwT>9vS>=l_8rx-AT74{{of>V9Mq< z98TW{$tayvp*AfBMv9J)EkR{XV@%3@lE0O-a0suWht6Hf!RSjR3l`gxJ7OCK>QV6d zc0N#Ns}byc|MkZ2p8&?~N`R&|@?NeuTmRk%N z2T=v#Bh@p~u)md0F_tr@c}UX7o$c<)j|$}K>061k;&m(&mGWEG8LagH$f^=mK0rMU zv*m<@L-8eg8KaJ2MVpl_lD#)UEY|0+apR_uDZzwZG0Q~&*(x4VIcM6Q%g4DicvYxm z0dQp`NxPxy>y1VfDIa#lh9MgSrl0->3%wclt;0!c!G}N2sc9^4$P(o;zdSor8_c;{ z9$=-kT6dXz5sn8a)*TLTE%M#l7~q8WITaH13dyAi10@;qoW+4kWwkfV?c0r>Y;fGh zTT)VFvEFiDYeoZ-M%Rxv!t|ux8g$21lg7f8$w8lV#aYh!8Q=LocIPTipT}s(g5?;4 zSKr+`R}i<8KPGdx`tY8Ykv(^6=9g|Tl9pF>XhyGMf~e5QF$^3Qsc(J*CX~ke12|7; z9si<4FnROtgxVG6=3s#srKio;J4Nf-{kF-pQO-vQ*vbuno{AIbMoIfM82D4Epo#_@Z3|M2*o!qYyz(zh< zWm#NJ#CbJZb?d{I8@rX)K|ACXBGkxSf+1nvxuUBLqz8nIVcp}R8@++cMi(J6Zh~W+oE(FG>X?PUX^A0sN_OB|lP6d$bUxSJ?b|O3Ci7y~9 zz-67x!o#DDI7pL4@X4&zjL~M`qt4v>tVFs$UM#^>kfj|S<$lg}9x!|DBjnknTTt6z z;*(JI)WHt*Y{jK_tsjzY?f7W~0gj9ntVEx-vGv_EFk`=Ni7W};vHYJ6x@XfpI7)ZC$e{3nL=QcqwNhk1l);1_3F<={DqzV!h~ftuGNp0LKsI_ zFux53b)W=7T(RszmxQZeWVI{&fL@+V`Cb_jHOUM5eR0&hh`qjybvY|Owquz=mDh(_ zN_HePNKHkZR*kp`t+R+Xx~mZmNv(V(s_(t_CdX3$vFEro07To?1f4o zTW-AGvKX7I+rs{&BQ;MC@Mff|8lv#3ZCBO6hKr8=k^?fDvB4p=gu!T@T_)GFdt%=R zj?0bJD9oxbc7HkQaZG`9Ons}DAntq5io9^1yKFJQcE)Ba(n!+ug62KT- 
zr&l6o%|YEjYg*r(;>iK|v(>5!^WQDm!fm??uC=kZ)~N%hW(GD`Y8B?vh4ixl2)-p{ z>?%#95&}0AJ)9)swY+NTcYQ4cYF6L-6kJ@Gwzv^aJTiin z_gUT{58kMdb-?us9ob-wqXBTDm2Y19_Vf#3L%ftcdf?DTqF&`qA#2dec-*0cJJ!>d z^d5d+lr)nOKLqZaaa&-G&oz^Jb4#{DP2aHitxw=9Nv>1CkXAa6gI#cr$~wP;GnrBU_t?Ty7$V#gOK>*}K@v)PS3rhXSES3}J>!CSAhfj01V^xPa-ja| z0Um?D^~AE){>Rbm)(Y~ZB&dSa#o+xc>u{3~G_EylVy`e+VAbT=c#^t?5qo#{q5P{< zA?pN~_r|Uej!Vs~_71N;zL$ndX~Zy__tJbWEbJ4q(=rWR9cM-RJi(2S5*5PlycEZ8 zkyvWS1EMva3N<@HXdy($=C6uPBpEsOO9uq@hrD?;UbT-4p)@e3|b*BOd6l(V6G z7Br>aWP+6P-`>_bL0y~TK~8T)vdu?x0GLzdG0=`n?#iGLgo zDQu}rdfa1D+j_&K)49sX@j=V;7*}(fyF()O)tbHb=gfkxw|{XVqAyxpA?m+au4N+# zXbBOt2lv_s=W5|Hv6sSnuL#j)D0!=1Agdq)w*8eP$tp%x&bFdj4=+sp0c3n%tC0d( z%?3`IvG!l=M^okm`DzBWBJ!T-?2m1#SIi!&CDOTBf^q+A?e(04>YFjH3Uvx)k*YbF z0MQY1Tk|BivdskxYhhT%jvj_WP5h~9tuHaLO8PN!=-_;>uERKNz)H?#VP7hIYX8@ z-jAg>M0KyN-W2M==cwE!^>0A|Sc)y~p&Rg`3d9gh_(C76M+`vZm72stX*09a+>zX~ zgF+LegGVrXn!agy3znJ@#|MMCxloQYg5RuKXNgk_A6*?Fwn7ovUfT;`j8k#SoUaGV zwFEB9l#WP%C2S&B+X@^m5LI!xCnddeM|kfeh#ME4e)@v*Fc2zS!`o`JPDx|=HJT(I znv3ao$F_1u5J+8_km?6B5_8&!3w*bDgvB0doF%RuHsKB;zg0l(57E?)$iyz~0CHWm zF(@QttYzHK3-LEA}?@iu@Ik1Ar6;^=_#V z-Pb(h5Svm8fT_B#_=oU(@o@QpiV$o7sIC!&s6MjkC3yC^zo=+X)c*B`kQ)o>GtxV|=neoFVRL_#lu)u8$K0{+ zKGH9vq^D{SB~fBSRTXv+w*3$oo;1!PXrCwTQTDqA4wen<0Fz75)QEg^Hy3M$_?{EY zfKi!JRA6+9P_QnFi4^>Yg(XYD4TAGC%j#YS`f}@$Hl&}y@K7=XdKH0;GW-lZ8@K!sAR-0XzsWQ`$wU{+=|z*mhc1}ykVU;hM7Q$RzNM}B>gka$@>C8)dhQe=!%&VXfLum z9IROMRt1-cum~Fix?_%+&iFkgv6W#VoDW}YL2+XSa;JGE@AALG+aBLJKr64kVCM?l zrwpX=T8<}wl+_fD6!h#kY%)}K9sAc8j^ltC>3oB$p<2B*Q0?4cPSpdOjbEL>KzMje zsD}{DHVDS?=mt$3LdU`_nIHGOC5gW6k{orlf-2^Se-F%pRXFkA!Z8kJW}MWxpV zE%}JhW4tn8VZ*^TP4%p;Da4AR009!!n>tXjGW^E`y4H;}fO3%RrkC25bwwtR%exuu zIymx*a9?d>+i+bo(u#EJqsmdPm*h_N{((7`wAx( zEu5-!8e23%*YDmtxe<108jf=#=SRo~IiUC1Z!)Y3C1IR=7DA0irY``$ruCks!+KSz z-Q7UAYb34MXr>KgvLuMTgTyhm7U|Na7|OYe`osm*$Q2q6m!!OA`}v+!+>UPe%pfNb z4)YJbYyZ7fmof8(3=Ywg!S?>)84F4?z$$<=g!<*?*VGtq*ZWO{@Ce*zOo93Sljt-n zfb1_E8c@zytYW*G>PzjO#IDEpOLvDGUX&#_Rm|$qmhGZ{-4)SfD-*f^TlD$N@xjob 
zmApNvw>{?TI;^L8VD2qM>2*4j zj=seP<|;H2GH4NjB~%wsU;6e!k9yBEmq|~~??b%&l98?Ohc>I+` z*bzlWe_1c@mj>yr%$oQ1httTRbHb?O(&FbTwY!%cpvM{#(Npz4$F(K`g=;5BFol3~}W8R9iGps zjEB1sgxiP=EPG5i|KD)`bHj<36a~y*hkP#Ce==NI7ezBX+tRsm+qsKX&lQ=vZH@Br z!0CR6rvo8e?w0Om&bV0FfZJ#3VEvt;kW0BXpzy^=zZ|h}z6uq1>-e#fu$0Tzeqq6-|y2W0zA<*pBC*E#_7Y&sbf zY2=U!j_xb}4$?LKgt(_-ITMcJ-ZvI9Ar?-C;$Last>Yu$T#vS51iJ>oQ%qcO{}}iM zpL=(yAH5e~Jo;N6Si4C`_y_y;;V(f0o1DM3S4coIz>`RkoU>-5JS2y@KQ5V|8p~L{ z8CT2@5Z4108r^}I-nJ8CL0ebRy$E}Nb^j9&4OH&<1gF~5hO$nwaA0xrGDRkH{ZOnW zJ30&XuD6vHWpF2T<(NDog<`Jamycbqvi4>x@4Y{aP$UJVa*x9{%zks? z{8~&x#zV9-mMSx!H$_+L?py=ro)<|z$-=kn9mC3=sc-SdC;!cw^b}q;&R*z~BC3g# z)vrQ!4zA#BltKTevQzxNnuA@bh$-N+-I)JUR*5~{4sTk2{ z-hf0~d*Hk^3L1_=P=vSv&6)+_FH%#JE*L5cu0bU~uoXZ+ZTx91?`Z+_=sDv?M=Py; z_U*DO5rqNx46ZgEzMH&r2K5Or%3!Lo)oo}un6^M2?DMh-A@Z#(Hx$jTeXquJBoATn z545#o=3uklvro3Gwf9TvsJv2{O;h%V`VC?N+qHpjfUpTW&@i1hKM;FKg3t@{{8~F7 zRPC{Qh}mM<3$Dk?{d6PJkhK6FD(v`GdAl|^?kl3(O3`w23F>fw9IRf|u$LjBT+@VZ zSrqH_Sa9?M*$;<)Vme8T6VOIHY#q7}V0AwC>r7{r|HXY0-xHMqW>ci}jLkWKxYI=} z4K_TvFiIi)cy>mQ-^lk+4x=I2+xrg5HbFhnen;KuiCkCPEV=?V4ee} zJk25*(Ez?6@Y*6&*$0o2$KzSO_$=KayhmSg?TslRjt zcwQ{f4wMz1-880lFoHOkY=-d*h&Y*X=a^b_mS}x~14#E}k>^vLwM>bevYUrip1j%A zS^d;e(FtzD{ER7r6`^VYmLiAqh|tBSTt>h14T^^wrd!9|`bR)JxUE2vikKq9_`N}v z2@Ti5bgY=(${y?i%GB*2K}7iA%@UvF>*naz`p38#)x4JJTtsc9xB`q(>PC>q6(-QB zdFw(UTep#6g1UWUT}B*7=W{D`E2PD4DF}$oG~h3K=a1;N)S&5WYIy-Pq8Fm|dHGp1 zmbkbN1k$IABk40Jr+-OwCM}E0T}Itq ztUB*|(77-sCH9&KqmCJXZGTbf)aNV4y~|d+W2r3JmkxqZ*{8gFy2Q6`9~b|V_cNRn z5-Vd)nwgo6rln`#BBz`;6CSKBvJzXFPrBjEQ|t-jlh`kTKYSyA>5VI8@0A=0UIiEw zjF+fMr9ZCykgym%xM;AB0J_mO->s}I27Z$g?dz+LC)#B7;=acFrr$MjUk_((h2CK zWbm0&`^e40)mJWh78+rjyxR-V?MX;pHGKfiFOK$zlZurdqCwVD2y8}QO?od9I!dsY zjoK~8QpF3`tDO8GrVl(|xiH=)A6Lx6Ybh*3P?Q?E5eaZ&K2?AlW;lSeywB2N5-rCj zbf-isCrb(7@KWw_o-bb3<3yDZO&KIHKKz20N^PrPS!P)TSN)mYMKHdLa_2RX^xjeo zY%LL6SPOqoKI*F_ob=qae3L=Ns97@Ebhh^fEF8c6pAr#ftR|NPKIUumY>+Gr{VqCk zt2stIuR}txDfDSiC<&v)i&)g2WWCRrX(h 
zC97K_BFVOrdU22nF(&r=#UQKTS?B2aC$GTTw@)0>wr|SV}v>w4f;9OmrP4#eL^x}c8JPzl9uoSzL_V&%1)i~=2%6nQe377WAgJf8% zBid}XQcnj5>ifI|cgBf$i-mZaG$^@ND=-bca8>{hmfzkJuTGHoZw(g=oV-~{hs7N; zr9Z^j>1T?r;-2XAs)8ZISrBE%#ZZKT*V9Vt3XD_WX43GB7 z$F9_E>E)Z|Y8Q2(HC=yIRERp_0m2ZoD?6*Oc}pZuT+a5>N0 zP)+XJrPp-mjcUOI@@rI8O=a?e&x;W@=oDMSi4jfGUt&tLle@)(vJ2B#zoD7k*l@rMN&zY{Jy41Bh_=tu>j@C2FD z*ObwSn&q62LVo10R#zX0){oFvszN!_lvlxKFoL(ceW{K;`uBZO$!93wsF2jEqS79PE*R5{G~aS&A5*ClS0}wQ{BXf0oAxVW8_68X`TEWV57AxOl*1;r_7;?@d zK7-oGftZh?wHjL-IHCwOi_2|&qPf$1%0~5+CbPnMD~NW9oXgjqGXs`a8e$=)zq_8` z7<&HV`5j3qXEU&bKVbf7*Lfx54ZSZfCgx68#R0;#e?hXN9$eBOj!t|rZSCWxZLTIk zW9NLIr>Zla-bO-~kyJB~Ez)vP;Dsx>p@Ne6Pc+xI^5Vu~SDa+HXy%_f=FD-`Ek)=f z?lk<-nAmKduoNyrXcdQeJMn|3u^l`6_w?fQi4In#7eVVo#EIm@RlO8MbD8RKD;K6A zuHFALdt>7P=|y`IvikvD_5BH zpi|t9h_PL)Pn7HVfG+8{Zn1&SXw&j{cHN2Zj21+XooCpG^u3tW-FzV6L6!Mb$59pOulMC z)VNFH^dws*_T2dzvX#U}UG{ZMQNU>T??S}`hTNC2cb|S|~v*0B-EQhI_%fundJzWGa94g7~ktk;bWW9aJ zlImJ+xNeJrN>0>A~MREL90VcEj-cc#5iq)htv6_44NK?K>sbxQQkzuYc6CG zLeGV2G`I{isomBq*b6s8{9092BjzrxKKaZ^m&*^{KfOA`*M$!xuS9O}?buk#Qw0vK z6dBo+^J<-3FSAIYbJ;?NEk(Fl8_rA?K@ra)2C zYPe@&r;C{el_JGFo98ZvV>LP6ih(YBC-Tws%_KUN!QakefBTs)Y@Z$JRcV)|%(xuh zwMg0#&X?<%>vXA1Aqbx%xs`ryA=Ynl;WeSnZWr`BOADO$D7B0-S4ro`_ zj5-#KW7{taNvoiXl<$2&g_3kdZh}O)42X~{PP#jeC_O6LacyZ#y8C}PHE4Q`Q*}X^ zPQU?*$Q?1s_(|u;n)vSYWcNz*?0lbfaRT46ikx#zQaNH}yhpY+!v{@5TUZLWs)YJw z5WL+J&VB=>a#Nc_M^n14NyPbUMU@FnR@l&i}u z3t53*pNy35O*L$oO#E+iY;j8yv$aZWfhW%SjAkZVFl!~b;T-;&L;-A1&&E|0=`3o1 zaVPdF8+K`6P>}_oTzxC%??$+)Sc~MDvn%8AmlL45i=X|~2dj3iaQq}<$ zLZFufv3o^8tefkFCOJ}IVYQl+-$2JsaC&9x_w&3@bO(+aw+nTlsw3mrHtBQ;c*-%VTQ#-x~GpK zhk59dp_p$5ggLKMZ~C{d{~6%~#`&ZO-VHz==VGu6iO*c!uD@wgi?*C$mmDH`~#F*t>@>(ZQHhO+qThV+qP}nw$){8`WI#~yZMPcH!~yNbJp#w-P(~_(ZvfMvo?XZbrRK= zYJqwU!o6NMD`&c;3)ZGXV6_K{YJ+*eL}~dneLMP{whsM^m}JPt9AlxmlW}sCF}3GR_DHf6F^QH9MAXdoM4~ z5EE7k5X6=M7Mrv>&q5{P`3dSq8P)7Bp7l!BU>G)S_ULbvBu*-yfc}YP*W93F6IgF2 zS%7=je$(9JDKZE|f(LJKEFP}rPp*yXT=wv#Umj3gJ))d_c>#ASB7=B2YTW3rtz zrxY~023WE;z`K+hc^=cM!VVB 
zQYi284t6tFJ#VW^RB^u~C*KA*DYl*q`b3(C{}yvbae>c0i~F$-UGocyT?)E;R=1c4u%h4X$|>d=}zFo zTgYs&7&?4PYQ5u?{)}|vLDE;MJ*R$!L2%<9DSQ#G`DXkK?4WS7IDrg+MW6m%Tp_mE zh@%?h6zWbV!$~_}>s~)G{sRg*;L5(IRA+i;&{1;rjNea2*_%N@L731=ynSO666CWu1PMys% zeaY46h`D|xA=^}}Vnv*m^wp;ev;cNlh0fu%s=Rv-q}2TKn$GRfDg7J?@Q4dv>^nM~ zxM+A!pzgo!L^rEL?VS9o6@Obvkxh{a;6EYOyE`y`K*{;Qm~e4d!(_*A(Bu3k*Sa`q z&MKTAigJ7rtzS0ph;RCvq{{LFL-ot!Nmgzq5NDi|TYzHbb2_vSzw5rkf*w%k$|DPF z5#4x7!!?AX-0ifAS>ZT*e+^N;iPwB$kZd4?A!R|?j+Ta?K=;9r^*+6BfXnkQoRLk7 zwK#!WgP|oG<`sF#0Ir;lasW?is49_ACrq@Ru{o@8_c1^lYJwv?t3+Mny}m$s>Kkbf zi_AVrihDd{a=gj@PY21k%xAunB>x>Y3*F5WO7fQhX)|ZU7ON);TsB8$PLIh!JFK^{ zh@=6B7MJXci`Kefcj(smC{*{$r_Ks#7%Y-Y*+f`FnCDw59g+Yb{Ko=2*kJXV>D*-Y z>B@+GC@LA3mXlqV3P;?W1?@|PRX?X_t6uQwff zo1Hmx2|3bs9c|`s5!&1*k}I;1+8?l{)P?;3Ui6yLL)zE4o4;khT`KR{9us0|A-3`2 zHtS(N2i87uF`gg-EHIQ5KIWudToNV}y4pD*YX@SVAokpe*qP-bk(oihGl>jVbR-c9 z<|HF1<#*Dte2dcgJm?K_uNt6ZFP`Tcwc=J|JXyUckGM$TCLkX3M~i~S*3T=+5GyNh_QB|);ihfn;%+|N!O1rYYV4sO!9}smB8%}SdD!A zvk*?eqr1KtYAz+rw^IzKpl;!nwe*!GOlh1)L?HdmTFuBks+ODy@Px&Jep{sGE$Uxe zQQC-R$QiF1$?bwpS%fVGqtzcazYX~cGveM4yZm0Ni2tqOO0^r8{+chj<4Qcmo#2k3 z&5Kq)fjC>6!V*`mX13CC5d^w9-<44*+S#36S5OIdx&7a8|8v6`kX=+S3X0WPtMp4u z51*j(oPMYk3kxN`8hji}Q;4G)a(awid#RuQGO?r9K4k5ZLb}S~Y=n&-FclmgojcjW zi((AJm(RW!U@5B45;ykc?H4fqGL9OKA8j5Gl&?DwQ^E2)YnXECU;AAft7Wwz%?hGJPpqlqRX z_Teo!?v=3pfq!~zCsmh(WpJA>gwb{Oq{2XLClVg2)Ae>R5$=lsmMA@EgN!8@B~5Z zkHF5)2T+@*4M|vEd!(ApYe!E;GIEi}<|v9DX9fPerB>6@^*oq$dgE>D=@!!(?3AQB zIv!H?ogBA%PXurFp48<#8L(&Yt}#7tU>N8PU3uzos7#2V z`jcXE1eF)o@W*aOrVzf(gqa@z<JLLeD_@?>np&~O z>h+;fogYPUOG`B)oO1Yrp0)JW^SR?Q1VRDsT13DZP6 zHneQtmMhcn%l4%kV)EJ#QSL}1Tej(B=di20=tfSB4gs)m0)1~pOJF52&oNOZuAJKU zQ1uj@lxg3C-$J9`K)^Xb53rZS1F|nJwy0T|wr2E{SyDB$jr<+n?{NVZ47&n@ROqBs z4`A9|vA*b21_}fU_Yt_!ptLe$#4W8nChH$3dB7{c9QzS2{If7SjvBAA&KC#RY(Ieq zl^p2so%`I@aqHpyV=89~Xx%?`C~`CU=Dq##Vyt8WB zTr{O07X(e2K!<}qhSiIP=2tdjI?*!ooOlw1nL}V}^!|FrYAu}62WInySI*^9)~2V^ zOYekJlg#1l(X63?u#*2W+mfJL9pM6CB#YC+!m2gG0YG%k@x0XnKLqCtoBVayU(c6w zXrw`AGh|~3#7*=V8neT%} 
zirvM-ghibb0Uk!xP}{rJs(3AEMukjOXRr0yd>t8(hY*)?%UTX9E6M|;y^HE`IJ_&R)BKyo%TdEl)K4a?~DCh$9X{eJ$ksZfm+l^ z_tRTf;0A!b*hI>&1R@g&@{K~zb&+(dWaEn=_`y~?jnp4kM(NCXxAhvgL-Wmy2>}u- zGv)n|gI-$eEAN$yt$Ci|p=;d#ZsR$Y~vl@W<0|Sqj*!!fO|rTy_x453-52%i$bKhOs$uJbxIj zN)x}DKU|5G76BBqRcvtk10v45&sH%Qeu;M^*K0d@5mTGch%?>f;IMPRGI)bn{?Z*6e_D7S%T0ofmYuCQPme>DWVZ~Hl*n#4=Fi@&9G`ddoA zU=Ei9jg2lPT7pGBS>P5)P!MB_;a8}7ymyDJUY$dBzB7DZ&`x{B#I@ZR$VdeBl}l83 zzfdI?QRd*3gbd~yFfVcRZDMgACcPAr==WH$8ivMJ%9^Xpw@qYz88i42I?!TH3*k?-@U+p2^yov( z&;TPIVBzvztmPU6L#9|BCu%vtD8LV8jD*pCp*UxhyP4iBX6G2giGVJj5{l&v?g|lYOTn?G^GEOLi;`@+5JkdouqtLsF z-LHw8(OB~DL5-KT__zirR28pmo~lFI69IeMCh7Kx&d;#&K~}CiHmsYL z0?+7OfG-0geoMKM`5f?5xK;(umXPnvB!5vAcZvL;F;(!zC;(fl;QdJ`#6i8p0U<4>q zHYT3Zmr361*;Jp2HyN2X)v`fKmUQsO;3sC;x9t6knjYk#@rbv4JB~Cnq@=`dVM88+JV{(2 zZfN;CcwLl3uA8XZGvB>)z_)o*ICY9+F`5yc+&N!Y?f9t(u6S)wAryF7;d1V=0v7>6 zXFy|BY_jn_ln((|cDG~dugl{-W z8BfPk`REYKBZb4`AQLG4vtav5R%RR*>JXQ!ux_BXwWRN30DlD+#EeP73gye`2d5Fe z7H;3{BY| z)`ovU*7e0fqWKCzh$z#j{E`KUF*MDv@lQ4v@!gDy2hJ$u9qpVvOPAIi4`QnwkIF01ONUzT7`H$sms@@JR?B_n6i#x&rJy5CiZPWq&e$Xl$k%&cwWWH4$2 zFC4CMrH)k1!1fj7)o_CggJt%2f)E&96N$&%QfIf9S3*}rN^tJ=0JI&!_+b7TnecfFhf@8vm=mi$pTKBPEfGs)FrjygxF{q^pc3gc2BB z`n3*J4a`1nfDg*R8BPS z^)IdX*EoLD?cYO>>d?%V7PzPy4KO0RA?a=Gfj{JEbd4r#nh+={lwX1)qq$HiNpr}* zk3wR0K5*h#$if+PFB^i*%*Kzmf>zCw;gu29&`-XK#r*Lly|^|cH|K*=*rOs%9?A&w zdbfcVk%`re`%aNzO4??fK;8?wVQgnUdM{{#pPnjF>t7f341duG7|Qy^N1pWg7i6MM z@@yNVIIxxskFf96z+56+bwG~8Y`1gm`o!4m{Qg`}d@d00L8*+GvROCLEM?KxTs})w zD4KfCLzgkcbO#x!O0tWLntgL8V4bBS1SC2#3Q2sgMU4$6a~kJbLPYG#r6d5>#@~x0 zlG~|0nG;;bBp!YKx#1)1O~qgNknGfizfD=sC7D!3)qjf}V}+Wi7@( zR17IZeyyfNuqgt#%+~pcu@x+z?emqF6oq%^1xpxi$c~=Mw5f7TUO~+TfKPLVOTc1c;YiUn$75K>TBDh<%Cyxuc zUwwqY%BS8I6xQWdPxCJcTsX;l(T!lWF`O6DF(y}f&yBc)Dl|kk#S87|^a#Ii|vj8Qd?Ql6(iSYA}V9>5W5K_P+w>!>JW4}gs z^rFtmd|mXpcCGx7wygvPCf>lD@4b!^WjG4|ZfR~#W1bxwODl+M0pZGox|>dv{b6CH zQTd&z|7xqW0GqVvw&568XBRelmecLyV?iHUIQUZb_Us+s1r_3*^1~SYH2?S^SGJOH zKBeta2oB#S;Y_!%AD4c({#PclWrY4SPg>p*TF^8QNT{7k$blwdp1)0;BPZddyS%m; 
zw63Fo7j=M}%8{5d9P_6PD|83oG)N^Brox~JEJSU?0EchEhHUreBrPrz!K@yO!n~R$ zgF-MGXD1wfB0(t=Vk3dYRiuWM+qnhLRvBm4W>v#u*1rd!6zEY}WapI9r37!g&!ad8 zFb_=Mc9@`-N2*0e5fM{r^unL1W6FmGd!B>#YsCjWQCVRlQ!Ct*$AgjMwBC5#9@6i_ zmhsbxHRs#G>kD1|bnO{fm84rh%Ew!~w`KGYq-hO~D4X7bM7(g*@;-F{x4}q7v)~e5 zq%0n*5w}oc2CevAY)Vy=RvLub$W8?$oZ>fNB1YcTY(Eywldao@+jVRN+*YW?c9?s- zl2KJq`9b!b2~ z!Bw2W7JakmlW41J?1~GgUST9GAEylwERNC-A--j9vGPOyzc!pV15J3S`HM9?X>@<= zn~5Ae(&WEtO~Z}@<80S1mwT@FU@{XK0~W;rP9(eGQc;ip8}5H@I0;CsKklOg{JI}S zebYN*f{Pa7)2aQ5r0W<(WCz!45g1n{kB?($foL5|g6H!Mb{H%8_8-=*J(df3O^{_p zw(_1>#pleVr1`rh?tiMWIQZ$N=x2%7T9C*dTB zn?7To8Y&F5JWpRZCd_|2%O?GOlJvyLo*P&(~wl>vM9oy1Rc1yWq9&fc+oWEG6N{l3Il+0%Gl2d-lYZT2F zz*7-5i$9vKtT!Nc!c|$s_e9MA!j{_uk0cc?LxGvhaIO1^yI3J4Yz$9ZNST!a?5e61(}Yl-fhA7WRG_Z@U)lbWWR2O>ysq-C73W0BC4oLJ=| zGz7m?NGa2}JQzvZgOZ36XpBja4o6PcQFE18djKs=Wg7BaMi_x-6lF%d#kYq1GEpmw z-+AKvNVfchYJo_-8{4Y?t>OlV3poyFE(C)#T8i)&@KeK0ZFqv ziaI(=59~p9UYqAD%Ier}-N+Q{=1n{g<%vx5(z9k5=9`8@3pp?;K=HGOP3}RBJJ?qG z=lu|eL27o#7!fO+^E)6TUo6ZWc2o6Z|A+Y>TJ=4<-p^kD2QGCgQ4lMDJ?~s93jul6HbQ7WgAQG>!^0S&6 zYA>OV!GVMH38u!sz;L7@Gs0sXclE>m{kUyn6(Qf+d~C!Zq5Qpr)yidG_LvV8Tfxw# zq5v}-w^6T@h*I(oGmZRk73_2Ja`akk)2I?kAk<7S2vJcbcSi^Tffk$@n$X%)Dhh$~L=t#V(UwQROpElgZAHJ8f z^t-9x(BF1iMWy&_j#e@^K^zqNjBgtfd_&ZK&=|ZLO7bmp)+fn*08}D(lkUYtX-sXi zKe9J+yTFlIt0nrg!)wAoX!9NwH@Q28Sa>%+-$~7!&NdRK$=Taa1I>M4j#h16?-500 z%%<-C`W`zktqJWL{S(G037M*=2iLr)7yy2NisceKX1de8=Iw$h^e^&#=5Ey(tcZPj zJI7x~2%T>~ls{NvNWXv3r%FHx%PHwO4TVgaqL7ojI8jZ$tD@*oH*_{*?+P`DRN9~j zN71-`La9O<N-vTC-EBbgU^hm~|gVb6R{dj1b#N$Kz${-0M(c-k4WUrs~ z_uFek^0nt+lvNpEZa2kL3{0rl>bFP3PErdrs}cG2%g%W1GWzPwDIhw2yBrOC`mjZ? 
zre-BTs~U#P%0-H78k#(d$a@qKn`v)|&6B}jUY3wQ6qZw6-;y5kp*gDKIF|>g<}y=R zis?5}V<~ZI@16fj^CSZcvpn!z`;dK(c&K9)&uIf$nw$kpT3I4KOGpZL1%KhDn=CV7 zKY#51jCnX*(kPiYYBD=6s4OUQ<(=okP9=to&qOm1|AbSaXi3_UDA9wrfq`K* z@98Q!87uzl(`<5{?-9cT!P16^q~WNXyxfpRv2sS8NXvWe-FcyOl9L z4K)A{*TJk@Co0eX8}5H@IPkVG^4j~Xdk`ft^`4So>Px5mX&2rG#du_VeQ=W*U0K+- zuiQP_=kqn#2On{D@K_9l`CugPAL@f?gJlZl9{!gYBoq$G^-~f2La+rdJWzXbx0hk` z>7gB>@K3g~eYG_HTxQg`!BApdZ9>i%&F~dLWbZm;&jcH3svm!^3DQ9+rzz&AxW_so z8ST;wHIxZgu!hkoBJ3w)gF-@g7U{B87z`@9jpl2s%V01g| zq(F)h+l}+61coR*v8^jgx98{ghF^`nP|9YMe4KMA0GsL!#`K0S(dN-yel&!}^8tTLzj$x*q3zt2ePec^d3 z{4N4|+5fbEl^A;<2+%!6QA=KgA7IDkm+ zCvNb>b>Z$$FY#IEbFzmb4Pf5Q>$FYCLoKc;KL4S-B6t+ z9$7vWp&2RkMj_?Pa9*-bknQrL!}d|PHJO2k#-|o(P!me4u}Z$Y)IN6Gy?iB@Up!sY za=SbPVFv$b{_uFiRWa!^y5&83Mfm&%dKBBn1ps9S@noABl)|mDhcS!t=>V@f&iCx* zZL%|$IveJWVz24$ag78k=(Q2N+0nS|-LZWE%~-0P6A3IgTqcsgMHATC`W?aX2n8|b z<^KgwQss8tLzMMgkva-SG2Ly+vIgLG6zg~l@)s|$+iD(yLvZj|tD}Dy)fNer-um#A zt?3*RZHVvz38-zzH1plkt08n+l|v`f*z;_5_8j-nmirghxp^~^GLW>?guva)GwSq} zhL1$kj*-+05Tc&fyB+eq!sLg7tTaiX+zqP3U8gU`KJ6)Ski;JTx@R7`rV1I20Z*sw zo1X8@($t2D6T^q86Yj_e6CYjl?~2{;xM;MT;=_kfBx&uUmA&wSj)Q0TRp$s43?zL= zoMMW?cpEH&GJOtp$b1y2{}D*QEKZZz`4_yoz0ej?af`>>v|S?NRGv`DW$t8#_5p5A zd)+h%{kJZWAN+K)5E|_~Z>V2Jn*UxIU!xH3eQYSQIZj?%Z2)vV;g-rZRIR%2aJaI} zpFs(zaIP}2DfyR!C5cE?z;;EYo`YEr!Acu5R`S_az2xzwn3Mcm`HykB>^NkX2=|4; zGn)SG4%^5=^9=aky-k>Mm&XD;!hZ2lreS`}1De>G6%dSS6T&9aN3U?a!X_9DO*stp zz3jO2JV|I{gu7zDg?!`rD@nJj#++&i!kc@?d5JUdZk{*A7M>wzPi|(>N=7jO(N8GCqCxw zVkiyCP|$uhwEXRr2tg+6JhraqWOtKaFM0I{RxhH;#Q@b|^e>Nv^l#IeEEoq(Ex4vZ zG|V6<2jU<~ggTxnsNXz~YxvfAKz9*^Y|1RiPF2!`(7TqjPm!~#loecL#-#DqB zGi`>uD*@0_CnDI1;eBQ&{-AN6znpEUa2aGP$Y9x?aA_-DWABIX7=*y6Bqi7a>j)WK z*l23*N0StR`BJsXy}WjVCGGGxj_nAzmeVXd?*Q>P0wSUX-+7sJ6(E^FV*$;6<(OJ? 
z6*gF8F}84kfz)6!QKfR>CGQm31JiE==VOWBBEPuEDO`Om`vd>2;lks#pNEo$tg%K+ z;|t%E2JvN>4I`oqrlup6hDswvL3;<%WWF0*5_dm`PfC}>yS@Hzxc|A~q&q`s{fBVe z>%@)y~u4D7hp{`%8WmjUc%DnuG{C{@nQcB z0uQIe3fN$4Hs9v^7+I(1B+x8}lnk6F%5Y^yZ{PcNd+I0$-d~ z0{$0MnsMvdz%e)-@?poLA!rCj^YKo5VJ22FRf63$>HyoY@ZJ(hP!i^nQu_>`VOx70 zpG6%vG@weJ0ewb?!-e$O{dEh37C@hmD5_cNCcC#LaJxz5tGKNWjImg`2PnN-Sw4@j zcHQ}TKz5oUP+RW3DH=Bte5odHex{V;AU;ifD#3y{iU>SDn0VuK;c|mhqUh)Zu_P`Y zIV!dZWK6)mZ))XN`&-Vp;?asgS5r?3JzvJ`+*f0<44&{Mf-MFT{62Q4RrZNEXNlS| zS92n@jmqw$pa5tc-;E0m?&zx&zC`5km8nMvw}=k#6kT{4lhlyrxOss2;+b z$%I2Ukk&+Z(r;rMdW?CVlb9v+Z`iIRTJutIKpTQ5SNZR&;NPA7(lCwF1>m)~k@b40 z=%=z(%xS8<1q{!@LwPId!i0|9Mp!T5OGvY*#j=EQfZn6>C=-3ueH5vt`IcBf(6(-c>6YH9j6)3u9_jfGx|MqRXxDZ!-#PRoxch=lwVxb=q>&(~ z7-s+0vzWH8Aiyp{X3g#x5e&O1+P3V?iz&uj5N4g1Q9TEM_2GSB?Bn+mGGNXf%h6+X zW`Yw4;nB78+r%s@!Kk ztqB7kVXT2Y*J=Oslmlv&@rr^SP-}xsgYSmnP|gYiqQIt5sp-L|e`mTY6asAZ_vd&IT5xO;{}IyHz1 zvzYoIYOTKT-!rhI3PkPnt75uX%a-;g+^kECf`xsE)SU)!@=TFKYto861zeK5tEKEE z5Dk!4_vWNeYj<;>4q!B!Zd79p_d0q7`Yvn&WTB+ngNo;D6-nHaKvBMz$Z_xs4a5&b_9n3-?IRR@8eIw4W(O|Q&vr*JKB4Ng6PYd zQ*0C06e(b7=!$n~_%*Xh3e_@8TK#IIH^ulUJFAxJbxjklzRkHyP`68@hv(366lA?G zGE^dtwHZF~my;Mrz0Tq32jc<(j=*TL+q-00dSIhKIz_N}UQ0Z`tCMCTtq+-$T798u zTeC4548)K(Enp^u{UABc;%om;>kGcektQ}e?GXcSB#%@gEvc4!8+ar&lnC#!7;E*j z1hx~Yj)fLYoEmQASr9YvWkmCK#>czd1oDO;?dPM ztNM8@bK@am8`SA(ROdLj0~{|aIp)hW(#$m+cr07oZ8+3?YFC{TCeKm9l8rM4UO0#S zsrI%HCd89SuzFrpuZwA5`gpZ{?V#Yp?UUV595mf+5d#?Zp6&a-8Z9yt9#yyickDcP zCOv~IPbKj^i-k=-c7Ehz#{!;ilwCqvZnaJ@k_gv1~>y+HxZ{>e$xIba{)}Lx5{wGXLQf;^ zR%I{iJd_Sn!o7yK(%%0!-2dEg%sDq>0~*rRtO|M~%z~EAGw4x!T2r+I@)Ngt(L79`E9n`eaM0e{~X7+ZIUeFIULSn0ah8~K(JiddjVM*@5@`Gb3!#lO^ z?TKRD!yuNsSLoo58ts9o`g07p#3SQEmpR)`B?<&Mt5}Kt(SYuYGxZmtEHg{z6j7O} zeQxg6gew03>R7WMc&O@Lb6=kDYmV7cm9ZF`hRS9vzVl>HeLI27NYj0k;`q&-y^~_%uk{X#rX9cJ4 z#HTD$Ms|ilQ7-SoIxwHdV;_r{9o=j|&cq%J1U-mVQ%SHNBpc-BBd*ncHtATN9%cGz zwSqVEnkguuoM<`4B{ydt9ceu3jLZ#8oheZ*(AKNtTBGm|Hjl zqju0Tfp3}dncO%=o&JdB$gEoX`VcZZT<5reWF75QCtjbnAx0C+h{SEJ9+EJSBy5@v 
z!kd)kyfATe%BxOthjzWjQMe7=JrE0yT;$m3+S@@Rc8|Id#viF0{eeWA1`tSanYT_x zRmv;4yzwBjlkYEg&xf6@S4&xXW~n$Aq)9Ap3c!P_grjra>>0CEro)&MU{`W^Z7lPn zzIC!MweB0lAl(%25Zbr$+I*74y*}c$+%iX1zi8?d|0zbWr-mN|J&W=X^aVWQ%*RKl zec-ZVF-a@(u+h^dpO-~*x4)3zqetCYAT!ZC}#!;UY>Z*5vZrz zomWcV6D_oKU7cp-0*fm}tcegYoe_AmuoD%3gtBT%ph)3@e419O|xPe8j0Tj?t)6n-Rck+da3e01+8~R)%&eE? ziZuNOLkPPDgh8Jq=9$6&v1n&RQ)0&5xvALnwg%(MC~jG zw5S1#npp!f1|Gu>Qg=+KA1da1gaBm!N_$vc{IIK)n0I-}J(pgQfz~$8*!uN*czr$! zq|;uH`Y+Hw*-Jjb1$rP&4&W;oL3qE-{<0UfFWG#`5zfnP(OaR^i|TYm(z0JUHA@3m zUNzkrC5T`bdq>o~s6iN8pZ@(+DVz&8Ax`8xW!q22f9q9j?d~Sk^Rml|g)1vv+IbG1 z2XCbwU;JI&Q@C-;<)&y>&X1!##RzKL&Vn~ZhY|Lcq(^qrX;D+;j6o)7`CV6B2#)md#=M(;r2v~z+Bp-Q`%B2tr+nlbA!$ZfMajYn6Uh5snqP`-+n~VoyN+)h zy(1$%fjlNN{M}yGZG}RT>d4t^>2YBI5u13kD20Hca)1TfcK^wr-sy z_G6j7yv3fp&A#uK(G=Ap_i%}NP%Swk#Q`$Dw#K{}hkp8lHyi_@%V#GW{1{F9PT*#W zZ=rDXju26oF`&0xFD+I-7~@`Bn8ZF<$qGH3C5cGftFJV-$=HF!=tFRJDVT7da@T)R z0l8SW;#k;zUKB85B$1aOICcN*Q;cCt!@D1wZK}S`gjrc=$`A&c z@F6vuE>xUroGye{yZC19I=tY90@LPxOM4IDIscB(aiQz{$Sf=#pk}m_lP({7RVxoC z82Nr@plOQB6GR#?wp@q3qBc}n8% z)xTyvjIoiEs~LG`8w>eMcL)ClLlFb*M@MXC+#t3B2n>12?{{@`?Tb@>p z1YIA(F-ik)rfZDyaG%))DW-AnEgIYdYGaH;vCCw>^L6cKs1ad*{L<^MNJ6Wn`(4Uh zmq%OeJxe){{IPc;`LSw{8a#Ti>^|<$nD>9@hP8U&!Zv5u?MC-ugai0*4cAh5^L=%E zQUiDlvuK0s(d8sPS|m6*&JM42$8Note~$pqhZzL)2xw6#$36-P!|L;Y!~M?Sp6Pz>AufjJu(=FhBYqaH)-LC z($8%)a$yM4%6CZA=U*jD+ey!`wxR1v4R)mWZkNJ!=E=^PpY{qC%^X-Psw|Ku2VeLM z%CON0yH;ktIFt+?mY`g(IW&H-Kq5oc@bLzv(_O#^qoaiA{2sobtRMF2rzuM?k4X0Z zEW?xFhf?Rh^|uPte83d3l!Hh-?8J>{!^DAJCVA?Qn%#2&E_1~DQ}?@jo7}Lb67-UJ zrX&i{a(LT3vf zL9|ddpBT%mt2>yL~UG*W?d$lYhmzpG}795cY>822PsDlQrvq%+SN8@RER7g@e>hU6mR@g2%28VRX{sh_#B5t!GJ(Jz`_K zE=M0~4YDP6#35i9ivL~viA5sp%HiFSIsug1dB_-^mXv4nJYkC9YC%r(J&4S0Q|uiT zQtP~#cJt{Gti>NqCW1I?uiFQ{7~q>yE|=Pm05>(8??j8-3 z0**7?k42FHn;!_9HwD)4G^dPtCvI_Y4CC`70r>1_6{u}kap)A_Ub_Z$%ah+m!XCQ< z8azLWgy3h5hEBt|EQ9*Tj?IGg79daI{n3wS;_Zo~iB8!oYRNIFr#4v+m*1h;9gs34 zQox7kLL0X_@geqdZ0q>$#jo}wz0L_vjh81%JcIOrSH;CFa8{+}FCxEUX+^(gVfYYr 
zSI6B^FhMcug$q}oN(jRam&M&`!nNZ#zG6>Q_nm;a?HL#xiu!RucY#zOAj7g)?!u0% zjP;e}%u$n%km%Y4j@Mz^EA@sKPcw@EF<;NvejdE=@~%H5QtpwInGT9U^b^o+B+2)aA+HI2;ip z0I)v3;1@p%^qXkR1sEN;4gD86e}l&K56+qd}Z1#D_BGbNM$7TMS^L9dal)VHaN=6t`RFgdnIc` zv0eWkro*_k!wB&~M8lSZ3*R9Ol6V~7Ge*=_7}_y=aD@3W_yj%}MUSsJ_k-O=Bg5n5 zEF|(Kss{8xJ+h7deP3!A07W7R7G1V(#}(0UkGj_5xlWxNdekXfpVImu4QCN$X2f(Osxsd+H#J`LNE%UWunud%Gnq2v}(V96T zRfawm>mKSsZFp0SP#8n&O*%7d0o0z6YRNlMLo(jwMgNi7I??sjV|nYPHz80D0DJXJ8ZW-pFe)ThS!<>BjNs(p_WvF z_GynDu@{_=E*#{#xGR)u6Bs;MpKEnxrO88211C+yeIh+mTt zut>cV8jtL@okij5ztL^wJV9no<^QeWV(PpzE@cN;lY(rG@0nbvlG)H730bUo^-gby z`jUMfPJEkQnmC7}Nuld*CSO872mRl0|8v8c$x;Tb(mSxvfBlzd8*othphfSyy9Aq9 ztf>(-LWiE??179>cwN_m)3V9Dqba@}vmSNEFJM_B&I__DL7 zfULCp1n}|?aEMK;jYK8ZW>wR<6{o>hr7Fl;Y8r24^6{2kY1dJJ3z1!f)JBR8ejDb~ zPcLCf{W0^|-;tt#AvJP*iE$@ss&>7R$pBg zs=LE5qCdsR@O%NMMaSmHe3%d(bMCf*4aXXW~>Ih5k$?g7)C2HIDHw(aI1T#R>@jda29X|Sn+V-z~ zn_uqcs@q!>EG@8)YTP#M%EpdA*jAE6WcRhQk0d|yKxZsltPARYaq6Wm|B6?guB1SK zb0ct8vD&GttN1G=thYt+to0(<)%0`!BI z?-!*aU_ECW(JJ7CnUJ(1iRf$$+pYmFzn#p;8fL-5w_gZ5{db(~`GKvMxbTX_yj%4O zl#YWj6Xx1~6^@UDh?Eij);AfY`XM;OnP$H*<6qL>E*+_Ql8ia~4m#8^sMjk|1a*%pe&&s66d&p2Q7Y#G0L+rwwkO z+`l%bE{AxOAs~%u;Y=pw;__NYxC^`%&t0f6RfRZv-@8nBec2R?=iz%__v7h6k9bOd zw-eQz@a#R()L-tFlO@^IX{~Uc?RTchlxCii@iqS>G*nO;g3@tYVu6t+aPDrI4Hj(4 z#nLlof_F(mgV87CC^A-=8-0u&ab!LTfd|QYfN%$5xeXmGZJoFCm=1KQ;6@xHb)3Jm zIo~4D9(kX*bfw>_ykbiT%Bww>@(pST@DdCoyy;HZRBRn~K0B#c=MOWhZiz?j^E*xt z@m6Bj=VDDElpDKZ7Z3zoawdfI+MUwC~gQ?YHy#Y z(^s7dh>H~_tN5b1syBi85GY;xQ4dEX*7U*2rD@A?0M>OE$!e9(i!=ivujSHny#^zinrD z;v>}j`<3#VAhpY1W7~qcaoqShxR$@wowv@}X4xQYyDjdb*cWarP7a9dU_z^~=eT;{ z$A=W#A;k6f;wU0cKT^!K+0wCNiD!OTaXDP%Ec+t2Rmfd4D2H@+s>e?R_T~Q+| z5GTsvP;@@vhuR@cn$ULJqE1P!X3S>4c~)OKa(0)9_d#o$c_$DSMhbDmd}vo!=W@o; z?^r@8GAhvaWT*?OVj~a=(g^;J^F%w;QIS^ke+yuY+XTn9ovC-a4+YZ**G3)89JI|+ zcO)*$Zd1+c%^YoL%=WZxjKF`Ywm5G~-035kqv1~U)~0q$LqbG0Wdwv+SJn)NG8+d3 z`*CQeJdqA6vIB<>DN^;!$z1I1i+k&m${cH5n_Qg-dRxlsPA`TFCu5_h49s&WD1_kB z$7@-T0Em*_r#zPj9BmTSKimt5?~u`HG~LfG72T!^yZ=NN*kJ4BX4XkZo%lEr 
z6B#0B@^%G>tGGgjBS1cPyXHC=(-i}NylTOD+(tou@)pj>*z9lS!O!4pggKTP^A}rTg`0rnP6C%YhChgLnw>!wosMNMvRpKk2HxHS!TE zhs7L)m~MLH9lRB<&_u(}4?uPbAdIEkrKc(l)R318{GtA1!=Qp2KivV}vF%pq{)1WO zFnQ#{>7FQ=DR8R2j^P%gi7u>nKW;eB;wap-=De)i@UUA(V|{LN3CfD5Vvb>&g+_}N zH+7QHDS{6C_)Wv?4{s7&%%0Oj;r!G}AF9RyQnh)Rh~dR`U?7aAzOZ?ImZ2Bg>3;0w z^_p08gEWi_&_>ty8SI-3n?uI(jZW#{igr&3|jS z#7*q7iF91Fo>E+ic&HCW&C7cW=YuFh!QS`++-wbde4r{5SwU4gp%TO_&NHDl`2U9c zpBs)ANkro(mjn)mevO~X=~wK#Yav{Hk6I(FGKas`U(idAd8kZD z1cv$hh0WagrJnWZx-7NKp0yRK2lycD0w^|yNU8)H2{w>RHnK*`Qm)r5J||1Wj94t|3w5u6t{TWU{1`>q68T;!l-x)_b8L z7vwJ(>q8c}Ah^Jz(N&lu7N!^(2onYuhatiExJm`_u)xa8d*UmZTKQh2rcKq@0XtQW zR$=81)|qYwP#kcd`t2rjbV~qzWUdy^ng+HzNi*MLKTnE0=aaQWY`$$!7UK#F_Teoit7-u5>PpsfMSSRcj#k^N z9bSm@hzN7QHj1YiUTY*_b3ETOxkTP861jY`Emp^L8+&%#hm~Bps)SKAX`dw*k!C6` z-T;Xvd)Uywvjs!%&DI}vsmgTI7mj)?2skvzw3uFkUSQ*C-~v`FAm4LGIKgfBLe!W= z^Kj0{pTyB8&EXXSl81s|-{LT~4r5S;jdIDtjXv3yuc{k0Upcg6uwU*zmJ0S}9|-5L@$#5G zL7EG+tubJ@L%t#%OBFL-&>zOu!{2Pcu)X{9eR#D>_nt1lno~|}2T2!Rwp!zJ_Jwpnn%YENC?(S|%jx{9}txWZNF^T*5-ecx&+w-+=6sapJ#``HZP3Nvx z#;V|P=<@D?2VwONujr3IMdhIoZvO>g_q?#0@ZDahAG-~+q!ZGKL`{O8DP5K)zW{wA zQ*0OzDh%b{r{86} zLMe&hS86Nz?!3~X5JpCfA?zoZ3K7%c{GBDbq8dUs=ZWbGUE$LyugAF;A2JRDf3cML z(F(6i&>(oxbPU_c*9+poQm6@eaeNxOed>H*Ju814bP7sKy3=`=}tPA_}4xnNxr3D8Hv5?3Awi#w?i_w(1=BM>SBT zga~)ZTS%F3ir5p>SNzy2f?j39%MyEl8{R%Js6RO~5HAnXJ`Z>m%`ZDy-mec4rV{VQ z#@M0O|FxFz+>u+%kK+Ky;~#8XOOv7*u8U_sD0R|LA>;@se-vAz?kf{krwY>%@w@rh+ms}Ypfk^QA37h5hGWvLrfF3sUgFN!{Or9kzCCC zeCD9^_WezeD*LMOS_^5WnaBuxxN0t-#Xlaa+fv4k?SnztZHQnmB!k*f@RI;WR}a1i zjg8V?m*RYOxQLGNNgkb)@bx22RPCyFt9w;k`mOuoyr@ooVbaR80A{F>6osG+xBOS_ zr;t4PGM{db$YwxqYBLD8W@y|0?x`JLAs0L7%g~{6c{2a%*9px~iZ!t{!=S4@fE#3z z#KO0)xRGK###8Wje^fYG8+8{W)1;8~k7yG2zF7T{0r3OeM-!?+Aa6`k5XO^2vkqvC z396Na5)IZ@T=NAm{0N^${!4L-O>8+UR8rT}^OsWhv{3EG@J6hF(jEzRb%e?u5dyGt)Z^~h?nI7`UPz8)<}TEFP@Jo zGT&T333c?ZXWjXldcf}ucOQS?a5g|pWzTl&b6#J>mtVq2s(5bNKP=`0Ga)d~dW90IpdRWJhFJIHz( zmSJJ(U5644QNxoe77 z4XV!Y;$6eR+8d6lafoaP^xg1t;_8P-D+lo7K2ZO6MwR3b@-~TnUi;G-3+idcebnIj 
zrqPj04MBD${q4^+-~3krns7g9+%E53Rf*`S>+J`_|7{mIl-Z|##oZf?bhTY0p*9F` zK9A>ss1s;)>ddRlV~2ailOEPPI)ndP!~IaS1j}83Y;m_tx2>TthV(p!O>i8#H8yHH zE;snRav8DF%7&pHzIn);hUIJdgcg-i>ML0>;yU_bqo0WYpnG07ug@EX6cE zJFQehEFcUy)hI+9r*?rFO))YAzd$_)R(80*A7DB0)1k{_7@pS27e|{l|CT?8A-DGi zhAXJZ?U(USH%1h4D=xV29oBQ(#T^&Ro5b}7%^AP7<$8{lFtStSF*)plqflCCyt&M( z^nc`4SFF<4W-KP1OPQ#pNBwAeGnTNh#NWEc-$c{aWIsLcj;?2g@oeeTriGxoHug~0l+vC|p8w^62)kij znD5B@pAh8LDE6Bxt6{iZYZ>s9qO?VJye)oe#TD{R6>i%jm}5YZ`?O_mM~8Mu_yOXN z%~Q0sa=Vkz5VUXib$lE%^!>XC73cagt5e{LaIy^zB(`F;wP)MKrQK^%BJG_)J_#&R zC@|T~MMr6*_67j>XIl+hkxWGD);%iMcfIofN1DE+atsJ>;_WF{ql~}`x&RY6sJQD@1rYW;~i89k|BG0JgYFA`5M;xd))`0t16(C)h9~N)9o!W2Q5sk zpwur`jKe`Z8hsro6KDsWSK#Ui0x616ZYvM|FK8g_0&d#0ZCd;G+|1c&J}KVn_l2 zHxJ$nFEyG7_7keIT6;>MO5v_h?g1hd_|T?A!UQ!vaG|CV<&03>R5GMGt5t%R$)h}K zQ8)Qm1dr0XQf+XH2tnk#st+;G7jyH9D&c@I3-<@Y-;2fW>!55|D+I}Dn%Xt)-XcWa z$^JBggiD7rS~!%7t;&hbnn$E2%0uqHK852OW?P-Fm3=vK{?zkhCQP&5vTQ~A;0;NR z)}P=>dAzeGB36E~PIvvDiduhSCI#3LQ&NWHVa^AK>4mIzMSwbX$Imk+6)~{sl?98x zwt>W179Cp?SsYE=5}F!Y3EoR!_oj;Eh;crW$#9xC z?Zpw2;?vB@0Ut?xsDk|}{dD$J!%gFCl`Kl2`TSf(_GdP_P<6rW^dvi89cKHJy%7T2 zJx|ZVo}#Nj4Ujm4PFp4DGanl)=%7lw5$+7bm+kv;~ccpAEj_7GI_GxTdsbw+*dB{Rg4rdaEV})gMsyP+WEZ zlB)EZ6?c-YB#zU1gV|~v+$lRh#*((a>*IInb?Pb+Xc8`H5gE zDVkXU39X#SZZix(P|nf;RzEn$l9*ok@_GLO597?vz3FNV<4oM&TZmBcV>V{r09&vA z4LbBTKFBZzCI)3=^I?*me__&=j@$zgR0L}1#(O3bCu)}WYyxzm6jj$~D08WQXZhL4 zrxL+*c`&U08@gM3rQ0G5qu*4mSaMC8=U0itjkg*1)>JUnAlEqP!8m%dM2DD~hR!pN ziK#llej=aDXl++0P*iFE4RD+_b>#@Ja86|%)IQ*f+T2H>^U1-fOCqLCMZaE9?7Q-0 z-%9qXL)ob{TJA?2YJ7-fN|Njylb7GnrXyXYgZkCKm^^eQ8uiGzH;jBrG*-~EG5*zB$N}p5RIYDAnTb8@ybps<9+QL2j_NBXUsCQPB9wZtJ9?-bz$)g?BPeUAtDPvPVv6O&}oitudZwa(w#GlIoMYJVCcDE4O!o zO2t4X^JjPRZqn|>gCDH|JwNS0sQaUFgYpJXZBe+mI%pZ?mp@;Ke=?z|xxIsSTUW!& zHyx?s;`rrXG--~;5W-d-%Laztgx{O2TSBs%Iq}yL22j;vpHErDa_5Ec0}xN0 zzu1R}pf2Wz)TV_M@P*E4=N3p4J+8A97KMBWYi^|k={zcpM& z_99eUI(LAWv}xjiVVrygoqi%kZF}QQIHU9B0mm?*(vUF`rweg5skn$QU)1P7txwTo literal 0 HcmV?d00001 diff --git 
a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 6000115993..621475a3ec 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -1,7 +1,6 @@ #![cfg(not(debug_assertions))] use beacon_chain::attestation_simulator::produce_unaggregated_attestation; -use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; use beacon_chain::validator_monitor::UNAGGREGATED_ATTESTATION_LAG_SLOTS; use beacon_chain::{metrics, StateSkipConfig, WhenSlotSkipped}; @@ -155,7 +154,6 @@ async fn produces_attestations() { .store .make_full_block(&block_root, blinded_block) .unwrap(); - let blobs = chain.get_blobs(&block_root).unwrap().blobs(); let epoch_boundary_slot = state .current_epoch() @@ -223,8 +221,7 @@ async fn produces_attestations() { assert_eq!(data.target.root, target_root, "bad target root"); let rpc_block = - RpcBlock::::new(None, Arc::new(block.clone()), blobs.clone()) - .unwrap(); + harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(block.clone())); let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available( available_block, ) = chain @@ -296,14 +293,8 @@ async fn early_attester_cache_old_request() { .get_block(&head.beacon_block_root) .unwrap(); - let head_blobs = harness - .chain - .get_blobs(&head.beacon_block_root) - .expect("should get blobs") - .blobs(); - - let rpc_block = - RpcBlock::::new(None, head.beacon_block.clone(), head_blobs).unwrap(); + let rpc_block = harness + .build_rpc_block_from_store_blobs(Some(head.beacon_block_root), head.beacon_block.clone()); let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(available_block) = harness .chain diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 
46f5befbba..2a881b5b0f 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -1,6 +1,7 @@ #![cfg(not(debug_assertions))] use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock}; +use beacon_chain::data_column_verification::CustodyDataColumn; use beacon_chain::{ test_utils::{ test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, @@ -34,7 +35,12 @@ const BLOCK_INDICES: &[usize] = &[0, 1, 32, 64, 68 + 1, 129, CHAIN_SEGMENT_LENGT static KEYPAIRS: LazyLock> = LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); -async fn get_chain_segment() -> (Vec>, Vec>>) { +enum DataSidecars { + Blobs(BlobSidecarList), + DataColumns(Vec>), +} + +async fn get_chain_segment() -> (Vec>, Vec>>) { let harness = get_harness(VALIDATOR_COUNT); harness @@ -46,7 +52,7 @@ async fn get_chain_segment() -> (Vec>, Vec (Vec>, Vec (Vec>, Vec>>) { - let harness = get_harness(VALIDATOR_COUNT); - - harness - .extend_chain( - CHAIN_SEGMENT_LENGTH, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) - .await; - - let mut segment = Vec::with_capacity(CHAIN_SEGMENT_LENGTH); - let mut segment_blobs = Vec::with_capacity(CHAIN_SEGMENT_LENGTH); - for snapshot in harness - .chain - .chain_dump() - .expect("should dump chain") - .into_iter() - .skip(1) - { - let full_block = harness - .chain - .get_block(&snapshot.beacon_block_root) - .await - .unwrap() - .unwrap(); - segment.push(BeaconSnapshot { - beacon_block_root: snapshot.beacon_block_root, - beacon_block: Arc::new(full_block), - beacon_state: snapshot.beacon_state, - }); - let blob_sidecars = harness - .chain - .get_blobs(&snapshot.beacon_block_root) - .unwrap() - .blobs(); - segment_blobs.push(blob_sidecars) - } - (segment, segment_blobs) + (segment, segment_sidecars) } fn get_harness(validator_count: usize) -> BeaconChainHarness> { @@ -137,17 +119,35 @@ fn 
get_harness(validator_count: usize) -> BeaconChainHarness], - blobs: &[Option>], + chain_segment_sidecars: &[Option>], + spec: &ChainSpec, ) -> Vec> { chain_segment .iter() - .zip(blobs.iter()) - .map(|(snapshot, blobs)| { - RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + .zip(chain_segment_sidecars.iter()) + .map(|(snapshot, data_sidecars)| { + let block = snapshot.beacon_block.clone(); + build_rpc_block(block, data_sidecars, spec) }) .collect() } +fn build_rpc_block( + block: Arc>, + data_sidecars: &Option>, + spec: &ChainSpec, +) -> RpcBlock { + match data_sidecars { + Some(DataSidecars::Blobs(blobs)) => { + RpcBlock::new(None, block, Some(blobs.clone())).unwrap() + } + Some(DataSidecars::DataColumns(columns)) => { + RpcBlock::new_with_custody_columns(None, block, columns.clone(), spec).unwrap() + } + None => RpcBlock::new_without_blobs(None, block), + } +} + fn junk_signature() -> Signature { let kp = generate_deterministic_keypair(VALIDATOR_COUNT); let message = Hash256::from_slice(&[42; 32]); @@ -186,18 +186,22 @@ fn update_proposal_signatures( } } -fn update_parent_roots( - snapshots: &mut [BeaconSnapshot], - blobs: &mut [Option>], -) { +fn update_parent_roots(snapshots: &mut [BeaconSnapshot], blobs: &mut [Option>]) { for i in 0..snapshots.len() { let root = snapshots[i].beacon_block.canonical_root(); if let (Some(child), Some(child_blobs)) = (snapshots.get_mut(i + 1), blobs.get_mut(i + 1)) { let (mut block, signature) = child.beacon_block.as_ref().clone().deconstruct(); *block.parent_root_mut() = root; let new_child = Arc::new(SignedBeaconBlock::from_block(block, signature)); - if let Some(blobs) = child_blobs { - update_blob_signed_header(&new_child, blobs); + if let Some(data_sidecars) = child_blobs { + match data_sidecars { + DataSidecars::Blobs(blobs) => { + update_blob_signed_header(&new_child, blobs); + } + DataSidecars::DataColumns(columns) => { + update_data_column_signed_header(&new_child, columns); + } + } } 
child.beacon_block = new_child; } @@ -225,13 +229,36 @@ fn update_blob_signed_header( } } +fn update_data_column_signed_header( + signed_block: &SignedBeaconBlock, + data_columns: &mut Vec>, +) { + for old_custody_column_sidecar in data_columns.as_mut_slice() { + let old_column_sidecar = old_custody_column_sidecar.as_data_column(); + let new_column_sidecar = Arc::new(DataColumnSidecar:: { + index: old_column_sidecar.index, + column: old_column_sidecar.column.clone(), + kzg_commitments: old_column_sidecar.kzg_commitments.clone(), + kzg_proofs: old_column_sidecar.kzg_proofs.clone(), + signed_block_header: signed_block.signed_block_header(), + kzg_commitments_inclusion_proof: signed_block + .message() + .body() + .kzg_commitments_merkle_proof() + .unwrap(), + }); + *old_custody_column_sidecar = CustodyDataColumn::from_asserted_custody(new_column_sidecar); + } +} + #[tokio::test] async fn chain_segment_full_segment() { let harness = get_harness(VALIDATOR_COUNT); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; - let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) - .into_iter() - .collect(); + let blocks: Vec> = + chain_segment_blocks(&chain_segment, &chain_segment_blobs, &harness.spec) + .into_iter() + .collect(); harness .chain @@ -267,9 +294,10 @@ async fn chain_segment_varying_chunk_size() { for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] { let harness = get_harness(VALIDATOR_COUNT); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; - let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) - .into_iter() - .collect(); + let blocks: Vec> = + chain_segment_blocks(&chain_segment, &chain_segment_blobs, &harness.spec) + .into_iter() + .collect(); harness .chain @@ -308,9 +336,10 @@ async fn chain_segment_non_linear_parent_roots() { /* * Test with a block removed. 
*/ - let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) - .into_iter() - .collect(); + let mut blocks: Vec> = + chain_segment_blocks(&chain_segment, &chain_segment_blobs, &harness.spec) + .into_iter() + .collect(); blocks.remove(2); assert!( @@ -328,9 +357,10 @@ async fn chain_segment_non_linear_parent_roots() { /* * Test with a modified parent root. */ - let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) - .into_iter() - .collect(); + let mut blocks: Vec> = + chain_segment_blocks(&chain_segment, &chain_segment_blobs, &harness.spec) + .into_iter() + .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.parent_root_mut() = Hash256::zero(); @@ -365,9 +395,10 @@ async fn chain_segment_non_linear_slots() { * Test where a child is lower than the parent. */ - let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) - .into_iter() - .collect(); + let mut blocks: Vec> = + chain_segment_blocks(&chain_segment, &chain_segment_blobs, &harness.spec) + .into_iter() + .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = Slot::new(0); blocks[3] = RpcBlock::new_without_blobs( @@ -391,9 +422,10 @@ async fn chain_segment_non_linear_slots() { * Test where a child is equal to the parent. 
*/ - let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) - .into_iter() - .collect(); + let mut blocks: Vec> = + chain_segment_blocks(&chain_segment, &chain_segment_blobs, &harness.spec) + .into_iter() + .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = blocks[2].slot(); blocks[3] = RpcBlock::new_without_blobs( @@ -416,7 +448,7 @@ async fn chain_segment_non_linear_slots() { async fn assert_invalid_signature( chain_segment: &[BeaconSnapshot], - chain_segment_blobs: &[Option>], + chain_segment_blobs: &[Option>], harness: &BeaconChainHarness>, block_index: usize, snapshots: &[BeaconSnapshot], @@ -426,7 +458,7 @@ async fn assert_invalid_signature( .iter() .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + build_rpc_block(snapshot.beacon_block.clone(), blobs, &harness.spec) }) .collect(); @@ -453,7 +485,7 @@ async fn assert_invalid_signature( .take(block_index) .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + build_rpc_block(snapshot.beacon_block.clone(), blobs, &harness.spec) }) .collect(); // We don't care if this fails, we just call this to ensure that all prior blocks have been @@ -468,12 +500,11 @@ async fn assert_invalid_signature( .chain .process_block( snapshots[block_index].beacon_block.canonical_root(), - RpcBlock::new( - None, + build_rpc_block( snapshots[block_index].beacon_block.clone(), - chain_segment_blobs[block_index].clone(), - ) - .unwrap(), + &chain_segment_blobs[block_index], + &harness.spec, + ), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -531,7 +562,7 @@ async fn invalid_signature_gossip_block() { .take(block_index) .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + 
build_rpc_block(snapshot.beacon_block.clone(), blobs, &harness.spec) }) .collect(); harness @@ -583,7 +614,7 @@ async fn invalid_signature_block_proposal() { .iter() .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + build_rpc_block(snapshot.beacon_block.clone(), blobs, &harness.spec) }) .collect::>(); // Ensure the block will be rejected if imported in a chain segment. @@ -891,7 +922,7 @@ async fn invalid_signature_deposit() { .iter() .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + build_rpc_block(snapshot.beacon_block.clone(), blobs, &harness.spec) }) .collect(); assert!( @@ -957,7 +988,7 @@ fn unwrap_err(result: Result) -> U { #[tokio::test] async fn block_gossip_verification() { let harness = get_harness(VALIDATOR_COUNT); - let (chain_segment, chain_segment_blobs) = get_chain_segment_with_blob_sidecars().await; + let (chain_segment, chain_segment_blobs) = get_chain_segment().await; let block_index = CHAIN_SEGMENT_LENGTH - 2; @@ -969,7 +1000,7 @@ async fn block_gossip_verification() { // Import the ancestors prior to the block we're testing. 
for (snapshot, blobs_opt) in chain_segment[0..block_index] .iter() - .zip(chain_segment_blobs.iter()) + .zip(chain_segment_blobs.into_iter()) { let gossip_verified = harness .chain @@ -988,20 +1019,8 @@ async fn block_gossip_verification() { ) .await .expect("should import valid gossip verified block"); - if let Some(blob_sidecars) = blobs_opt { - for blob_sidecar in blob_sidecars { - let blob_index = blob_sidecar.index; - let gossip_verified = harness - .chain - .verify_blob_sidecar_for_gossip(blob_sidecar.clone(), blob_index) - .expect("should obtain gossip verified blob"); - - harness - .chain - .process_gossip_blob(gossip_verified) - .await - .expect("should import valid gossip verified blob"); - } + if let Some(data_sidecars) = blobs_opt { + verify_and_process_gossip_data_sidecars(&harness, data_sidecars).await; } } @@ -1229,6 +1248,51 @@ async fn block_gossip_verification() { ); } +async fn verify_and_process_gossip_data_sidecars( + harness: &BeaconChainHarness>, + data_sidecars: DataSidecars, +) { + match data_sidecars { + DataSidecars::Blobs(blob_sidecars) => { + for blob_sidecar in blob_sidecars { + let blob_index = blob_sidecar.index; + let gossip_verified = harness + .chain + .verify_blob_sidecar_for_gossip(blob_sidecar.clone(), blob_index) + .expect("should obtain gossip verified blob"); + + harness + .chain + .process_gossip_blob(gossip_verified) + .await + .expect("should import valid gossip verified blob"); + } + } + DataSidecars::DataColumns(column_sidecars) => { + let gossip_verified = column_sidecars + .into_iter() + .map(|column_sidecar| { + let subnet_id = DataColumnSubnetId::from_column_index( + column_sidecar.index(), + &harness.spec, + ); + harness.chain.verify_data_column_sidecar_for_gossip( + column_sidecar.into_inner(), + *subnet_id, + ) + }) + .collect::, _>>() + .expect("should obtain gossip verified columns"); + + harness + .chain + .process_gossip_data_columns(gossip_verified, || Ok(())) + .await + .expect("should import valid gossip 
verified columns"); + } + } +} + #[tokio::test] async fn verify_block_for_gossip_slashing_detection() { let slasher_dir = tempdir().unwrap(); @@ -1259,20 +1323,14 @@ async fn verify_block_for_gossip_slashing_detection() { let verified_block = harness.chain.verify_block_for_gossip(block1).await.unwrap(); if let Some((kzg_proofs, blobs)) = blobs1 { - let sidecars = - BlobSidecar::build_sidecars(blobs, verified_block.block(), kzg_proofs, &spec).unwrap(); - for sidecar in sidecars { - let blob_index = sidecar.index; - let verified_blob = harness - .chain - .verify_blob_sidecar_for_gossip(sidecar, blob_index) - .unwrap(); - harness - .chain - .process_gossip_blob(verified_blob) - .await - .unwrap(); - } + harness + .process_gossip_blobs_or_columns( + verified_block.block(), + blobs.iter(), + kzg_proofs.iter(), + None, + ) + .await; } harness .chain diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index d1a38b1cde..8654b33646 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1,7 +1,6 @@ #![cfg(not(debug_assertions))] use beacon_chain::attestation_verification::Error as AttnError; -use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::builder::BeaconChainBuilder; use beacon_chain::data_availability_checker::AvailableBlock; use beacon_chain::schema_change::migrate_schema; @@ -82,13 +81,26 @@ fn get_harness( reconstruct_historic_states: true, ..ChainConfig::default() }; - get_harness_generic(store, validator_count, chain_config) + get_harness_generic(store, validator_count, chain_config, false) +} + +fn get_harness_import_all_data_columns( + store: Arc, BeaconNodeBackend>>, + validator_count: usize, +) -> TestHarness { + // Most tests expect to retain historic states, so we use this as the default. 
+ let chain_config = ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }; + get_harness_generic(store, validator_count, chain_config, true) } fn get_harness_generic( store: Arc, BeaconNodeBackend>>, validator_count: usize, chain_config: ChainConfig, + import_all_data_columns: bool, ) -> TestHarness { let harness = TestHarness::builder(MinimalEthSpec) .spec(store.get_chain_spec().clone()) @@ -97,6 +109,7 @@ fn get_harness_generic( .fresh_disk_store(store) .mock_execution_layer() .chain_config(chain_config) + .import_all_data_columns(import_all_data_columns) .build(); harness.advance_slot(); harness @@ -2286,7 +2299,12 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { let temp1 = tempdir().unwrap(); let full_store = get_store(&temp1); - let harness = get_harness(full_store.clone(), LOW_VALIDATOR_COUNT); + + // TODO(das): Run a supernode so the node has full blobs stored. + // This may not be required in the future if we end up implementing downloading checkpoint + // blobs from p2p peers: + // https://github.com/sigp/lighthouse/issues/6837 + let harness = get_harness_import_all_data_columns(full_store.clone(), LOW_VALIDATOR_COUNT); let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); @@ -2319,10 +2337,8 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .unwrap(); let wss_blobs_opt = harness .chain - .store - .get_blobs(&wss_block_root) - .unwrap() - .blobs(); + .get_or_reconstruct_blobs(&wss_block_root) + .unwrap(); let wss_state = full_store .get_state(&wss_state_root, Some(checkpoint_slot)) .unwrap() @@ -2395,14 +2411,16 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await .unwrap() .unwrap(); + // This test may break in the future if we no longer store the full checkpoint data columns. 
let store_wss_blobs_opt = beacon_chain - .store - .get_blobs(&wss_block_root) - .unwrap() - .blobs(); + .get_or_reconstruct_blobs(&wss_block_root) + .unwrap(); assert_eq!(store_wss_block, wss_block); - assert_eq!(store_wss_blobs_opt, wss_blobs_opt); + // TODO(fulu): Remove this condition once #6760 (PeerDAS checkpoint sync) is merged. + if !beacon_chain.spec.is_peer_das_scheduled() { + assert_eq!(store_wss_blobs_opt, wss_blobs_opt); + } // Apply blocks forward to reach head. let chain_dump = harness.chain.chain_dump().unwrap(); @@ -2418,7 +2436,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await .unwrap() .unwrap(); - let blobs = harness.chain.get_blobs(&block_root).expect("blobs").blobs(); + let slot = full_block.slot(); let state_root = full_block.state_root(); @@ -2426,7 +2444,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { beacon_chain .process_block( full_block.canonical_root(), - RpcBlock::new(Some(block_root), Arc::new(full_block), blobs).unwrap(), + harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(full_block)), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -2480,13 +2498,12 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await .expect("should get block") .expect("should get block"); - let blobs = harness.chain.get_blobs(&block_root).expect("blobs").blobs(); if let MaybeAvailableBlock::Available(block) = harness .chain .data_availability_checker .verify_kzg_for_rpc_block( - RpcBlock::new(Some(block_root), Arc::new(full_block), blobs).unwrap(), + harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(full_block)), ) .expect("should verify kzg") { @@ -2587,7 +2604,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { reconstruct_historic_states: false, ..ChainConfig::default() }; - let harness = get_harness_generic(store.clone(), LOW_VALIDATOR_COUNT, chain_config); + let harness = 
get_harness_generic(store.clone(), LOW_VALIDATOR_COUNT, chain_config, false); let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); @@ -3075,6 +3092,10 @@ async fn deneb_prune_blobs_happy_case() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); + if store.get_chain_spec().is_peer_das_scheduled() { + // TODO(fulu): add prune tests for Fulu / PeerDAS data columns. + return; + } let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else { // No-op prior to Deneb. return; @@ -3122,6 +3143,10 @@ async fn deneb_prune_blobs_no_finalization() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); + if store.get_chain_spec().is_peer_das_scheduled() { + // TODO(fulu): add prune tests for Fulu / PeerDAS data columns. + return; + } let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else { // No-op prior to Deneb. return; @@ -3266,6 +3291,10 @@ async fn deneb_prune_blobs_margin_test(margin: u64) { let db_path = tempdir().unwrap(); let store = get_store_generic(&db_path, config, test_spec::()); + if store.get_chain_spec().is_peer_das_scheduled() { + // TODO(fulu): add prune tests for Fulu / PeerDAS data columns. + return; + } let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else { // No-op prior to Deneb. return; diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 0edda2f95b..07d2a90df9 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -1717,7 +1717,7 @@ mod tests { #[test] fn min_queue_len() { // State with no validators. 
- let spec = ForkName::latest().make_genesis_spec(ChainSpec::mainnet()); + let spec = ForkName::latest_stable().make_genesis_spec(ChainSpec::mainnet()); let genesis_time = 0; let state = BeaconState::::new(genesis_time, Eth1Data::default(), &spec); assert_eq!(state.validators().len(), 0); diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index daf2bf6ed4..747383754a 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -829,7 +829,8 @@ impl HttpJsonRpc { Ok(response.into()) } - pub async fn new_payload_v5_fulu( + // TODO(fulu): switch to v5 endpoint when the EL is ready for Fulu + pub async fn new_payload_v4_fulu( &self, new_payload_request_fulu: NewPayloadRequestFulu<'_, E>, ) -> Result { @@ -844,7 +845,7 @@ impl HttpJsonRpc { let response: JsonPayloadStatusV1 = self .rpc_request( - ENGINE_NEW_PAYLOAD_V5, + ENGINE_NEW_PAYLOAD_V4, params, ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, ) @@ -962,6 +963,19 @@ impl HttpJsonRpc { .try_into() .map_err(Error::BadResponse) } + // TODO(fulu): remove when v5 method is ready. 
+ ForkName::Fulu => { + let response: JsonGetPayloadResponseV5 = self + .rpc_request( + ENGINE_GET_PAYLOAD_V4, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + JsonGetPayloadResponse::V5(response) + .try_into() + .map_err(Error::BadResponse) + } _ => Err(Error::UnsupportedForkVariant(format!( "called get_payload_v4 with {}", fork_name @@ -1263,10 +1277,11 @@ impl HttpJsonRpc { } } NewPayloadRequest::Fulu(new_payload_request_fulu) => { - if engine_capabilities.new_payload_v5 { - self.new_payload_v5_fulu(new_payload_request_fulu).await + // TODO(fulu): switch to v5 endpoint when the EL is ready for Fulu + if engine_capabilities.new_payload_v4 { + self.new_payload_v4_fulu(new_payload_request_fulu).await } else { - Err(Error::RequiredMethodUnsupported("engine_newPayloadV5")) + Err(Error::RequiredMethodUnsupported("engine_newPayloadV4")) } } } @@ -1305,8 +1320,9 @@ impl HttpJsonRpc { } } ForkName::Fulu => { - if engine_capabilities.get_payload_v5 { - self.get_payload_v5(fork_name, payload_id).await + // TODO(fulu): switch to v5 when the EL is ready + if engine_capabilities.get_payload_v4 { + self.get_payload_v4(fork_name, payload_id).await } else { Err(Error::RequiredMethodUnsupported("engine_getPayloadv5")) } diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 0babb9d1a3..d727d2c159 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -230,7 +230,8 @@ pub async fn handle_rpc( if method == ENGINE_NEW_PAYLOAD_V1 || method == ENGINE_NEW_PAYLOAD_V2 || method == ENGINE_NEW_PAYLOAD_V3 - || method == ENGINE_NEW_PAYLOAD_V4 + // TODO(fulu): Uncomment this once v5 method is ready for Fulu + // || method == ENGINE_NEW_PAYLOAD_V4 { return Err(( format!("{} called after Fulu fork!", method), @@ -264,15 +265,16 @@ pub async fn handle_rpc( GENERIC_ERROR_CODE, )); } - if 
matches!(request, JsonExecutionPayload::V4(_)) { - return Err(( - format!( - "{} called with `ExecutionPayloadV4` after Fulu fork!", - method - ), - GENERIC_ERROR_CODE, - )); - } + // TODO(fulu): remove once we switch to v5 + // if matches!(request, JsonExecutionPayload::V4(_)) { + // return Err(( + // format!( + // "{} called with `ExecutionPayloadV4` after Fulu fork!", + // method + // ), + // GENERIC_ERROR_CODE, + // )); + // } } _ => unreachable!(), }; @@ -381,8 +383,9 @@ pub async fn handle_rpc( == ForkName::Fulu && (method == ENGINE_GET_PAYLOAD_V1 || method == ENGINE_GET_PAYLOAD_V2 - || method == ENGINE_GET_PAYLOAD_V3 - || method == ENGINE_GET_PAYLOAD_V4) + || method == ENGINE_GET_PAYLOAD_V3) + // TODO(fulu): Uncomment this once v5 method is ready for Fulu + // || method == ENGINE_GET_PAYLOAD_V4) { return Err(( format!("{} called after Fulu fork!", method), @@ -448,6 +451,22 @@ pub async fn handle_rpc( }) .unwrap() } + // TODO(fulu): remove this once we switch to v5 method + JsonExecutionPayload::V5(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseV5 { + execution_payload, + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), + blobs_bundle: maybe_blobs + .ok_or(( + "No blobs returned despite V5 Payload".to_string(), + GENERIC_ERROR_CODE, + ))? 
+ .into(), + should_override_builder: false, + execution_requests: Default::default(), + }) + .unwrap() + } _ => unreachable!(), }), ENGINE_GET_PAYLOAD_V5 => Ok(match JsonExecutionPayload::from(response) { diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index 7b48d64e36..fbc92a45cc 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -8,6 +8,7 @@ use beacon_processor::{ }; use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; +use lighthouse_network::rpc::methods::MetaDataV3; use lighthouse_network::{ discv5::enr::CombinedKey, libp2p::swarm::{ @@ -150,11 +151,21 @@ pub async fn create_api_server_with_config( let (network_senders, network_receivers) = NetworkSenders::new(); // Default metadata - let meta_data = MetaData::V2(MetaDataV2 { - seq_number: SEQ_NUMBER, - attnets: EnrAttestationBitfield::::default(), - syncnets: EnrSyncCommitteeBitfield::::default(), - }); + let meta_data = if chain.spec.is_peer_das_scheduled() { + MetaData::V3(MetaDataV3 { + seq_number: SEQ_NUMBER, + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + custody_group_count: chain.spec.custody_requirement, + }) + } else { + MetaData::V2(MetaDataV2 { + seq_number: SEQ_NUMBER, + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + }) + }; + let enr_key = CombinedKey::generate_secp256k1(); let enr = Enr::builder().build(&enr_key).unwrap(); let network_config = Arc::new(NetworkConfig::default()); diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index db4ef00257..1baa71699c 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1,4 +1,3 @@ -use beacon_chain::blob_verification::GossipVerifiedBlob; use beacon_chain::{ 
test_utils::{AttestationStrategy, BlockStrategy}, GossipVerifiedBlock, IntoGossipVerifiedBlock, @@ -7,9 +6,10 @@ use eth2::reqwest::StatusCode; use eth2::types::{BroadcastValidation, PublishBlockRequest}; use http_api::test_utils::InteractiveTester; use http_api::{publish_blinded_block, publish_block, reconstruct_block, Config, ProvenancedBlock}; +use std::collections::HashSet; use std::sync::Arc; use types::{ - BlobSidecar, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MainnetEthSpec, Slot, + ColumnIndex, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MainnetEthSpec, Slot, }; use warp::Rejection; use warp_utils::reject::CustomBadRequest; @@ -17,6 +17,8 @@ use warp_utils::reject::CustomBadRequest; type E = MainnetEthSpec; /* + * TODO(fulu): write PeerDAS equivalent tests for these. + * * We have the following test cases, which are duplicated for the blinded variant of the route: * * - `broadcast_validation=gossip` @@ -1375,7 +1377,7 @@ pub async fn block_seen_on_gossip_without_blobs() { // `validator_count // 32`. let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); let tester = InteractiveTester::::new(Some(spec), validator_count).await; // Create some chain depth. @@ -1437,7 +1439,7 @@ pub async fn block_seen_on_gossip_with_some_blobs() { // `validator_count // 32`. let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); let tester = InteractiveTester::::new(Some(spec), validator_count).await; // Create some chain depth. 
@@ -1464,8 +1466,8 @@ pub async fn block_seen_on_gossip_with_some_blobs() { blobs.0.len() ); - let partial_kzg_proofs = vec![*blobs.0.first().unwrap()]; - let partial_blobs = vec![blobs.1.first().unwrap().clone()]; + let partial_kzg_proofs = [*blobs.0.first().unwrap()]; + let partial_blobs = [blobs.1.first().unwrap().clone()]; // Simulate the block being seen on gossip. block @@ -1474,21 +1476,15 @@ pub async fn block_seen_on_gossip_with_some_blobs() { .unwrap(); // Simulate some of the blobs being seen on gossip. - for (i, (kzg_proof, blob)) in partial_kzg_proofs - .into_iter() - .zip(partial_blobs) - .enumerate() - { - let sidecar = Arc::new(BlobSidecar::new(i, blob, &block, kzg_proof).unwrap()); - let gossip_blob = - GossipVerifiedBlob::new(sidecar, i as u64, &tester.harness.chain).unwrap(); - tester - .harness - .chain - .process_gossip_blob(gossip_blob) - .await - .unwrap(); - } + tester + .harness + .process_gossip_blobs_or_columns( + &block, + partial_blobs.iter(), + partial_kzg_proofs.iter(), + Some(get_custody_columns(&tester)), + ) + .await; // It should not yet be added to fork choice because all blobs have not been seen. assert!(!tester @@ -1523,7 +1519,7 @@ pub async fn blobs_seen_on_gossip_without_block() { // `validator_count // 32`. let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); let tester = InteractiveTester::::new(Some(spec), validator_count).await; // Create some chain depth. @@ -1546,22 +1542,15 @@ pub async fn blobs_seen_on_gossip_without_block() { let (kzg_proofs, blobs) = blobs.expect("should have some blobs"); // Simulate the blobs being seen on gossip. 
- for (i, (kzg_proof, blob)) in kzg_proofs - .clone() - .into_iter() - .zip(blobs.clone()) - .enumerate() - { - let sidecar = Arc::new(BlobSidecar::new(i, blob, &block, kzg_proof).unwrap()); - let gossip_blob = - GossipVerifiedBlob::new(sidecar, i as u64, &tester.harness.chain).unwrap(); - tester - .harness - .chain - .process_gossip_blob(gossip_blob) - .await - .unwrap(); - } + tester + .harness + .process_gossip_blobs_or_columns( + &block, + blobs.iter(), + kzg_proofs.iter(), + Some(get_custody_columns(&tester)), + ) + .await; // It should not yet be added to fork choice because the block has not been seen. assert!(!tester @@ -1596,7 +1585,7 @@ pub async fn blobs_seen_on_gossip_without_block_and_no_http_blobs() { // `validator_count // 32`. let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); let tester = InteractiveTester::::new(Some(spec), validator_count).await; // Create some chain depth. @@ -1620,22 +1609,15 @@ pub async fn blobs_seen_on_gossip_without_block_and_no_http_blobs() { assert!(!blobs.is_empty()); // Simulate the blobs being seen on gossip. - for (i, (kzg_proof, blob)) in kzg_proofs - .clone() - .into_iter() - .zip(blobs.clone()) - .enumerate() - { - let sidecar = Arc::new(BlobSidecar::new(i, blob, &block, kzg_proof).unwrap()); - let gossip_blob = - GossipVerifiedBlob::new(sidecar, i as u64, &tester.harness.chain).unwrap(); - tester - .harness - .chain - .process_gossip_blob(gossip_blob) - .await - .unwrap(); - } + tester + .harness + .process_gossip_blobs_or_columns( + &block, + blobs.iter(), + kzg_proofs.iter(), + Some(get_custody_columns(&tester)), + ) + .await; // It should not yet be added to fork choice because the block has not been seen. assert!(!tester @@ -1672,7 +1654,7 @@ pub async fn slashable_blobs_seen_on_gossip_cause_failure() { // `validator_count // 32`. 
let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); let tester = InteractiveTester::::new(Some(spec), validator_count).await; // Create some chain depth. @@ -1697,17 +1679,15 @@ pub async fn slashable_blobs_seen_on_gossip_cause_failure() { let (kzg_proofs_b, blobs_b) = blobs_b.expect("should have some blobs"); // Simulate the blobs of block B being seen on gossip. - for (i, (kzg_proof, blob)) in kzg_proofs_b.into_iter().zip(blobs_b).enumerate() { - let sidecar = Arc::new(BlobSidecar::new(i, blob, &block_b, kzg_proof).unwrap()); - let gossip_blob = - GossipVerifiedBlob::new(sidecar, i as u64, &tester.harness.chain).unwrap(); - tester - .harness - .chain - .process_gossip_blob(gossip_blob) - .await - .unwrap(); - } + tester + .harness + .process_gossip_blobs_or_columns( + &block_b, + blobs_b.iter(), + kzg_proofs_b.iter(), + Some(get_custody_columns(&tester)), + ) + .await; // It should not yet be added to fork choice because block B has not been seen. assert!(!tester @@ -1742,7 +1722,7 @@ pub async fn duplicate_block_status_code() { // `validator_count // 32`. 
let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); let duplicate_block_status_code = StatusCode::IM_A_TEAPOT; let tester = InteractiveTester::::new_with_initializer_and_mutator( Some(spec), @@ -1804,3 +1784,13 @@ fn assert_server_message_error(error_response: eth2::Error, expected_message: St }; assert_eq!(err.message, expected_message); } + +fn get_custody_columns(tester: &InteractiveTester) -> HashSet { + tester + .ctx + .network_globals + .as_ref() + .unwrap() + .sampling_columns + .clone() +} diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 062a119e0d..8067711954 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -339,9 +339,9 @@ mod test { type E = MainnetEthSpec; - fn make_eip7594_spec() -> ChainSpec { + fn make_fulu_spec() -> ChainSpec { let mut spec = E::default_spec(); - spec.eip7594_fork_epoch = Some(Epoch::new(10)); + spec.fulu_fork_epoch = Some(Epoch::new(10)); spec } @@ -359,7 +359,7 @@ mod test { subscribe_all_data_column_subnets: false, ..NetworkConfig::default() }; - let spec = make_eip7594_spec(); + let spec = make_fulu_spec(); let enr = build_enr_with_config(config, &spec).0; @@ -375,7 +375,7 @@ mod test { subscribe_all_data_column_subnets: true, ..NetworkConfig::default() }; - let spec = make_eip7594_spec(); + let spec = make_fulu_spec(); let enr = build_enr_with_config(config, &spec).0; assert_eq!( diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 6a70eef9bd..2bf35b0e35 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -485,17 +485,9 @@ fn context_bytes( RpcSuccessResponse::BlobsByRange(_) | 
RpcSuccessResponse::BlobsByRoot(_) => { return fork_context.to_context_bytes(ForkName::Deneb); } - RpcSuccessResponse::DataColumnsByRoot(d) - | RpcSuccessResponse::DataColumnsByRange(d) => { - // TODO(das): Remove deneb fork after `peerdas-devnet-2`. - return if matches!( - fork_context.spec.fork_name_at_slot::(d.slot()), - ForkName::Deneb - ) { - fork_context.to_context_bytes(ForkName::Deneb) - } else { - fork_context.to_context_bytes(ForkName::Electra) - }; + RpcSuccessResponse::DataColumnsByRoot(_) + | RpcSuccessResponse::DataColumnsByRange(_) => { + return fork_context.to_context_bytes(ForkName::Fulu); } RpcSuccessResponse::LightClientBootstrap(lc_bootstrap) => { return lc_bootstrap @@ -730,10 +722,7 @@ fn handle_rpc_response( }, SupportedProtocol::DataColumnsByRootV1 => match fork_name { Some(fork_name) => { - // TODO(das): PeerDAS is currently supported for both deneb and electra. This check - // does not advertise the topic on deneb, simply allows it to decode it. Advertise - // logic is in `SupportedTopic::currently_supported`. 
- if fork_name.deneb_enabled() { + if fork_name.fulu_enabled() { Ok(Some(RpcSuccessResponse::DataColumnsByRoot(Arc::new( DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, )))) @@ -754,7 +743,7 @@ fn handle_rpc_response( }, SupportedProtocol::DataColumnsByRangeV1 => match fork_name { Some(fork_name) => { - if fork_name.deneb_enabled() { + if fork_name.fulu_enabled() { Ok(Some(RpcSuccessResponse::DataColumnsByRange(Arc::new( DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, )))) @@ -945,9 +934,10 @@ mod tests { use crate::rpc::protocol::*; use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use types::{ - blob_sidecar::BlobIdentifier, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, - BeaconBlockBellatrix, DataColumnIdentifier, EmptyBlock, Epoch, FixedBytesExtended, - FullPayload, Signature, Slot, + blob_sidecar::BlobIdentifier, data_column_sidecar::Cell, BeaconBlock, BeaconBlockAltair, + BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, DataColumnIdentifier, EmptyBlock, + Epoch, FixedBytesExtended, FullPayload, KzgCommitment, KzgProof, Signature, + SignedBeaconBlockHeader, Slot, }; type Spec = types::MainnetEthSpec; @@ -998,7 +988,17 @@ mod tests { } fn empty_data_column_sidecar() -> Arc> { - Arc::new(DataColumnSidecar::empty()) + Arc::new(DataColumnSidecar { + index: 0, + column: VariableList::new(vec![Cell::::default()]).unwrap(), + kzg_commitments: VariableList::new(vec![KzgCommitment::empty_for_testing()]).unwrap(), + kzg_proofs: VariableList::new(vec![KzgProof::empty()]).unwrap(), + signed_block_header: SignedBeaconBlockHeader { + message: BeaconBlockHeader::empty(), + signature: Signature::empty(), + }, + kzg_commitments_inclusion_proof: Default::default(), + }) } /// Bellatrix block with length < max_rpc_size. 
diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 80f15c9445..eac7d67490 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -554,9 +554,11 @@ impl ProtocolId { Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlobsByRange => rpc_blob_limits::(), Protocol::BlobsByRoot => rpc_blob_limits::(), - Protocol::DataColumnsByRoot => rpc_data_column_limits::(fork_context.current_fork()), + Protocol::DataColumnsByRoot => { + rpc_data_column_limits::(fork_context.current_fork(), &fork_context.spec) + } Protocol::DataColumnsByRange => { - rpc_data_column_limits::(fork_context.current_fork()) + rpc_data_column_limits::(fork_context.current_fork(), &fork_context.spec) } Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), @@ -637,13 +639,10 @@ pub fn rpc_blob_limits() -> RpcLimits { } } -// TODO(das): fix hardcoded max here -pub fn rpc_data_column_limits(fork_name: ForkName) -> RpcLimits { +pub fn rpc_data_column_limits(fork_name: ForkName, spec: &ChainSpec) -> RpcLimits { RpcLimits::new( - DataColumnSidecar::::empty().as_ssz_bytes().len(), - DataColumnSidecar::::max_size( - E::default_spec().max_blobs_per_block_by_fork(fork_name) as usize - ), + DataColumnSidecar::::min_size(), + DataColumnSidecar::::max_size(spec.max_blobs_per_block_by_fork(fork_name) as usize), ) } diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index 8cce9a0f25..c9e84e2dd1 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -223,7 +223,7 @@ mod test { fn test_sampling_subnets() { let log = logging::test_logger(); let mut spec = E::default_spec(); - spec.eip7594_fork_epoch = Some(Epoch::new(0)); + spec.fulu_fork_epoch = Some(Epoch::new(0)); let custody_group_count = 
spec.number_of_custody_groups / 2; let subnet_sampling_size = spec.sampling_size(custody_group_count).unwrap(); @@ -247,7 +247,7 @@ mod test { fn test_sampling_columns() { let log = logging::test_logger(); let mut spec = E::default_spec(); - spec.eip7594_fork_epoch = Some(Epoch::new(0)); + spec.fulu_fork_epoch = Some(Epoch::new(0)); let custody_group_count = spec.number_of_custody_groups / 2; let subnet_sampling_size = spec.sampling_size(custody_group_count).unwrap(); diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 1e1f3efa18..c199d2312b 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -283,27 +283,15 @@ impl PubsubMessage { } GossipKind::DataColumnSidecar(subnet_id) => { match fork_context.from_context_bytes(gossip_topic.fork_digest) { - // TODO(das): Remove Deneb fork - Some(fork) if fork.deneb_enabled() => { + Some(fork) if fork.fulu_enabled() => { let col_sidecar = Arc::new( DataColumnSidecar::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ); - let peer_das_enabled = - fork_context.spec.is_peer_das_enabled_for_epoch( - col_sidecar.slot().epoch(E::slots_per_epoch()), - ); - if peer_das_enabled { - Ok(PubsubMessage::DataColumnSidecar(Box::new(( - *subnet_id, - col_sidecar, - )))) - } else { - Err(format!( - "data_column_sidecar topic invalid for given fork digest {:?}", - gossip_topic.fork_digest - )) - } + Ok(PubsubMessage::DataColumnSidecar(Box::new(( + *subnet_id, + col_sidecar, + )))) } Some(_) | None => Err(format!( "data_column_sidecar topic invalid for given fork digest {:?}", diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 4a3fb28e10..8d07ef1a12 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -613,6 +613,11 @@ impl 
NetworkBeaconProcessor { blocks: Vec>, ) -> Result<(), Error> { let is_backfill = matches!(&process_id, ChainSegmentProcessId::BackSyncBatchId { .. }); + debug!(self.log, "Batch sending for process"; + "blocks" => blocks.len(), + "id" => ?process_id, + ); + let processor = self.clone(); let process_fn = async move { let notify_execution_layer = if processor diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 8238fa146d..8415ece638 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -15,7 +15,7 @@ use beacon_chain::test_utils::{ use beacon_chain::{BeaconChain, WhenSlotSkipped}; use beacon_processor::{work_reprocessing_queue::*, *}; use lighthouse_network::discovery::ConnectionId; -use lighthouse_network::rpc::methods::BlobsByRangeRequest; +use lighthouse_network::rpc::methods::{BlobsByRangeRequest, MetaDataV3}; use lighthouse_network::rpc::{RequestId, SubstreamId}; use lighthouse_network::{ discv5::enr::{self, CombinedKey}, @@ -198,11 +198,21 @@ impl TestRig { let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); // Default metadata - let meta_data = MetaData::V2(MetaDataV2 { - seq_number: SEQ_NUMBER, - attnets: EnrAttestationBitfield::::default(), - syncnets: EnrSyncCommitteeBitfield::::default(), - }); + let meta_data = if spec.is_peer_das_scheduled() { + MetaData::V3(MetaDataV3 { + seq_number: SEQ_NUMBER, + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + custody_group_count: spec.custody_requirement, + }) + } else { + MetaData::V2(MetaDataV2 { + seq_number: SEQ_NUMBER, + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + }) + }; + let enr_key = CombinedKey::generate_secp256k1(); let enr = enr::Enr::builder().build(&enr_key).unwrap(); let network_config = Arc::new(NetworkConfig::default()); @@ -342,6 
+352,7 @@ impl TestRig { ) .unwrap(); } + pub fn enqueue_single_lookup_rpc_blobs(&self) { if let Some(blobs) = self.next_blobs.clone() { let blobs = FixedBlobSidecarList::new(blobs.into_iter().map(Some).collect::>()); @@ -350,7 +361,7 @@ impl TestRig { self.next_block.canonical_root(), blobs, std::time::Duration::default(), - BlockProcessType::SingleBlock { id: 1 }, + BlockProcessType::SingleBlob { id: 1 }, ) .unwrap(); } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index ab654ddf77..49f73bf9c8 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -751,11 +751,6 @@ impl NetworkService { } } - // TODO(das): This is added here for the purpose of testing, *without* having to - // activate Electra. This should happen as part of the Electra upgrade and we should - // move the subscription logic once it's ready to rebase PeerDAS on Electra, or if - // we decide to activate via the soft fork route: - // https://github.com/sigp/lighthouse/pull/5899 if self.fork_context.spec.is_peer_das_scheduled() { self.subscribe_to_peer_das_topics(&mut subscribed_topics); } @@ -806,32 +801,32 @@ impl NetworkService { } } + /// Keeping these separate from core topics because it has custom logic: + /// 1. Data column subscription logic depends on subscription configuration. + /// 2. Data column topic subscriptions will be dynamic based on validator balances due to + /// validator custody. + /// + /// TODO(das): The downside with not including it in core fork topic is - we subscribe to + /// PeerDAS topics on startup if Fulu is scheduled, rather than waiting until the fork. + /// If this is an issue we could potentially consider adding the logic to + /// `network.subscribe_new_fork_topics()`. 
fn subscribe_to_peer_das_topics(&mut self, subscribed_topics: &mut Vec) { - if self.subscribe_all_data_column_subnets { - for column_subnet in 0..self.fork_context.spec.data_column_sidecar_subnet_count { - for fork_digest in self.required_gossip_fork_digests() { - let gossip_kind = - Subnet::DataColumn(DataColumnSubnetId::new(column_subnet)).into(); - let topic = - GossipTopic::new(gossip_kind, GossipEncoding::default(), fork_digest); - if self.libp2p.subscribe(topic.clone()) { - subscribed_topics.push(topic); - } else { - warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); - } - } - } + let column_subnets_to_subscribe = if self.subscribe_all_data_column_subnets { + &(0..self.fork_context.spec.data_column_sidecar_subnet_count) + .map(DataColumnSubnetId::new) + .collect() } else { - for column_subnet in &self.network_globals.sampling_subnets { - for fork_digest in self.required_gossip_fork_digests() { - let gossip_kind = Subnet::DataColumn(*column_subnet).into(); - let topic = - GossipTopic::new(gossip_kind, GossipEncoding::default(), fork_digest); - if self.libp2p.subscribe(topic.clone()) { - subscribed_topics.push(topic); - } else { - warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); - } + &self.network_globals.sampling_subnets + }; + + for column_subnet in column_subnets_to_subscribe.iter() { + for fork_digest in self.required_gossip_fork_digests() { + let gossip_kind = Subnet::DataColumn(*column_subnet).into(); + let topic = GossipTopic::new(gossip_kind, GossipEncoding::default(), fork_digest); + if self.libp2p.subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); } } } diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 7a234eaef0..70a3fe4f5a 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ 
b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -321,7 +321,7 @@ mod tests { let blocks = (0..4) .map(|_| { generate_rand_block_and_data_columns::( - ForkName::Deneb, + ForkName::Fulu, NumBlobs::Number(1), &mut rng, &spec, @@ -384,7 +384,7 @@ mod tests { let blocks = (0..4) .map(|_| { generate_rand_block_and_data_columns::( - ForkName::Deneb, + ForkName::Fulu, NumBlobs::Number(1), &mut rng, &spec, diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index e21041192d..4135f901b1 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -373,6 +373,7 @@ impl SyncNetworkContext { "count" => request.count(), "epoch" => epoch, "peer" => %peer_id, + "id" => id, ); let rpc_request = match request { BlocksByRangeRequest::V1(ref req) => { @@ -442,6 +443,7 @@ impl SyncNetworkContext { "epoch" => epoch, "columns" => ?columns_by_range_request.columns, "peer" => %peer_id, + "id" => id, ); self.send_network_msg(NetworkMessage::SendRequest { diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 341fe8667c..9ab581950c 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -43,8 +43,8 @@ use types::ForkContext; use types::{ data_column_sidecar::ColumnIndex, test_utils::{SeedableRng, TestRandom, XorShiftRng}, - BeaconState, BeaconStateBase, BlobSidecar, DataColumnSidecar, Epoch, EthSpec, ForkName, - Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot, + BeaconState, BeaconStateBase, BlobSidecar, DataColumnSidecar, EthSpec, ForkName, Hash256, + MinimalEthSpec as E, SignedBeaconBlock, Slot, }; const D: Duration = Duration::new(0, 0); @@ -54,12 +54,8 @@ const SAMPLING_REQUIRED_SUCCESSES: usize = 2; type DCByRootIds = Vec; type DCByRootId = (SyncRequestId, Vec); -struct TestRigConfig { - peer_das_enabled: bool, -} - impl TestRig { - fn 
test_setup_with_config(config: Option) -> Self { + pub fn test_setup() -> Self { let logger_type = if cfg!(feature = "test_logger") { LoggerType::Test } else if cfg!(feature = "ci_logger") { @@ -70,13 +66,7 @@ impl TestRig { let log = build_log(slog::Level::Trace, logger_type); // Use `fork_from_env` logic to set correct fork epochs - let mut spec = test_spec::(); - - if let Some(config) = config { - if config.peer_das_enabled { - spec.eip7594_fork_epoch = Some(Epoch::new(0)); - } - } + let spec = test_spec::(); // Initialise a new beacon chain let harness = BeaconChainHarness::>::builder(E) @@ -155,24 +145,18 @@ impl TestRig { } } - pub fn test_setup() -> Self { - Self::test_setup_with_config(None) - } - - fn test_setup_after_deneb() -> Option { + fn test_setup_after_deneb_before_fulu() -> Option { let r = Self::test_setup(); - if r.after_deneb() { + if r.after_deneb() && !r.fork_name.fulu_enabled() { Some(r) } else { None } } - fn test_setup_after_peerdas() -> Option { - let r = Self::test_setup_with_config(Some(TestRigConfig { - peer_das_enabled: true, - })); - if r.after_deneb() { + fn test_setup_after_fulu() -> Option { + let r = Self::test_setup(); + if r.fork_name.fulu_enabled() { Some(r) } else { None @@ -187,6 +171,10 @@ impl TestRig { self.fork_name.deneb_enabled() } + pub fn after_fulu(&self) -> bool { + self.fork_name.fulu_enabled() + } + fn trigger_unknown_parent_block(&mut self, peer_id: PeerId, block: Arc>) { let block_root = block.canonical_root(); self.send_sync_message(SyncMessage::UnknownParentBlock(peer_id, block, block_root)) @@ -387,7 +375,7 @@ impl TestRig { .__add_connected_peer_testing_only(false, &self.harness.spec) } - fn new_connected_supernode_peer(&mut self) -> PeerId { + pub fn new_connected_supernode_peer(&mut self) -> PeerId { self.network_globals .peers .write() @@ -1945,7 +1933,7 @@ fn test_same_chain_race_condition() { #[test] fn block_in_da_checker_skips_download() { - let Some(mut r) = TestRig::test_setup_after_deneb() else { + 
let Some(mut r) = TestRig::test_setup_after_deneb_before_fulu() else { return; }; let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); @@ -1963,7 +1951,7 @@ fn block_in_da_checker_skips_download() { #[test] fn block_in_processing_cache_becomes_invalid() { - let Some(mut r) = TestRig::test_setup_after_deneb() else { + let Some(mut r) = TestRig::test_setup_after_deneb_before_fulu() else { return; }; let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); @@ -1989,7 +1977,7 @@ fn block_in_processing_cache_becomes_invalid() { #[test] fn block_in_processing_cache_becomes_valid_imported() { - let Some(mut r) = TestRig::test_setup_after_deneb() else { + let Some(mut r) = TestRig::test_setup_after_deneb_before_fulu() else { return; }; let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); @@ -2014,7 +2002,7 @@ fn block_in_processing_cache_becomes_valid_imported() { #[ignore] #[test] fn blobs_in_da_checker_skip_download() { - let Some(mut r) = TestRig::test_setup_after_deneb() else { + let Some(mut r) = TestRig::test_setup_after_deneb_before_fulu() else { return; }; let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); @@ -2033,7 +2021,7 @@ fn blobs_in_da_checker_skip_download() { #[test] fn sampling_happy_path() { - let Some(mut r) = TestRig::test_setup_after_peerdas() else { + let Some(mut r) = TestRig::test_setup_after_fulu() else { return; }; r.new_connected_peers_for_peerdas(); @@ -2050,7 +2038,7 @@ fn sampling_happy_path() { #[test] fn sampling_with_retries() { - let Some(mut r) = TestRig::test_setup_after_peerdas() else { + let Some(mut r) = TestRig::test_setup_after_fulu() else { return; }; r.new_connected_peers_for_peerdas(); @@ -2072,7 +2060,7 @@ fn sampling_with_retries() { #[test] fn sampling_avoid_retrying_same_peer() { - let Some(mut r) = TestRig::test_setup_after_peerdas() else { + let Some(mut r) = TestRig::test_setup_after_fulu() else { return; }; let peer_id_1 = r.new_connected_supernode_peer(); @@ -2093,7 
+2081,7 @@ fn sampling_avoid_retrying_same_peer() { #[test] fn sampling_batch_requests() { - let Some(mut r) = TestRig::test_setup_after_peerdas() else { + let Some(mut r) = TestRig::test_setup_after_fulu() else { return; }; let _supernode = r.new_connected_supernode_peer(); @@ -2119,7 +2107,7 @@ fn sampling_batch_requests() { #[test] fn sampling_batch_requests_not_enough_responses_returned() { - let Some(mut r) = TestRig::test_setup_after_peerdas() else { + let Some(mut r) = TestRig::test_setup_after_fulu() else { return; }; let _supernode = r.new_connected_supernode_peer(); @@ -2164,7 +2152,7 @@ fn sampling_batch_requests_not_enough_responses_returned() { #[test] fn custody_lookup_happy_path() { - let Some(mut r) = TestRig::test_setup_after_peerdas() else { + let Some(mut r) = TestRig::test_setup_after_fulu() else { return; }; let spec = E::default_spec(); @@ -2238,7 +2226,7 @@ mod deneb_only { impl DenebTester { fn new(request_trigger: RequestTrigger) -> Option { - let Some(mut rig) = TestRig::test_setup_after_deneb() else { + let Some(mut rig) = TestRig::test_setup_after_deneb_before_fulu() else { return None; }; let (block, blobs) = rig.rand_block_and_blobs(NumBlobs::Random); @@ -2963,7 +2951,7 @@ mod deneb_only { #[ignore] #[test] fn no_peer_penalty_when_rpc_response_already_known_from_gossip() { - let Some(mut r) = TestRig::test_setup_after_deneb() else { + let Some(mut r) = TestRig::test_setup_after_deneb_before_fulu() else { return; }; let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(2)); diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index 05d5e4a414..cfd89f7b44 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -3,8 +3,13 @@ use crate::status::ToStatusMessage; use crate::sync::manager::SLOT_IMPORT_TOLERANCE; use crate::sync::range_sync::RangeSyncType; use crate::sync::SyncMessage; +use 
beacon_chain::data_column_verification::CustodyDataColumn; use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; use beacon_chain::{block_verification_types::RpcBlock, EngineState, NotifyExecutionLayer}; +use lighthouse_network::rpc::methods::{ + BlobsByRangeRequest, DataColumnsByRangeRequest, OldBlocksByRangeRequest, + OldBlocksByRangeRequestV2, +}; use lighthouse_network::rpc::{RequestType, StatusMessage}; use lighthouse_network::service::api_types::{AppRequestId, Id, SyncRequestId}; use lighthouse_network::{PeerId, SyncInfo}; @@ -16,6 +21,47 @@ use types::{ const D: Duration = Duration::new(0, 0); +pub(crate) enum DataSidecars { + Blobs(BlobSidecarList), + DataColumns(Vec>), +} + +enum ByRangeDataRequestIds { + PreDeneb, + PrePeerDAS(Id, PeerId), + PostPeerDAS(Vec<(Id, PeerId)>), +} + +/// Sync tests are usually written in the form: +/// - Do some action +/// - Expect a request to be sent +/// - Complete the above request +/// +/// To make writing tests succinct, the machinery in this testing rig automatically identifies +/// _which_ request to complete. Picking the right request is critical for tests to pass, so this +/// filter allows better expressivity on the criteria to identify the right request. +#[derive(Default)] +struct RequestFilter { + peer: Option, + epoch: Option, +} + +impl RequestFilter { + fn peer(mut self, peer: PeerId) -> Self { + self.peer = Some(peer); + self + } + + fn epoch(mut self, epoch: u64) -> Self { + self.epoch = Some(epoch); + self + } +} + +fn filter() -> RequestFilter { + RequestFilter::default() +} + impl TestRig { /// Produce a head peer with an advanced head fn add_head_peer(&mut self) -> PeerId { @@ -67,7 +113,9 @@ impl TestRig { fn add_peer(&mut self, remote_info: SyncInfo) -> PeerId { // Create valid peer known to network globals - let peer_id = self.new_connected_peer(); + // TODO(fulu): Using supernode peers to ensure we have peer across all column + // subnets for syncing.
Should add tests connecting to full node peers. + let peer_id = self.new_connected_supernode_peer(); // Send peer to sync self.send_sync_message(SyncMessage::AddPeer(peer_id, remote_info.clone())); peer_id @@ -86,11 +134,13 @@ impl TestRig { } #[track_caller] - fn expect_chain_segment(&mut self) { - self.pop_received_processor_event(|ev| { - (ev.work_type() == beacon_processor::WorkType::ChainSegment).then_some(()) - }) - .unwrap_or_else(|e| panic!("Expect ChainSegment work event: {e:?}")); + fn expect_chain_segments(&mut self, count: usize) { + for i in 0..count { + self.pop_received_processor_event(|ev| { + (ev.work_type() == beacon_processor::WorkType::ChainSegment).then_some(()) + }) + .unwrap_or_else(|e| panic!("Expect ChainSegment work event count {i}: {e:?}")); + } } fn update_execution_engine_state(&mut self, state: EngineState) { @@ -98,39 +148,80 @@ impl TestRig { self.sync_manager.update_execution_engine_state(state); } - fn find_blocks_by_range_request(&mut self, target_peer_id: &PeerId) -> (Id, Option) { + fn find_blocks_by_range_request( + &mut self, + request_filter: RequestFilter, + ) -> ((Id, PeerId), ByRangeDataRequestIds) { + let filter_f = |peer: PeerId, start_slot: u64| { + if let Some(expected_epoch) = request_filter.epoch { + let epoch = Slot::new(start_slot).epoch(E::slots_per_epoch()).as_u64(); + if epoch != expected_epoch { + return false; + } + } + if let Some(expected_peer) = request_filter.peer { + if peer != expected_peer { + return false; + } + } + true + }; + let block_req_id = self .pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id, - request: RequestType::BlocksByRange(_), + request: + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( + OldBlocksByRangeRequestV2 { start_slot, .. 
}, + )), request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), - } if peer_id == target_peer_id => Some(*id), + } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), _ => None, }) .expect("Should have a blocks by range request"); - let blob_req_id = if self.after_deneb() { - Some( - self.pop_received_network_event(|ev| match ev { + let by_range_data_requests = if self.after_fulu() { + let mut data_columns_requests = vec![]; + while let Ok(data_columns_request) = self.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id, + request: + RequestType::DataColumnsByRange(DataColumnsByRangeRequest { + start_slot, .. + }), + request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), + _ => None, + }) { + data_columns_requests.push(data_columns_request); + } + if data_columns_requests.is_empty() { + panic!("Found zero DataColumnsByRange requests"); + } + ByRangeDataRequestIds::PostPeerDAS(data_columns_requests) + } else if self.after_deneb() { + let (id, peer) = self + .pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id, - request: RequestType::BlobsByRange(_), + request: RequestType::BlobsByRange(BlobsByRangeRequest { start_slot, .. 
}), request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), - } if peer_id == target_peer_id => Some(*id), + } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), _ => None, }) - .expect("Should have a blobs by range request"), - ) + .expect("Should have a blobs by range request"); + ByRangeDataRequestIds::PrePeerDAS(id, peer) } else { - None + ByRangeDataRequestIds::PreDeneb }; - (block_req_id, blob_req_id) + (block_req_id, by_range_data_requests) } - fn find_and_complete_blocks_by_range_request(&mut self, target_peer_id: PeerId) { - let (blocks_req_id, blobs_req_id) = self.find_blocks_by_range_request(&target_peer_id); + fn find_and_complete_blocks_by_range_request(&mut self, request_filter: RequestFilter) { + let ((blocks_req_id, block_peer), by_range_data_request_ids) = + self.find_blocks_by_range_request(request_filter); // Complete the request with a single stream termination self.log(&format!( @@ -138,28 +229,43 @@ impl TestRig { )); self.send_sync_message(SyncMessage::RpcBlock { request_id: SyncRequestId::RangeBlockAndBlobs { id: blocks_req_id }, - peer_id: target_peer_id, + peer_id: block_peer, beacon_block: None, seen_timestamp: D, }); - if let Some(blobs_req_id) = blobs_req_id { - // Complete the request with a single stream termination - self.log(&format!( - "Completing BlobsByRange request {blobs_req_id} with empty stream" - )); - self.send_sync_message(SyncMessage::RpcBlob { - request_id: SyncRequestId::RangeBlockAndBlobs { id: blobs_req_id }, - peer_id: target_peer_id, - blob_sidecar: None, - seen_timestamp: D, - }); + match by_range_data_request_ids { + ByRangeDataRequestIds::PreDeneb => {} + ByRangeDataRequestIds::PrePeerDAS(id, peer_id) => { + // Complete the request with a single stream termination + self.log(&format!( + "Completing BlobsByRange request {id} with empty stream" + )); + self.send_sync_message(SyncMessage::RpcBlob { + request_id: SyncRequestId::RangeBlockAndBlobs { id }, + peer_id, + blob_sidecar: None, + 
seen_timestamp: D, + }); + } + ByRangeDataRequestIds::PostPeerDAS(data_column_req_ids) => { + // Complete the request with a single stream termination + for (id, peer_id) in data_column_req_ids { + self.log(&format!( + "Completing DataColumnsByRange request {id} with empty stream" + )); + self.send_sync_message(SyncMessage::RpcDataColumn { + request_id: SyncRequestId::RangeBlockAndBlobs { id }, + peer_id, + data_column: None, + seen_timestamp: D, + }); + } + } } } - async fn create_canonical_block( - &mut self, - ) -> (SignedBeaconBlock, Option>) { + async fn create_canonical_block(&mut self) -> (SignedBeaconBlock, Option>) { self.harness.advance_slot(); let block_root = self @@ -170,20 +276,38 @@ impl TestRig { AttestationStrategy::AllValidators, ) .await; - // TODO(das): this does not handle data columns yet + let store = &self.harness.chain.store; let block = store.get_full_block(&block_root).unwrap().unwrap(); - let blobs = if block.fork_name_unchecked().deneb_enabled() { - store.get_blobs(&block_root).unwrap().blobs() + let fork = block.fork_name_unchecked(); + + let data_sidecars = if fork.fulu_enabled() { + store + .get_data_columns(&block_root) + .unwrap() + .map(|columns| { + columns + .into_iter() + .map(CustodyDataColumn::from_asserted_custody) + .collect() + }) + .map(DataSidecars::DataColumns) + } else if fork.deneb_enabled() { + store + .get_blobs(&block_root) + .unwrap() + .blobs() + .map(DataSidecars::Blobs) } else { None }; - (block, blobs) + + (block, data_sidecars) } async fn remember_block( &mut self, - (block, blob_sidecars): (SignedBeaconBlock, Option>), + (block, data_sidecars): (SignedBeaconBlock, Option>), ) { // This code is kind of duplicated from Harness::process_block, but takes sidecars directly. 
let block_root = block.canonical_root(); @@ -193,7 +317,7 @@ impl TestRig { .chain .process_block( block_root, - RpcBlock::new(Some(block_root), block.into(), blob_sidecars).unwrap(), + build_rpc_block(block.into(), &data_sidecars, &self.spec), NotifyExecutionLayer::Yes, BlockImportSource::RangeSync, || Ok(()), @@ -206,6 +330,22 @@ impl TestRig { } } +fn build_rpc_block( + block: Arc>, + data_sidecars: &Option>, + spec: &ChainSpec, +) -> RpcBlock { + match data_sidecars { + Some(DataSidecars::Blobs(blobs)) => { + RpcBlock::new(None, block, Some(blobs.clone())).unwrap() + } + Some(DataSidecars::DataColumns(columns)) => { + RpcBlock::new_with_custody_columns(None, block, columns.clone(), spec).unwrap() + } + None => RpcBlock::new_without_blobs(None, block), + } +} + #[test] fn head_chain_removed_while_finalized_syncing() { // NOTE: this is a regression test. @@ -217,14 +357,14 @@ fn head_chain_removed_while_finalized_syncing() { rig.assert_state(RangeSyncType::Head); // Sync should have requested a batch, grab the request. - let _ = rig.find_blocks_by_range_request(&head_peer); + let _ = rig.find_blocks_by_range_request(filter().peer(head_peer)); // Now get a peer with an advanced finalized epoch. let finalized_peer = rig.add_finalized_peer(); rig.assert_state(RangeSyncType::Finalized); // Sync should have requested a batch, grab the request - let _ = rig.find_blocks_by_range_request(&finalized_peer); + let _ = rig.find_blocks_by_range_request(filter().peer(finalized_peer)); // Fail the head chain by disconnecting the peer. rig.peer_disconnected(head_peer); @@ -251,14 +391,14 @@ async fn state_update_while_purging() { rig.assert_state(RangeSyncType::Head); // Sync should have requested a batch, grab the request. - let _ = rig.find_blocks_by_range_request(&head_peer); + let _ = rig.find_blocks_by_range_request(filter().peer(head_peer)); // Now get a peer with an advanced finalized epoch. 
let finalized_peer = rig.add_finalized_peer_with_root(finalized_peer_root); rig.assert_state(RangeSyncType::Finalized); // Sync should have requested a batch, grab the request - let _ = rig.find_blocks_by_range_request(&finalized_peer); + let _ = rig.find_blocks_by_range_request(filter().peer(finalized_peer)); // Now the chain knows both chains target roots. rig.remember_block(head_peer_block).await; @@ -277,15 +417,18 @@ fn pause_and_resume_on_ee_offline() { // make the ee offline rig.update_execution_engine_state(EngineState::Offline); // send the response to the request - rig.find_and_complete_blocks_by_range_request(peer1); + rig.find_and_complete_blocks_by_range_request(filter().peer(peer1).epoch(0)); // the beacon processor shouldn't have received any work rig.expect_empty_processor(); // while the ee is offline, more peers might arrive. Add a new finalized peer. - let peer2 = rig.add_finalized_peer(); + let _peer2 = rig.add_finalized_peer(); // send the response to the request - rig.find_and_complete_blocks_by_range_request(peer2); + // Don't filter requests and the columns requests may be sent to peer1 or peer2 + // We need to filter by epoch, because the previous batch eagerly sent requests for the next + // epoch for the other batch. So we can either filter by epoch or by sync type. + rig.find_and_complete_blocks_by_range_request(filter().epoch(0)); // the beacon processor shouldn't have received any work rig.expect_empty_processor(); // make the beacon processor available again.
rig.update_execution_engine_state(EngineState::Online); - rig.expect_chain_segment(); - rig.expect_chain_segment(); + // The head chain and finalized chain (2) should be in the processing queue + rig.expect_chain_segments(2); } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 45b1983492..02014a05a3 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -119,6 +119,11 @@ impl BlockCache { pub fn get_blobs<'a>(&'a mut self, block_root: &Hash256) -> Option<&'a BlobSidecarList> { self.blob_cache.get(block_root) } + pub fn get_data_columns(&mut self, block_root: &Hash256) -> Option> { + self.data_column_cache + .get(block_root) + .map(|map| map.values().cloned().collect::>()) + } pub fn get_data_column<'a>( &'a mut self, block_root: &Hash256, @@ -322,16 +327,15 @@ impl HotColdDB, BeaconNodeBackend> { db.compare_and_set_blob_info_with_write(<_>::default(), new_blob_info.clone())?; let data_column_info = db.load_data_column_info()?; - let eip7594_fork_slot = db + let fulu_fork_slot = db .spec - .eip7594_fork_epoch + .fulu_fork_epoch .map(|epoch| epoch.start_slot(E::slots_per_epoch())); let new_data_column_info = match &data_column_info { Some(data_column_info) => { // Set the oldest data column slot to the fork slot if it is not yet set. - let oldest_data_column_slot = data_column_info - .oldest_data_column_slot - .or(eip7594_fork_slot); + let oldest_data_column_slot = + data_column_info.oldest_data_column_slot.or(fulu_fork_slot); DataColumnInfo { oldest_data_column_slot, } @@ -339,7 +343,7 @@ impl HotColdDB, BeaconNodeBackend> { // First start. None => DataColumnInfo { // Set the oldest data column slot to the fork slot if it is not yet set. 
- oldest_data_column_slot: eip7594_fork_slot, + oldest_data_column_slot: fulu_fork_slot, }, }; db.compare_and_set_data_column_info_with_write( @@ -2037,6 +2041,40 @@ impl, Cold: ItemStore> HotColdDB }) } + /// Fetch columns for a given block from the store. + pub fn get_data_columns( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + if let Some(columns) = self.block_cache.lock().get_data_columns(block_root) { + metrics::inc_counter(&metrics::BEACON_DATA_COLUMNS_CACHE_HIT_COUNT); + return Ok(Some(columns)); + } + + let columns = self + .blobs_db + .iter_column_from::>(DBColumn::BeaconDataColumn, block_root.as_slice()) + .take_while(|res| { + res.as_ref() + .is_ok_and(|(key, _)| key.starts_with(block_root.as_slice())) + }) + .map(|result| { + let (_key, value) = result?; + let column = DataColumnSidecar::::from_ssz_bytes(&value).map(Arc::new)?; + self.block_cache + .lock() + .put_data_column(*block_root, column.clone()); + Ok(column) + }) + .collect::, Error>>()?; + + if columns.is_empty() { + Ok(None) + } else { + Ok(Some(columns)) + } + } + /// Fetch blobs for a given block from the store. pub fn get_blobs(&self, block_root: &Hash256) -> Result, Error> { // Check the cache. @@ -2079,13 +2117,8 @@ impl, Cold: ItemStore> HotColdDB self.blobs_db .iter_column_from::>(DBColumn::BeaconDataColumn, block_root.as_slice()) .take_while(|res| { - let Ok((key, _)) = res else { return false }; - - if !key.starts_with(block_root.as_slice()) { - return false; - } - - true + res.as_ref() + .is_ok_and(|(key, _)| key.starts_with(block_root.as_slice())) }) .map(|key| key.and_then(|(key, _)| parse_data_column_key(key).map(|key| key.1))) .collect() @@ -2282,7 +2315,7 @@ impl, Cold: ItemStore> HotColdDB /// Initialize the `DataColumnInfo` when starting from genesis or a checkpoint. 
pub fn init_data_column_info(&self, anchor_slot: Slot) -> Result { - let oldest_data_column_slot = self.spec.eip7594_fork_epoch.map(|fork_epoch| { + let oldest_data_column_slot = self.spec.fulu_fork_epoch.map(|fork_epoch| { std::cmp::max(anchor_slot, fork_epoch.start_slot(E::slots_per_epoch())) }); let data_column_info = DataColumnInfo { diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 3f076a767a..1d70e105b9 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -225,10 +225,10 @@ impl StoreItem for BlobInfo { pub struct DataColumnInfo { /// The slot after which data columns are or *will be* available (>=). /// - /// If this slot is in the future, then it is the first slot of the EIP-7594 fork, from which + /// If this slot is in the future, then it is the first slot of the Fulu fork, from which /// data columns will be available. /// - /// If the `oldest_data_column_slot` is `None` then this means that the EIP-7594 fork epoch is + /// If the `oldest_data_column_slot` is `None` then this means that the Fulu fork epoch is /// not yet known. 
pub oldest_data_column_slot: Option, } diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index f92de4225d..74fe727867 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -53,8 +53,6 @@ ELECTRA_FORK_EPOCH: 18446744073709551615 # Fulu FULU_FORK_VERSION: 0x06000000 FULU_FORK_EPOCH: 18446744073709551615 -# PeerDAS -EIP7594_FORK_EPOCH: 18446744073709551615 # Time parameters # --------------------------------------------------------------- diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 70b4b73d52..b224cde048 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -54,7 +54,7 @@ impl ForkChoiceTest { /// Creates a new tester with a custom chain config. pub fn new_with_chain_config(chain_config: ChainConfig) -> Self { // Run fork choice tests against the latest fork. - let spec = ForkName::latest().make_genesis_spec(ChainSpec::default()); + let spec = ForkName::latest_stable().make_genesis_spec(ChainSpec::default()); let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.into()) .chain_config(chain_config) diff --git a/consensus/types/presets/gnosis/deneb.yaml b/consensus/types/presets/gnosis/deneb.yaml index 9a46a6dafe..d25c4d3d38 100644 --- a/consensus/types/presets/gnosis/deneb.yaml +++ b/consensus/types/presets/gnosis/deneb.yaml @@ -1,6 +1,4 @@ # Gnosis preset - Deneb -# NOTE: The below are PLACEHOLDER values from Mainnet. 
-# Gnosis preset for the Deneb fork TBD: https://github.com/gnosischain/configs/tree/main/presets/gnosis # Misc # --------------------------------------------------------------- diff --git a/consensus/types/presets/gnosis/eip7594.yaml b/consensus/types/presets/gnosis/eip7594.yaml deleted file mode 100644 index 813febf26d..0000000000 --- a/consensus/types/presets/gnosis/eip7594.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# Mainnet preset - EIP7594 - -# Misc -# --------------------------------------------------------------- -# `uint64(2**6)` (= 64) -FIELD_ELEMENTS_PER_CELL: 64 -# `uint64(2 * 4096)` (= 8192) -FIELD_ELEMENTS_PER_EXT_BLOB: 8192 -# uint64(floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) -KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: 4 diff --git a/consensus/types/presets/gnosis/fulu.yaml b/consensus/types/presets/gnosis/fulu.yaml index 35a7c98fbf..e5f3ce0212 100644 --- a/consensus/types/presets/gnosis/fulu.yaml +++ b/consensus/types/presets/gnosis/fulu.yaml @@ -1,3 +1,10 @@ # Gnosis preset - Fulu -FULU_PLACEHOLDER: 0 +# Misc +# --------------------------------------------------------------- +# `uint64(2**6)` (= 64) +FIELD_ELEMENTS_PER_CELL: 64 +# `uint64(2 * 4096)` (= 8192) +FIELD_ELEMENTS_PER_EXT_BLOB: 8192 +# uint64(floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) +KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: 4 diff --git a/consensus/types/presets/mainnet/eip7594.yaml b/consensus/types/presets/mainnet/eip7594.yaml deleted file mode 100644 index 813febf26d..0000000000 --- a/consensus/types/presets/mainnet/eip7594.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# Mainnet preset - EIP7594 - -# Misc -# --------------------------------------------------------------- -# `uint64(2**6)` (= 64) -FIELD_ELEMENTS_PER_CELL: 64 -# `uint64(2 * 4096)` (= 8192) -FIELD_ELEMENTS_PER_EXT_BLOB: 8192 -# uint64(floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) -KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: 4 diff --git 
a/consensus/types/presets/mainnet/fulu.yaml b/consensus/types/presets/mainnet/fulu.yaml index 8aa9ccdcc3..394f335f90 100644 --- a/consensus/types/presets/mainnet/fulu.yaml +++ b/consensus/types/presets/mainnet/fulu.yaml @@ -1,3 +1,10 @@ # Mainnet preset - Fulu -FULU_PLACEHOLDER: 0 +# Misc +# --------------------------------------------------------------- +# `uint64(2**6)` (= 64) +FIELD_ELEMENTS_PER_CELL: 64 +# `uint64(2 * 4096)` (= 8192) +FIELD_ELEMENTS_PER_EXT_BLOB: 8192 +# uint64(floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) +KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: 4 diff --git a/consensus/types/presets/minimal/eip7594.yaml b/consensus/types/presets/minimal/eip7594.yaml deleted file mode 100644 index 847719a421..0000000000 --- a/consensus/types/presets/minimal/eip7594.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# Minimal preset - EIP7594 - -# Misc -# --------------------------------------------------------------- -# `uint64(2**6)` (= 64) -FIELD_ELEMENTS_PER_CELL: 64 -# `uint64(2 * 4096)` (= 8192) -FIELD_ELEMENTS_PER_EXT_BLOB: 8192 -# uint64(floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) -KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: 4 diff --git a/consensus/types/presets/minimal/fulu.yaml b/consensus/types/presets/minimal/fulu.yaml index 121c9858f4..c961eb7f3c 100644 --- a/consensus/types/presets/minimal/fulu.yaml +++ b/consensus/types/presets/minimal/fulu.yaml @@ -1,3 +1,10 @@ # Minimal preset - Fulu -FULU_PLACEHOLDER: 0 +# Misc +# --------------------------------------------------------------- +# `uint64(2**6)` (= 64) +FIELD_ELEMENTS_PER_CELL: 64 +# `uint64(2 * 4096)` (= 8192) +FIELD_ELEMENTS_PER_EXT_BLOB: 8192 +# uint64(floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) +KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: 4 diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 91d64f5c8e..230805e86c 100644 --- a/consensus/types/src/chain_spec.rs +++ 
b/consensus/types/src/chain_spec.rs @@ -198,12 +198,6 @@ pub struct ChainSpec { pub fulu_fork_version: [u8; 4], /// The Fulu fork epoch is optional, with `None` representing "Fulu never happens". pub fulu_fork_epoch: Option, - pub fulu_placeholder: u64, - - /* - * DAS params - */ - pub eip7594_fork_epoch: Option, pub number_of_columns: u64, pub number_of_custody_groups: u64, pub data_column_sidecar_subnet_count: u64, @@ -440,16 +434,16 @@ impl ChainSpec { } } - /// Returns true if the given epoch is greater than or equal to the `EIP7594_FORK_EPOCH`. + /// Returns true if the given epoch is greater than or equal to the `FULU_FORK_EPOCH`. pub fn is_peer_das_enabled_for_epoch(&self, block_epoch: Epoch) -> bool { - self.eip7594_fork_epoch - .is_some_and(|eip7594_fork_epoch| block_epoch >= eip7594_fork_epoch) + self.fulu_fork_epoch + .is_some_and(|fulu_fork_epoch| block_epoch >= fulu_fork_epoch) } - /// Returns true if `EIP7594_FORK_EPOCH` is set and is not set to `FAR_FUTURE_EPOCH`. + /// Returns true if `FULU_FORK_EPOCH` is set and is not set to `FAR_FUTURE_EPOCH`. pub fn is_peer_das_scheduled(&self) -> bool { - self.eip7594_fork_epoch - .is_some_and(|eip7594_fork_epoch| eip7594_fork_epoch != self.far_future_epoch) + self.fulu_fork_epoch + .is_some_and(|fulu_fork_epoch| fulu_fork_epoch != self.far_future_epoch) } /// Returns a full `Fork` struct for a given epoch. 
@@ -916,17 +910,11 @@ impl ChainSpec { */ fulu_fork_version: [0x06, 0x00, 0x00, 0x00], fulu_fork_epoch: None, - fulu_placeholder: 0, - - /* - * DAS params - */ - eip7594_fork_epoch: None, - number_of_columns: 128, + custody_requirement: 4, number_of_custody_groups: 128, data_column_sidecar_subnet_count: 128, + number_of_columns: 128, samples_per_slot: 8, - custody_requirement: 4, /* * Network specific @@ -1045,8 +1033,6 @@ impl ChainSpec { // Fulu fulu_fork_version: [0x06, 0x00, 0x00, 0x01], fulu_fork_epoch: None, - // PeerDAS - eip7594_fork_epoch: None, // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, @@ -1254,17 +1240,11 @@ impl ChainSpec { */ fulu_fork_version: [0x06, 0x00, 0x00, 0x64], fulu_fork_epoch: None, - fulu_placeholder: 0, - - /* - * DAS params - */ - eip7594_fork_epoch: None, - number_of_columns: 128, + custody_requirement: 4, number_of_custody_groups: 128, data_column_sidecar_subnet_count: 128, + number_of_columns: 128, samples_per_slot: 8, - custody_requirement: 4, /* * Network specific @@ -1408,11 +1388,6 @@ pub struct Config { #[serde(deserialize_with = "deserialize_fork_epoch")] pub fulu_fork_epoch: Option>, - #[serde(default)] - #[serde(serialize_with = "serialize_fork_epoch")] - #[serde(deserialize_with = "deserialize_fork_epoch")] - pub eip7594_fork_epoch: Option>, - #[serde(with = "serde_utils::quoted_u64")] seconds_per_slot: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -1855,10 +1830,6 @@ impl Config { .fulu_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), - eip7594_fork_epoch: spec - .eip7594_fork_epoch - .map(|epoch| MaybeQuoted { value: epoch }), - seconds_per_slot: spec.seconds_per_slot, seconds_per_eth1_block: spec.seconds_per_eth1_block, min_validator_withdrawability_delay: spec.min_validator_withdrawability_delay, @@ -1945,7 +1916,6 @@ impl Config { electra_fork_version, fulu_fork_epoch, fulu_fork_version, - eip7594_fork_epoch, seconds_per_slot, seconds_per_eth1_block, 
min_validator_withdrawability_delay, @@ -2015,7 +1985,6 @@ impl Config { electra_fork_version, fulu_fork_epoch: fulu_fork_epoch.map(|q| q.value), fulu_fork_version, - eip7594_fork_epoch: eip7594_fork_epoch.map(|q| q.value), seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index b2a050e9d5..90a914dfae 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -133,20 +133,6 @@ impl DataColumnSidecar { .len() } - pub fn empty() -> Self { - Self { - index: 0, - column: DataColumn::::default(), - kzg_commitments: VariableList::default(), - kzg_proofs: VariableList::default(), - signed_block_header: SignedBeaconBlockHeader { - message: BeaconBlockHeader::empty(), - signature: Signature::empty(), - }, - kzg_commitments_inclusion_proof: Default::default(), - } - } - pub fn id(&self) -> DataColumnIdentifier { DataColumnIdentifier { block_root: self.block_root(), diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index b61e0a4d4a..40557e0cb9 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -49,6 +49,13 @@ impl ForkName { *ForkName::list_all().last().unwrap() } + /// Returns the fork primarily used for testing purposes. + /// This fork serves as the baseline for many tests, and the goal + /// is to ensure features are passing on this fork. + pub fn latest_stable() -> ForkName { + ForkName::Electra + } + /// Set the activation slots in the given `ChainSpec` so that the fork named by `self` /// is the only fork in effect from genesis. 
pub fn make_genesis_spec(&self, mut spec: ChainSpec) -> ChainSpec { diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 9a9915e458..707d2d4697 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -276,21 +276,6 @@ impl ElectraPreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct FuluPreset { - #[serde(with = "serde_utils::quoted_u64")] - pub fulu_placeholder: u64, -} - -impl FuluPreset { - pub fn from_chain_spec(spec: &ChainSpec) -> Self { - Self { - fulu_placeholder: spec.fulu_placeholder, - } - } -} - -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -#[serde(rename_all = "UPPERCASE")] -pub struct Eip7594Preset { #[serde(with = "serde_utils::quoted_u64")] pub field_elements_per_cell: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -299,7 +284,7 @@ pub struct Eip7594Preset { pub kzg_commitments_inclusion_proof_depth: u64, } -impl Eip7594Preset { +impl FuluPreset { pub fn from_chain_spec(_spec: &ChainSpec) -> Self { Self { field_elements_per_cell: E::field_elements_per_cell() as u64, @@ -357,9 +342,6 @@ mod test { let fulu: FuluPreset = preset_from_file(&preset_name, "fulu.yaml"); assert_eq!(fulu, FuluPreset::from_chain_spec::(&spec)); - - let eip7594: Eip7594Preset = preset_from_file(&preset_name, "eip7594.yaml"); - assert_eq!(eip7594, Eip7594Preset::from_chain_spec::(&spec)); } #[test] diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index 348ed785af..2a5c6e47f5 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -21,6 +21,9 @@ pub use rust_eth_kzg::{ Cell, CellIndex as CellID, CellRef, TrustedSetup as PeerDASTrustedSetup, }; +// Note: `spec.number_of_columns` is a config and should match `CELLS_PER_EXT_BLOB` - however this +// is a constant in the KZG library - be aware that overriding `number_of_columns` will break KZG +// operations. 
pub type CellsAndKzgProofs = ([Cell; CELLS_PER_EXT_BLOB], [KzgProof; CELLS_PER_EXT_BLOB]); pub type KzgBlobRef<'a> = &'a [u8; BYTES_PER_BLOB]; diff --git a/scripts/local_testnet/network_params_das.yaml b/scripts/local_testnet/network_params_das.yaml index ab2f07a24e..030aa2b820 100644 --- a/scripts/local_testnet/network_params_das.yaml +++ b/scripts/local_testnet/network_params_das.yaml @@ -3,6 +3,7 @@ participants: cl_image: lighthouse:local cl_extra_params: - --subscribe-all-data-column-subnets + - --subscribe-all-subnets - --target-peers=3 count: 2 - cl_type: lighthouse @@ -11,11 +12,14 @@ participants: - --target-peers=3 count: 2 network_params: - eip7594_fork_epoch: 0 + electra_fork_epoch: 1 + fulu_fork_epoch: 2 seconds_per_slot: 6 snooper_enabled: false global_log_level: debug additional_services: - dora - - goomy_blob + - spamoor_blob - prometheus_grafana +dora_params: + image: ethpandaops/dora:fulu-support \ No newline at end of file diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index bf9e5d6cfa..02a01555b4 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -49,11 +49,10 @@ excluded_paths = [ "bls12-381-tests/hash_to_G2", "tests/.*/eip6110", "tests/.*/whisk", - "tests/.*/eip7594", - # Fulu tests are not yet being run - "tests/.*/fulu", # TODO(electra): SingleAttestation tests are waiting on Eitan's PR - "tests/.*/electra/ssz_static/SingleAttestation" + "tests/.*/electra/ssz_static/SingleAttestation", + "tests/.*/fulu/ssz_static/SingleAttestation", + "tests/.*/fulu/ssz_static/MatrixEntry", ] diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index 54a142a96b..4a202ee3d2 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -91,6 +91,9 @@ pub use transition::TransitionTest; /// to return `true` for the feature in order for the feature test vector to be tested. 
#[derive(Debug, PartialEq, Clone, Copy)] pub enum FeatureName { + // TODO(fulu): to be removed once we start using Fulu types for test vectors. + // Existing SSZ types for PeerDAS (Fulu) are the same as Electra, so the test vectors get + // loaded as Electra types (default serde behaviour for untagged enums). Fulu, } diff --git a/testing/ef_tests/src/cases/compute_columns_for_custody_groups.rs b/testing/ef_tests/src/cases/compute_columns_for_custody_groups.rs index 1d0bf951bc..8a6330d399 100644 --- a/testing/ef_tests/src/cases/compute_columns_for_custody_groups.rs +++ b/testing/ef_tests/src/cases/compute_columns_for_custody_groups.rs @@ -21,12 +21,8 @@ impl LoadCase for ComputeColumnsForCustodyGroups { } impl Case for ComputeColumnsForCustodyGroups { - fn is_enabled_for_fork(_fork_name: ForkName) -> bool { - false - } - - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name.fulu_enabled() } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/get_custody_groups.rs b/testing/ef_tests/src/cases/get_custody_groups.rs index f8c4370aeb..1c1294305f 100644 --- a/testing/ef_tests/src/cases/get_custody_groups.rs +++ b/testing/ef_tests/src/cases/get_custody_groups.rs @@ -24,12 +24,8 @@ impl LoadCase for GetCustodyGroups { } impl Case for GetCustodyGroups { - fn is_enabled_for_fork(_fork_name: ForkName) -> bool { - false - } - - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name.fulu_enabled() } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs b/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs index 8df43bb267..6ab9a8db65 100644 --- 
a/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs +++ b/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs @@ -26,12 +26,8 @@ impl LoadCase for KZGComputeCellsAndKZGProofs { } impl Case for KZGComputeCellsAndKZGProofs { - fn is_enabled_for_fork(_fork_name: ForkName) -> bool { - false - } - - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name.fulu_enabled() } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs b/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs index 26ab4e96b5..732cb54f31 100644 --- a/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs +++ b/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs @@ -27,12 +27,8 @@ impl LoadCase for KZGRecoverCellsAndKZGProofs { } impl Case for KZGRecoverCellsAndKZGProofs { - fn is_enabled_for_fork(_fork_name: ForkName) -> bool { - false - } - - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name.fulu_enabled() } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs b/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs index fc625063b1..e3edc0df0a 100644 --- a/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs +++ b/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs @@ -29,12 +29,8 @@ impl LoadCase for KZGVerifyCellKZGProofBatch { } impl Case for KZGVerifyCellKZGProofBatch { - fn is_enabled_for_fork(_fork_name: ForkName) -> bool { - false - } - - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu + fn is_enabled_for_fork(fork_name: ForkName) -> 
bool { + fork_name.fulu_enabled() } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 6c0165efab..d1ddd6a48f 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -355,11 +355,14 @@ where } fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - // This ensures we only run the tests **once** for the feature, using the types matching the - // correct fork, e.g. `Fulu` uses SSZ types from `Electra` fork as of spec test version - // `v1.5.0-beta.0`, therefore the `Fulu` tests should get included when testing Electra types. + // TODO(fulu): to be removed once Fulu types start differing from Electra. We currently run Fulu tests as a + // "feature" - this means we use Electra types for Fulu SSZ tests (except for PeerDAS types, e.g. `DataColumnSidecar`). // - // e.g. Fulu test vectors are executed in the first line below, but excluded in the 2nd + // This ensures we only run the tests **once** for `Fulu`, using the types matching the + // correct fork, e.g. `Fulu` uses SSZ types from `Electra` as of spec test version + // `v1.5.0-beta.0`, therefore the `Fulu` tests should get included when testing Electra types. + // + // e.g.
Fulu test vectors are executed in the 2nd line below, but excluded in the 1st // line when testing the type `AttestationElectra`: // // ``` @@ -890,6 +893,10 @@ impl Handler for GetCustodyGroupsHandler { fn handler_name(&self) -> String { "get_custody_groups".into() } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Fulu + } } #[derive(Derivative)] @@ -910,6 +917,10 @@ impl Handler for ComputeColumnsForCustodyGroupHandler fn handler_name(&self) -> String { "compute_columns_for_custody_group".into() } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Fulu + } } #[derive(Derivative)] @@ -930,6 +941,10 @@ impl Handler for KZGComputeCellsAndKZGProofHandler { fn handler_name(&self) -> String { "compute_cells_and_kzg_proofs".into() } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Fulu + } } #[derive(Derivative)] @@ -950,6 +965,10 @@ impl Handler for KZGVerifyCellKZGProofBatchHandler { fn handler_name(&self) -> String { "verify_cell_kzg_proof_batch".into() } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Fulu + } } #[derive(Derivative)] @@ -970,6 +989,10 @@ impl Handler for KZGRecoverCellsAndKZGProofHandler { fn handler_name(&self) -> String { "recover_cells_and_kzg_proofs".into() } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Fulu + } } #[derive(Derivative)] diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index c50032a63d..285ac951a6 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -54,6 +54,7 @@ type_name_generic!(BeaconBlockBodyBellatrix, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyCapella, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyDeneb, "BeaconBlockBody"); 
type_name_generic!(BeaconBlockBodyElectra, "BeaconBlockBody"); +type_name_generic!(BeaconBlockBodyFulu, "BeaconBlockBody"); type_name!(BeaconBlockHeader); type_name_generic!(BeaconState); type_name!(BlobIdentifier); @@ -74,12 +75,14 @@ type_name_generic!(ExecutionPayloadBellatrix, "ExecutionPayload"); type_name_generic!(ExecutionPayloadCapella, "ExecutionPayload"); type_name_generic!(ExecutionPayloadDeneb, "ExecutionPayload"); type_name_generic!(ExecutionPayloadElectra, "ExecutionPayload"); +type_name_generic!(ExecutionPayloadFulu, "ExecutionPayload"); type_name_generic!(FullPayload, "ExecutionPayload"); type_name_generic!(ExecutionPayloadHeader); type_name_generic!(ExecutionPayloadHeaderBellatrix, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderCapella, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderDeneb, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderElectra, "ExecutionPayloadHeader"); +type_name_generic!(ExecutionPayloadHeaderFulu, "ExecutionPayloadHeader"); type_name_generic!(ExecutionRequests); type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); @@ -93,6 +96,7 @@ type_name_generic!(LightClientBootstrapAltair, "LightClientBootstrap"); type_name_generic!(LightClientBootstrapCapella, "LightClientBootstrap"); type_name_generic!(LightClientBootstrapDeneb, "LightClientBootstrap"); type_name_generic!(LightClientBootstrapElectra, "LightClientBootstrap"); +type_name_generic!(LightClientBootstrapFulu, "LightClientBootstrap"); type_name_generic!(LightClientFinalityUpdate); type_name_generic!(LightClientFinalityUpdateAltair, "LightClientFinalityUpdate"); type_name_generic!( @@ -104,11 +108,13 @@ type_name_generic!( LightClientFinalityUpdateElectra, "LightClientFinalityUpdate" ); +type_name_generic!(LightClientFinalityUpdateFulu, "LightClientFinalityUpdate"); type_name_generic!(LightClientHeader); type_name_generic!(LightClientHeaderAltair, "LightClientHeader"); 
type_name_generic!(LightClientHeaderCapella, "LightClientHeader"); type_name_generic!(LightClientHeaderDeneb, "LightClientHeader"); type_name_generic!(LightClientHeaderElectra, "LightClientHeader"); +type_name_generic!(LightClientHeaderFulu, "LightClientHeader"); type_name_generic!(LightClientOptimisticUpdate); type_name_generic!( LightClientOptimisticUpdateAltair, @@ -126,11 +132,16 @@ type_name_generic!( LightClientOptimisticUpdateElectra, "LightClientOptimisticUpdate" ); +type_name_generic!( + LightClientOptimisticUpdateFulu, + "LightClientOptimisticUpdate" +); type_name_generic!(LightClientUpdate); type_name_generic!(LightClientUpdateAltair, "LightClientUpdate"); type_name_generic!(LightClientUpdateCapella, "LightClientUpdate"); type_name_generic!(LightClientUpdateDeneb, "LightClientUpdate"); type_name_generic!(LightClientUpdateElectra, "LightClientUpdate"); +type_name_generic!(LightClientUpdateFulu, "LightClientUpdate"); type_name_generic!(PendingAttestation); type_name!(PendingConsolidation); type_name!(PendingPartialWithdrawal); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 61581128d4..bba7efde49 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -276,9 +276,9 @@ mod ssz_static { fn attestation() { SszStaticHandler::, MinimalEthSpec>::pre_electra().run(); SszStaticHandler::, MainnetEthSpec>::pre_electra().run(); - SszStaticHandler::, MinimalEthSpec>::electra_only() + SszStaticHandler::, MinimalEthSpec>::electra_and_later() .run(); - SszStaticHandler::, MainnetEthSpec>::electra_only() + SszStaticHandler::, MainnetEthSpec>::electra_and_later() .run(); } @@ -288,9 +288,9 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::pre_electra() .run(); - SszStaticHandler::, MinimalEthSpec>::electra_only() + SszStaticHandler::, MinimalEthSpec>::electra_and_later() .run(); - SszStaticHandler::, MainnetEthSpec>::electra_only() + SszStaticHandler::, MainnetEthSpec>::electra_and_later() 
.run(); } @@ -300,9 +300,9 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::pre_electra() .run(); - SszStaticHandler::, MinimalEthSpec>::electra_only() + SszStaticHandler::, MinimalEthSpec>::electra_and_later() .run(); - SszStaticHandler::, MainnetEthSpec>::electra_only() + SszStaticHandler::, MainnetEthSpec>::electra_and_later() .run(); } @@ -314,10 +314,10 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::pre_electra( ) .run(); - SszStaticHandler::, MinimalEthSpec>::electra_only( + SszStaticHandler::, MinimalEthSpec>::electra_and_later( ) .run(); - SszStaticHandler::, MainnetEthSpec>::electra_only( + SszStaticHandler::, MainnetEthSpec>::electra_and_later( ) .run(); } @@ -328,10 +328,10 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::pre_electra() .run(); - SszStaticHandler::, MinimalEthSpec>::electra_only( + SszStaticHandler::, MinimalEthSpec>::electra_and_later( ) .run(); - SszStaticHandler::, MainnetEthSpec>::electra_only( + SszStaticHandler::, MainnetEthSpec>::electra_and_later( ) .run(); } @@ -361,6 +361,8 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::electra_only() .run(); + SszStaticHandler::, MinimalEthSpec>::fulu_only().run(); + SszStaticHandler::, MainnetEthSpec>::fulu_only().run(); } // Altair and later @@ -399,6 +401,10 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::electra_only() .run(); + SszStaticHandler::, MinimalEthSpec>::fulu_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::fulu_only() + .run(); } // LightClientHeader has no internal indicator of which fork it is for, so we test it separately. @@ -430,6 +436,10 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::electra_only( ) .run(); + SszStaticHandler::, MinimalEthSpec>::fulu_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::fulu_only() + .run(); } // LightClientOptimisticUpdate has no internal indicator of which fork it is for, so we test it separately. 
@@ -445,6 +455,8 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::deneb_only().run(); SszStaticHandler::, MinimalEthSpec>::electra_only().run(); SszStaticHandler::, MainnetEthSpec>::electra_only().run(); + SszStaticHandler::, MinimalEthSpec>::fulu_only().run(); + SszStaticHandler::, MainnetEthSpec>::fulu_only().run(); } // LightClientFinalityUpdate has no internal indicator of which fork it is for, so we test it separately. @@ -480,6 +492,12 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::electra_only( ) .run(); + SszStaticHandler::, MinimalEthSpec>::fulu_only( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::fulu_only( + ) + .run(); } // LightClientUpdate has no internal indicator of which fork it is for, so we test it separately. @@ -509,6 +527,10 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::electra_only( ) .run(); + SszStaticHandler::, MinimalEthSpec>::fulu_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::fulu_only() + .run(); } #[test] @@ -566,6 +588,8 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::electra_only() .run(); + SszStaticHandler::, MinimalEthSpec>::fulu_only().run(); + SszStaticHandler::, MainnetEthSpec>::fulu_only().run(); } #[test] @@ -586,6 +610,10 @@ mod ssz_static { ::electra_only().run(); SszStaticHandler::, MainnetEthSpec> ::electra_only().run(); + SszStaticHandler::, MinimalEthSpec>::fulu_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::fulu_only() + .run(); } #[test] @@ -626,17 +654,17 @@ mod ssz_static { #[test] fn data_column_sidecar() { - SszStaticHandler::, MinimalEthSpec>::deneb_only() + SszStaticHandler::, MinimalEthSpec>::default() .run_for_feature(FeatureName::Fulu); - SszStaticHandler::, MainnetEthSpec>::deneb_only() + SszStaticHandler::, MainnetEthSpec>::default() .run_for_feature(FeatureName::Fulu); } #[test] fn data_column_identifier() { - SszStaticHandler::::deneb_only() + SszStaticHandler::::default() .run_for_feature(FeatureName::Fulu); - 
SszStaticHandler::::deneb_only() + SszStaticHandler::::default() .run_for_feature(FeatureName::Fulu); } @@ -901,20 +929,17 @@ fn kzg_verify_kzg_proof() { #[test] fn kzg_compute_cells_and_proofs() { - KZGComputeCellsAndKZGProofHandler::::default() - .run_for_feature(FeatureName::Fulu); + KZGComputeCellsAndKZGProofHandler::::default().run(); } #[test] fn kzg_verify_cell_proof_batch() { - KZGVerifyCellKZGProofBatchHandler::::default() - .run_for_feature(FeatureName::Fulu); + KZGVerifyCellKZGProofBatchHandler::::default().run(); } #[test] fn kzg_recover_cells_and_proofs() { - KZGRecoverCellsAndKZGProofHandler::::default() - .run_for_feature(FeatureName::Fulu); + KZGRecoverCellsAndKZGProofHandler::::default().run(); } #[test] @@ -949,14 +974,12 @@ fn rewards() { #[test] fn get_custody_groups() { - GetCustodyGroupsHandler::::default().run_for_feature(FeatureName::Fulu); - GetCustodyGroupsHandler::::default().run_for_feature(FeatureName::Fulu); + GetCustodyGroupsHandler::::default().run(); + GetCustodyGroupsHandler::::default().run() } #[test] fn compute_columns_for_custody_group() { - ComputeColumnsForCustodyGroupHandler::::default() - .run_for_feature(FeatureName::Fulu); - ComputeColumnsForCustodyGroupHandler::::default() - .run_for_feature(FeatureName::Fulu); + ComputeColumnsForCustodyGroupHandler::::default().run(); + ComputeColumnsForCustodyGroupHandler::::default().run(); } From d47b3e3e43677b56d2f6afb476d42b462518de74 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 30 Jan 2025 19:02:04 +1100 Subject: [PATCH 18/52] Cargo update without `rust_eth_kzg` (#6848) Update cargo dependencies while keeping `rust_eth_kzg` pinned to `0.5.1` due to the regression described in: - https://github.com/sigp/lighthouse/pull/6608 The changes from that PR were not sufficient to actually pin the dependencies of `rust_eth_kzg`, because the dependencies from the workspace Cargo.toml file were not being used anywhere. 
To fix this, I've added them as explicit dependencies in `crypto/kzg/Cargo.toml`. With this change, `cargo update` no longer tries to update them. --- Cargo.lock | 730 +++++++++++++++++++++--------------------- Dockerfile | 2 +- crypto/kzg/Cargo.toml | 6 + lcli/Dockerfile | 2 +- lighthouse/Cargo.toml | 2 +- 5 files changed, 377 insertions(+), 365 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 899435a66b..02871ed79c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -149,9 +149,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-consensus" @@ -204,9 +204,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.12" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fce5dbd6a4f118eecc4719eaa9c7ffc31c315e6c5ccde3642db927802312425" +checksum = "ec878088ec6283ce1e90d280316aadd3d6ce3de06ff63d68953c855e7e447e92" dependencies = [ "alloy-rlp", "arbitrary", @@ -217,9 +217,8 @@ dependencies = [ "derive_more 1.0.0", "foldhash", "getrandom", - "hashbrown 0.15.1", - "hex-literal", - "indexmap 2.6.0", + "hashbrown 0.15.2", + "indexmap 2.7.1", "itoa", "k256 0.13.4", "keccak-asm", @@ -228,7 +227,7 @@ dependencies = [ "proptest-derive", "rand", "ruint", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", "sha3 0.10.8", "tiny-keccak", @@ -236,9 +235,9 @@ dependencies = [ [[package]] name = "alloy-rlp" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0822426598f95e45dd1ea32a738dac057529a709ee645fcc516ffa4cbde08f" +checksum = "3d6c1d995bff8d011f7cd6c81820d51825e6e06d6db73914c1630ecf544d83d6" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -247,13 +246,13 
@@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" +checksum = "a40e1ef334153322fd878d07e86af7a529bcb86b2439525920a88eba87bcf943" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -318,19 +317,20 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "3.0.6" +version = "3.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" +checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" dependencies = [ "anstyle", + "once_cell", "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.93" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "arbitrary" @@ -522,7 +522,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", "synstructure", ] @@ -534,7 +534,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -567,7 +567,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.41", + "rustix 0.38.44", "slab", "tracing", "windows-sys 0.59.0", @@ -579,20 +579,20 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "event-listener-strategy", "pin-project-lite", ] [[package]] name = "async-trait" 
-version = "0.1.83" +version = "0.1.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -643,13 +643,13 @@ dependencies = [ [[package]] name = "auto_impl" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" +checksum = "e12882f59de5360c748c4cbf569a042d5fb0eb515f7bea9c1f470b47f6ffbd73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -668,10 +668,10 @@ dependencies = [ "axum-core", "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.1", + "hyper 1.5.2", "hyper-util", "itoa", "matchit", @@ -701,7 +701,7 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "http-body-util", "mime", @@ -848,7 +848,7 @@ dependencies = [ "genesis", "hex", "http_api", - "hyper 1.5.1", + "hyper 1.5.2", "lighthouse_network", "monitoring_api", "node_test_rig", @@ -917,7 +917,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -930,7 +930,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.89", + "syn 2.0.96", "which", ] @@ -957,9 +957,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.6.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" [[package]] name = "bitvec" @@ -1145,9 +1145,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" dependencies = [ "serde", ] @@ -1199,9 +1199,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" dependencies = [ "serde", ] @@ -1214,7 +1214,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.23", + "semver 1.0.25", "serde", "serde_json", "thiserror 1.0.69", @@ -1228,9 +1228,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.1" +version = "1.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47" +checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" dependencies = [ "jobserver", "libc", @@ -1284,9 +1284,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.38" +version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1354,9 
+1354,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.21" +version = "4.5.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" +checksum = "769b0145982b4b48713e01ec42d61614425f27b7058bda7180a3a41f30104796" dependencies = [ "clap_builder", "clap_derive", @@ -1364,9 +1364,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.21" +version = "4.5.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" +checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" dependencies = [ "anstream", "anstyle", @@ -1377,21 +1377,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] name = "clap_lex" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "clap_utils" @@ -1452,9 +1452,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.51" +version = "0.1.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" +checksum = "c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e" dependencies = [ "cc", ] @@ -1492,9 +1492,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.13.2" +version = "1.14.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "487981fa1af147182687064d0a2c336586d337a606595ced9ffb0c685c250c73" +checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" dependencies = [ "cfg-if", "cpufeatures", @@ -1654,18 +1654,18 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", @@ -1682,15 +1682,15 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" [[package]] name = "crypto-bigint" @@ -1799,7 +1799,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -1847,7 +1847,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -1869,7 +1869,7 @@ checksum 
= "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -1900,15 +1900,15 @@ checksum = "04d2cd9c18b9f454ed67da600630b021a8a80bf33f8c95896ab33aaf1c26b728" [[package]] name = "data-encoding" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" [[package]] name = "data-encoding-macro" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" +checksum = "5b16d9d0d88a5273d830dac8b78ceb217ffc9b1d5404e5597a3542515329405b" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -1916,12 +1916,12 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" +checksum = "1145d32e826a7748b69ee8fc62d3e6355ff7f1051df53141e7048162fc90481b" dependencies = [ "data-encoding", - "syn 1.0.109", + "syn 2.0.96", ] [[package]] @@ -2035,7 +2035,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -2048,7 +2048,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -2068,17 +2068,17 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", "unicode-xid", ] [[package]] name = "diesel" -version = "2.2.5" +version = "2.2.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf9649c05e0a9dbd6d0b0b8301db5182b972d0fd02f0a7c6736cf632d7c0fd5" +checksum = "ccf1bedf64cdb9643204a36dd15b19a6ce8e7aa7f7b105868e9f1fad5ffa7d12" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "byteorder", "diesel_derives", "itoa", @@ -2096,7 +2096,7 @@ dependencies = [ "dsl_auto_type", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -2116,7 +2116,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" dependencies = [ - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -2231,7 +2231,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -2262,7 +2262,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -2442,7 +2442,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -2506,12 +2506,12 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2822,7 +2822,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -2956,9 +2956,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.3.1" +version = "5.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +checksum = 
"3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" dependencies = [ "concurrent-queue", "parking", @@ -2967,11 +2967,11 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "pin-project-lite", ] @@ -3086,9 +3086,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "fastrlp" @@ -3101,6 +3101,17 @@ dependencies = [ "bytes", ] +[[package]] +name = "fastrlp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + [[package]] name = "fdlimit" version = "0.3.0" @@ -3213,9 +3224,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" [[package]] name = "foreign-types" @@ -3340,9 +3351,9 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" +checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" dependencies = [ "futures-core", "pin-project-lite", @@ -3356,7 +3367,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -3366,7 +3377,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.18", + "rustls 0.23.21", "rustls-pki-types", ] @@ -3497,14 +3508,14 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] name = "glob" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "gloo-timers" @@ -3596,7 +3607,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.6.0", + "indexmap 2.7.1", "slab", "tokio", "tokio-util", @@ -3646,9 +3657,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.1" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" dependencies = [ "allocator-api2", "equivalent", @@ -3759,12 +3770,6 @@ dependencies = [ "serde", ] -[[package]] -name = "hex-literal" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" - [[package]] name = "hex_fmt" version = "0.3.0" @@ 
-3773,9 +3778,9 @@ checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" [[package]] name = "hickory-proto" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512" +checksum = "447afdcdb8afb9d0a852af6dc65d9b285ce720ed7a59e42a8bf2e931c67bc1b5" dependencies = [ "async-trait", "cfg-if", @@ -3784,7 +3789,7 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna 0.4.0", + "idna", "ipnet", "once_cell", "rand", @@ -3798,9 +3803,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" +checksum = "0a2e2aba9c389ce5267d31cf1e4dace82390ae276b0b364ea55630b1fa1b44b4" dependencies = [ "cfg-if", "futures-util", @@ -3868,11 +3873,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3899,9 +3904,9 @@ dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -3926,7 +3931,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.1.0", + "http 1.2.0", ] [[package]] @@ -3937,7 +3942,7 @@ checksum 
= "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "pin-project-lite", ] @@ -4032,9 +4037,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.31" +version = "0.14.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" dependencies = [ "bytes", "futures-channel", @@ -4056,14 +4061,14 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" +checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0" dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "httparse", "httpdate", @@ -4081,7 +4086,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", @@ -4094,7 +4099,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.31", + "hyper 0.14.32", "native-tls", "tokio", "tokio-native-tls", @@ -4108,9 +4113,9 @@ checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", - "hyper 1.5.1", + "hyper 1.5.2", "pin-project-lite", "tokio", "tower-service", @@ -4254,7 +4259,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -4263,16 +4268,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" -[[package]] -name = "idna" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "idna" version = "1.0.3" @@ -4338,7 +4333,7 @@ dependencies = [ "bytes", "futures", "http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "log", "rand", "tokio", @@ -4399,7 +4394,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -4420,13 +4415,13 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.6.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.15.1", + "hashbrown 0.15.2", "serde", ] @@ -4520,19 +4515,19 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "is-terminal" -version = "0.4.13" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" +checksum = "e19b23d53f35ce9f56aebc7d1bb4e6ac1e9c0db7ac85c8d1760c04379edced37" dependencies = [ "hermit-abi 0.4.0", "libc", - "windows-sys 0.52.0", + 
"windows-sys 0.59.0", ] [[package]] @@ -4570,9 +4565,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "540654e97a3f4470a492cd30ff187bc95d89557a903a2bbf112e2fae98104ef2" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" [[package]] name = "jobserver" @@ -4585,10 +4580,11 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.72" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" dependencies = [ + "once_cell", "wasm-bindgen", ] @@ -4669,6 +4665,11 @@ version = "0.1.0" dependencies = [ "arbitrary", "c-kzg", + "crate_crypto_internal_eth_kzg_bls12_381", + "crate_crypto_internal_eth_kzg_erasure_codes", + "crate_crypto_internal_eth_kzg_maybe_rayon", + "crate_crypto_internal_eth_kzg_polynomial", + "crate_crypto_kzg_multi_open_fk20", "criterion", "derivative", "ethereum_hashing", @@ -4758,9 +4759,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.164" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libflate" @@ -4788,12 +4789,12 @@ dependencies = [ [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -5077,7 +5078,7 @@ dependencies = [ "quinn", "rand", "ring 0.17.8", - 
"rustls 0.23.18", + "rustls 0.23.21", "socket2", "thiserror 1.0.69", "tokio", @@ -5117,7 +5118,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -5149,7 +5150,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.8", - "rustls 0.23.18", + "rustls 0.23.21", "rustls-webpki 0.101.7", "thiserror 1.0.69", "x509-parser", @@ -5193,7 +5194,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "libc", ] @@ -5258,9 +5259,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.20" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" dependencies = [ "cc", "pkg-config", @@ -5390,9 +5391,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" @@ -5441,9 +5442,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.22" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" [[package]] name = "logging" @@ -5472,7 +5473,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 
0.15.1", + "hashbrown 0.15.2", ] [[package]] @@ -5697,20 +5698,19 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.0" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" dependencies = [ "adler2", ] [[package]] name = "mio" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ - "hermit-abi 0.3.9", "libc", "wasi", "windows-sys 0.52.0", @@ -5779,9 +5779,9 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.2" +version = "0.19.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2" +checksum = "6b430e7953c29dd6a09afc29ff0bb69c6e306329ee6794700aee27b76a1aea8d" dependencies = [ "core2", "unsigned-varint 0.8.0", @@ -5857,24 +5857,23 @@ dependencies = [ [[package]] name = "netlink-proto" -version = "0.11.3" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b33524dc0968bfad349684447bfce6db937a9ac3332a1fe60c0c5a5ce63f21" +checksum = "b2741a6c259755922e3ed29ebce3b299cc2160c4acae94b465b5938ab02c2bbe" dependencies = [ "bytes", "futures", "log", "netlink-packet-core", "netlink-sys", - "thiserror 1.0.69", - "tokio", + "thiserror 2.0.11", ] [[package]] name = "netlink-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "416060d346fbaf1f23f9512963e3e878f1a78e707cb699ba9215761754244307" +checksum = "16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23" 
dependencies = [ "bytes", "futures", @@ -5960,7 +5959,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cfg-if", "cfg_aliases", "libc", @@ -6094,9 +6093,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.5" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] @@ -6166,7 +6165,7 @@ version = "0.10.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cfg-if", "foreign-types", "libc", @@ -6183,7 +6182,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -6364,7 +6363,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.7", + "redox_syscall 0.5.8", "smallvec", "windows-targets 0.52.6", ] @@ -6434,12 +6433,12 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" +checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 1.0.69", + "thiserror 2.0.11", "ucd-trie", ] @@ -6455,47 +6454,47 @@ dependencies = [ [[package]] name = "phf" -version = "0.11.2" +version = "0.11.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ "phf_shared", ] [[package]] name = "phf_shared" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ "siphasher", ] [[package]] name = "pin-project" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -6573,7 +6572,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.41", + "rustix 0.38.44", "tracing", "windows-sys 0.59.0", ] @@ -6664,12 +6663,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.25" +version = "0.2.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" dependencies = [ "proc-macro2", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -6729,9 +6728,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.92" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] @@ -6786,7 +6785,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -6797,7 +6796,7 @@ checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.6.0", + "bitflags 2.8.0", "lazy_static", "num-traits", "rand", @@ -6811,13 +6810,13 @@ dependencies = [ [[package]] name = "proptest-derive" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" +checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -6918,10 +6917,10 @@ dependencies = [ "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.0.0", - "rustls 0.23.18", + "rustc-hash 2.1.0", + "rustls 0.23.21", "socket2", - "thiserror 2.0.3", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -6936,11 +6935,11 @@ dependencies = [ "getrandom", "rand", "ring 0.17.8", - "rustc-hash 2.0.0", - "rustls 0.23.18", + "rustc-hash 2.1.0", + "rustls 0.23.21", "rustls-pki-types", "slab", - "thiserror 2.0.3", + "thiserror 2.0.11", "tinyvec", "tracing", "web-time", @@ -6948,9 +6947,9 @@ dependencies = [ [[package]] name = 
"quinn-udp" -version = "0.5.7" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" +checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" dependencies = [ "cfg_aliases", "libc", @@ -6962,9 +6961,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] @@ -7076,9 +7075,9 @@ dependencies = [ [[package]] name = "redb" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b1de48a7cf7ba193e81e078d17ee2b786236eed1d3f7c60f8a09545efc4925" +checksum = "ea0a72cd7140de9fc3e318823b883abf819c20d478ec89ce880466dc2ef263c6" dependencies = [ "libc", ] @@ -7094,11 +7093,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", ] [[package]] @@ -7170,7 +7169,7 @@ dependencies = [ "h2", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.31", + "hyper 0.14.32", "hyper-rustls", "hyper-tls", "ipnet", @@ -7345,17 +7344,19 @@ dependencies = [ [[package]] name = "ruint" -version = "1.12.3" +version = "1.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" +checksum = "f5ef8fb1dd8de3870cb8400d51b4c2023854bbafd5431a3ac7e7317243e22d2f" dependencies = [ "alloy-rlp", "arbitrary", "ark-ff 0.3.0", "ark-ff 0.4.2", 
"bytes", - "fastrlp", + "fastrlp 0.3.1", + "fastrlp 0.4.0", "num-bigint", + "num-integer", "num-traits", "parity-scale-codec 3.6.12", "primitive-types 0.12.2", @@ -7416,9 +7417,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" [[package]] name = "rustc-hex" @@ -7441,7 +7442,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.23", + "semver 1.0.25", ] [[package]] @@ -7469,15 +7470,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.41" +version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "errno", "libc", - "linux-raw-sys 0.4.14", - "windows-sys 0.52.0", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", ] [[package]] @@ -7508,9 +7509,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.18" +version = "0.23.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9cc1d47e243d655ace55ed38201c19ae02c148ae56412ab8750e8f0166ab7f" +checksum = "8f287924602bf649d949c63dc8ac8b235fa5387d394020705b80c4eb597ce5b8" dependencies = [ "once_cell", "ring 0.17.8", @@ -7540,9 +7541,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" 
+checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" dependencies = [ "web-time", ] @@ -7570,9 +7571,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" [[package]] name = "rusty-fork" @@ -7646,7 +7647,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -7735,7 +7736,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "core-foundation", "core-foundation-sys", "libc", @@ -7744,9 +7745,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.1" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", @@ -7763,9 +7764,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.23" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" dependencies = [ "serde", ] @@ -7801,9 +7802,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.215" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies 
= [ "serde_derive", ] @@ -7820,20 +7821,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.215" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] name = "serde_json" -version = "1.0.133" +version = "1.0.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" +checksum = "930cfb6e6abf99298aaad7d29abbef7a9999a9a8806a40088f55f0dcec03146b" dependencies = [ "itoa", "memchr", @@ -7859,7 +7860,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -7911,7 +7912,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.1", "itoa", "ryu", "serde", @@ -8047,13 +8048,13 @@ dependencies = [ [[package]] name = "simple_asn1" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" +checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint", "num-traits", - "thiserror 1.0.69", + "thiserror 2.0.11", "time", ] @@ -8078,9 +8079,9 @@ dependencies = [ [[package]] name = "siphasher" -version = "0.3.11" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" 
[[package]] name = "slab" @@ -8303,9 +8304,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -8533,9 +8534,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.89" +version = "2.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" +checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" dependencies = [ "proc-macro2", "quote", @@ -8562,7 +8563,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -8597,7 +8598,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "core-foundation", "system-configuration-sys 0.6.0", ] @@ -8674,14 +8675,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.14.0" +version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" +checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704" dependencies = [ "cfg-if", "fastrand", + "getrandom", "once_cell", - "rustix 0.38.41", + "rustix 0.38.44", "windows-sys 0.59.0", ] @@ -8707,11 +8709,11 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" +checksum = 
"5352447f921fda68cf61b4101566c0bdb5104eff6804d0678e5227580ab6a4e9" dependencies = [ - "rustix 0.38.41", + "rustix 0.38.44", "windows-sys 0.59.0", ] @@ -8759,11 +8761,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.3" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" dependencies = [ - "thiserror-impl 2.0.3", + "thiserror-impl 2.0.11", ] [[package]] @@ -8774,18 +8776,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] name = "thiserror-impl" -version = "2.0.3" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -8840,9 +8842,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", @@ -8861,9 +8863,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", @@ -8930,9 +8932,9 @@ dependencies = [ [[package]] name = "tinyvec" 
-version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" dependencies = [ "tinyvec_macros", ] @@ -8945,9 +8947,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.1" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" dependencies = [ "backtrace", "bytes", @@ -8972,13 +8974,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -9040,9 +9042,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", @@ -9052,9 +9054,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.12" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", @@ -9101,7 +9103,7 @@ version = "0.19.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.1", "toml_datetime", "winnow 0.5.40", ] @@ -9112,23 +9114,23 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.1", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.20", + "winnow 0.6.24", ] [[package]] name = "tower" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", "pin-project-lite", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.2", "tokio", "tower-layer", "tower-service", @@ -9149,9 +9151,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -9173,20 +9175,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -9215,9 +9217,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", "nu-ansi-term", @@ -9270,7 +9272,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -9402,15 +9404,15 @@ checksum = "ccb97dac3243214f8d8507998906ca3e2e0b900bf9bf4870477f125b82e68f6e" [[package]] name = "unicase" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-bidi" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" +checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" [[package]] name = "unicode-ident" @@ -9499,7 +9501,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 1.0.3", + "idna", "percent-encoding", ] @@ -9546,7 +9548,7 @@ dependencies = [ "eth2", "fdlimit", "graffiti_file", - "hyper 1.5.1", + "hyper 1.5.2", "initialized_validators", "metrics", "monitoring_api", @@ -9723,9 +9725,9 @@ dependencies = [ [[package]] name = "valuable" -version = "0.1.0" +version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "vcpkg" @@ -9790,7 +9792,7 @@ dependencies = [ "futures-util", "headers", "http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "log", "mime", "mime_guess", @@ -9839,47 +9841,48 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ "cfg-if", "once_cell", + "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.45" +version = "0.4.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" dependencies = [ "cfg-if", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" 
dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9887,22 +9890,25 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] [[package]] name = "wasm-streams" @@ -9947,7 +9953,7 @@ dependencies = [ "env_logger 0.9.3", "eth2", "http_api", - "hyper 1.5.1", + "hyper 1.5.2", "log", "logging", "network", @@ -9968,9 +9974,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.72" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" dependencies = [ "js-sys", "wasm-bindgen", @@ -10029,7 +10035,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.41", + "rustix 0.38.44", ] [[package]] @@ -10038,7 +10044,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ - "redox_syscall 0.5.7", + "redox_syscall 0.5.8", "wasite", "web-sys", ] @@ -10361,9 +10367,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.20" +version = "0.6.24" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "c8d71a593cc5c42ad7876e2c1fda56f314f3754c084128833e64f1345ff8a03a" dependencies = [ "memchr", ] @@ -10469,9 +10475,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.23" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af310deaae937e48a26602b730250b4949e125f468f11e6990be3e5304ddd96f" +checksum = "c5b940ebc25896e71dd073bad2dbaa2abfe97b0a391415e22ad1326d9c54e3c4" [[package]] name = "xmltree" @@ -10553,7 +10559,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", "synstructure", ] @@ -10575,7 +10581,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -10595,7 +10601,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", "synstructure", ] @@ -10617,7 +10623,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] @@ -10639,7 +10645,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.96", ] [[package]] diff --git a/Dockerfile b/Dockerfile index 0f334e2ac8..437c864c30 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.80.0-bullseye AS builder +FROM rust:1.84.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . 
lighthouse ARG FEATURES diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index bfe0f19cd0..e26fe59413 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -8,6 +8,12 @@ edition = "2021" [dependencies] arbitrary = { workspace = true } c-kzg = { workspace = true } +# Required to maintain the pin from https://github.com/sigp/lighthouse/pull/6608 +crate_crypto_internal_eth_kzg_bls12_381 = { workspace = true } +crate_crypto_internal_eth_kzg_erasure_codes = { workspace = true } +crate_crypto_internal_eth_kzg_maybe_rayon = { workspace = true } +crate_crypto_internal_eth_kzg_polynomial = { workspace = true } +crate_crypto_kzg_multi_open_fk20 = { workspace = true } derivative = { workspace = true } ethereum_hashing = { workspace = true } ethereum_serde_utils = { workspace = true } diff --git a/lcli/Dockerfile b/lcli/Dockerfile index d2cb6f6f14..67bc290112 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.80.0-bullseye AS builder +FROM rust:1.84.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . 
lighthouse ARG FEATURES diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index c303511338..26ee4f310f 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -4,7 +4,7 @@ version = "6.0.1" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false -rust-version = "1.80.0" +rust-version = "1.82.0" [features] default = ["slasher-lmdb", "beacon-node-leveldb"] From 276eda3dfed10ce8f90860466570fc0f4747f8aa Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Fri, 31 Jan 2025 03:20:44 +0300 Subject: [PATCH 19/52] `POST /eth/v2/beacon/pool/attestations` bugfixes (#6867) --- Cargo.lock | 2 + .../src/attestation_verification.rs | 4 +- beacon_node/beacon_chain/src/test_utils.rs | 6 +- beacon_node/http_api/Cargo.toml | 1 + beacon_node/http_api/src/lib.rs | 66 ++++++++++++------- .../http_api/src/publish_attestations.rs | 42 ++++++++++-- .../http_api/tests/interactive_tests.rs | 5 +- beacon_node/http_api/tests/tests.rs | 32 ++++++--- .../src/network_beacon_processor/mod.rs | 2 +- common/eth2/Cargo.toml | 1 + common/eth2/src/lib.rs | 32 ++++++--- consensus/types/src/attestation.rs | 18 ++--- consensus/types/src/subnet_id.rs | 2 +- .../validator_services/Cargo.toml | 1 + .../src/attestation_service.rs | 9 ++- 15 files changed, 160 insertions(+), 63 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 02871ed79c..259c9a2293 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2562,6 +2562,7 @@ name = "eth2" version = "0.1.0" dependencies = [ "derivative", + "either", "eth2_keystore", "ethereum_serde_utils", "ethereum_ssz", @@ -9690,6 +9691,7 @@ dependencies = [ "beacon_node_fallback", "bls", "doppelganger_service", + "either", "environment", "eth2", "futures", diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index ffaf61e41a..a69eb99a51 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ 
-327,8 +327,8 @@ impl VerifiedUnaggregatedAttestation<'_, T> { pub fn single_attestation(&self) -> Option { Some(SingleAttestation { - committee_index: self.attestation.committee_index()? as usize, - attester_index: self.validator_index, + committee_index: self.attestation.committee_index()?, + attester_index: self.validator_index as u64, data: self.attestation.data().clone(), signature: self.attestation.signature().clone(), }) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index e88ce71a7b..4526b2b360 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1131,15 +1131,15 @@ where .unwrap(); let single_attestation = - attestation.to_single_attestation_with_attester_index(attester_index)?; + attestation.to_single_attestation_with_attester_index(attester_index as u64)?; let attestation: Attestation = single_attestation.to_attestation(committee.committee)?; assert_eq!( single_attestation.committee_index, - attestation.committee_index().unwrap() as usize + attestation.committee_index().unwrap() ); - assert_eq!(single_attestation.attester_index, validator_index); + assert_eq!(single_attestation.attester_index, validator_index as u64); Ok(single_attestation) } diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 61f3370c70..2fb3ec06bf 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -32,6 +32,7 @@ rand = { workspace = true } safe_arith = { workspace = true } sensitive_url = { workspace = true } serde = { workspace = true } +serde_json = { workspace = true } slog = { workspace = true } slot_clock = { workspace = true } state_processing = { workspace = true } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 29c27198c0..77c9bcc34f 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -30,7 +30,6 @@ mod validator; mod 
validator_inclusion; mod validators; mod version; - use crate::light_client::{get_light_client_bootstrap, get_light_client_updates}; use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; use crate::version::fork_versioned_response; @@ -63,6 +62,7 @@ pub use publish_blocks::{ publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock, }; use serde::{Deserialize, Serialize}; +use serde_json::Value; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; @@ -83,14 +83,13 @@ use tokio_stream::{ wrappers::{errors::BroadcastStreamRecvError, BroadcastStream}, StreamExt, }; -use types::ChainSpec; use types::{ fork_versioned_response::EmptyMetadata, Attestation, AttestationData, AttestationShufflingId, - AttesterSlashing, BeaconStateError, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, - ForkVersionedResponse, Hash256, ProposerPreparationData, ProposerSlashing, RelativeEpoch, - SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, - SingleAttestation, Slot, SyncCommitteeMessage, SyncContributionData, + AttesterSlashing, BeaconStateError, ChainSpec, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, + ForkName, ForkVersionedResponse, Hash256, ProposerPreparationData, ProposerSlashing, + RelativeEpoch, SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, + SyncCommitteeMessage, SyncContributionData, }; use validator::pubkey_to_validator_index; use version::{ @@ -1279,6 +1278,9 @@ pub fn serve( let consensus_version_header_filter = warp::header::header::(CONSENSUS_VERSION_HEADER); + let optional_consensus_version_header_filter = + warp::header::optional::(CONSENSUS_VERSION_HEADER); + // POST beacon/blocks let post_beacon_blocks = eth_v1 
.and(warp::path("beacon")) @@ -1829,20 +1831,19 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()); - let beacon_pool_path_any = any_version - .and(warp::path("beacon")) - .and(warp::path("pool")) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); - let beacon_pool_path_v2 = eth_v2 .and(warp::path("beacon")) .and(warp::path("pool")) .and(task_spawner_filter.clone()) .and(chain_filter.clone()); - // POST beacon/pool/attestations - let post_beacon_pool_attestations = beacon_pool_path + let beacon_pool_path_any = any_version + .and(warp::path("beacon")) + .and(warp::path("pool")) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()); + + let post_beacon_pool_attestations_v1 = beacon_pool_path .clone() .and(warp::path("attestations")) .and(warp::path::end()) @@ -1851,9 +1852,6 @@ pub fn serve( .and(reprocess_send_filter.clone()) .and(log_filter.clone()) .then( - // V1 and V2 are identical except V2 has a consensus version header in the request. - // We only require this header for SSZ deserialization, which isn't supported for - // this endpoint presently. 
|task_spawner: TaskSpawner, chain: Arc>, attestations: Vec>, @@ -1879,18 +1877,40 @@ pub fn serve( .clone() .and(warp::path("attestations")) .and(warp::path::end()) - .and(warp_utils::json::json()) + .and(warp_utils::json::json::()) + .and(optional_consensus_version_header_filter) .and(network_tx_filter.clone()) - .and(reprocess_send_filter) + .and(reprocess_send_filter.clone()) .and(log_filter.clone()) .then( |task_spawner: TaskSpawner, chain: Arc>, - attestations: Vec, + payload: Value, + fork_name: Option, network_tx: UnboundedSender>, reprocess_tx: Option>, log: Logger| async move { - let attestations = attestations.into_iter().map(Either::Right).collect(); + let attestations = + match crate::publish_attestations::deserialize_attestation_payload::( + payload, fork_name, &log, + ) { + Ok(attestations) => attestations, + Err(err) => { + warn!( + log, + "Unable to deserialize attestation POST request"; + "error" => ?err + ); + return warp::reply::with_status( + warp::reply::json( + &"Unable to deserialize request body".to_string(), + ), + eth2::StatusCode::BAD_REQUEST, + ) + .into_response(); + } + }; + let result = crate::publish_attestations::publish_attestations( task_spawner, chain, @@ -4765,7 +4785,7 @@ pub fn serve( .uor(post_beacon_blinded_blocks) .uor(post_beacon_blocks_v2) .uor(post_beacon_blinded_blocks_v2) - .uor(post_beacon_pool_attestations) + .uor(post_beacon_pool_attestations_v1) .uor(post_beacon_pool_attestations_v2) .uor(post_beacon_pool_attester_slashings) .uor(post_beacon_pool_proposer_slashings) diff --git a/beacon_node/http_api/src/publish_attestations.rs b/beacon_node/http_api/src/publish_attestations.rs index 111dee3cff..1b9949d4d5 100644 --- a/beacon_node/http_api/src/publish_attestations.rs +++ b/beacon_node/http_api/src/publish_attestations.rs @@ -44,6 +44,7 @@ use either::Either; use eth2::types::Failure; use lighthouse_network::PubsubMessage; use network::NetworkMessage; +use serde_json::Value; use slog::{debug, error, warn, Logger}; 
use std::borrow::Cow; use std::sync::Arc; @@ -52,11 +53,11 @@ use tokio::sync::{ mpsc::{Sender, UnboundedSender}, oneshot, }; -use types::{Attestation, EthSpec, SingleAttestation}; +use types::{Attestation, EthSpec, ForkName, SingleAttestation}; // Error variants are only used in `Debug` and considered `dead_code` by the compiler. #[derive(Debug)] -enum Error { +pub enum Error { Validation(AttestationError), Publication, ForkChoice(#[allow(dead_code)] BeaconChainError), @@ -64,6 +65,7 @@ enum Error { ReprocessDisabled, ReprocessFull, ReprocessTimeout, + InvalidJson(#[allow(dead_code)] serde_json::Error), FailedConversion(#[allow(dead_code)] BeaconChainError), } @@ -74,6 +76,36 @@ enum PublishAttestationResult { Failure(Error), } +#[allow(clippy::type_complexity)] +pub fn deserialize_attestation_payload( + payload: Value, + fork_name: Option, + log: &Logger, +) -> Result, SingleAttestation>>, Error> { + if fork_name.is_some_and(|fork_name| fork_name.electra_enabled()) || fork_name.is_none() { + if fork_name.is_none() { + warn!( + log, + "No Consensus Version header specified."; + ); + } + + Ok(serde_json::from_value::>(payload) + .map_err(Error::InvalidJson)? + .into_iter() + .map(Either::Right) + .collect()) + } else { + Ok( + serde_json::from_value::>>(payload) + .map_err(Error::InvalidJson)? 
+ .into_iter() + .map(Either::Left) + .collect(), + ) + } +} + fn verify_and_publish_attestation( chain: &Arc>, either_attestation: &Either, SingleAttestation>, @@ -163,12 +195,12 @@ fn convert_to_attestation<'a, T: BeaconChainTypes>( |committee_cache, _| { let Some(committee) = committee_cache.get_beacon_committee( single_attestation.data.slot, - single_attestation.committee_index as u64, + single_attestation.committee_index, ) else { return Err(BeaconChainError::AttestationError( types::AttestationError::NoCommitteeForSlotAndIndex { slot: single_attestation.data.slot, - index: single_attestation.committee_index as u64, + index: single_attestation.committee_index, }, )); }; @@ -199,7 +231,7 @@ pub async fn publish_attestations( .iter() .map(|att| match att { Either::Left(att) => (att.data().slot, att.committee_index()), - Either::Right(att) => (att.data.slot, Some(att.committee_index as u64)), + Either::Right(att) => (att.data.slot, Some(att.committee_index)), }) .collect::>(); diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 60a4c50783..bb3086945b 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -5,6 +5,7 @@ use beacon_chain::{ ChainConfig, }; use beacon_processor::work_reprocessing_queue::ReprocessQueueMessage; +use either::Either; use eth2::types::ProduceBlockV3Response; use eth2::types::{DepositContractData, StateId}; use execution_layer::{ForkchoiceState, PayloadAttributes}; @@ -906,9 +907,11 @@ async fn queue_attestations_from_http() { .flat_map(|attestations| attestations.into_iter().map(|(att, _subnet)| att)) .collect::>(); + let attestations = Either::Right(single_attestations); + tokio::spawn(async move { client - .post_beacon_pool_attestations_v2(&single_attestations, fork_name) + .post_beacon_pool_attestations_v2::(attestations, fork_name) .await .expect("attestations should be processed successfully") }) diff --git 
a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 99b7696610..bc3159e074 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -3,6 +3,7 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, BeaconChain, ChainConfig, StateSkipConfig, WhenSlotSkipped, }; +use either::Either; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, @@ -1810,12 +1811,25 @@ impl ApiTester { self } - pub async fn test_post_beacon_pool_attestations_valid_v1(mut self) -> Self { + pub async fn test_post_beacon_pool_attestations_valid(mut self) -> Self { self.client .post_beacon_pool_attestations_v1(self.attestations.as_slice()) .await .unwrap(); + let fork_name = self + .attestations + .first() + .map(|att| self.chain.spec.fork_name_at_slot::(att.data().slot)) + .unwrap(); + + let attestations = Either::Left(self.attestations.clone()); + + self.client + .post_beacon_pool_attestations_v2::(attestations, fork_name) + .await + .unwrap(); + assert!( self.network_rx.network_recv.recv().await.is_some(), "valid attestation should be sent to network" @@ -1833,8 +1847,10 @@ impl ApiTester { .first() .map(|att| self.chain.spec.fork_name_at_slot::(att.data.slot)) .unwrap(); + + let attestations = Either::Right(self.single_attestations.clone()); self.client - .post_beacon_pool_attestations_v2(self.single_attestations.as_slice(), fork_name) + .post_beacon_pool_attestations_v2::(attestations, fork_name) .await .unwrap(); assert!( @@ -1900,10 +1916,10 @@ impl ApiTester { .first() .map(|att| self.chain.spec.fork_name_at_slot::(att.data().slot)) .unwrap(); - + let attestations = Either::Right(attestations); let err_v2 = self .client - .post_beacon_pool_attestations_v2(attestations.as_slice(), fork_name) + .post_beacon_pool_attestations_v2::(attestations, fork_name) .await .unwrap_err(); @@ -6054,9 +6070,9 @@ impl ApiTester { .chain 
.spec .fork_name_at_slot::(self.chain.slot().unwrap()); - + let attestations = Either::Right(self.single_attestations.clone()); self.client - .post_beacon_pool_attestations_v2(&self.single_attestations, fork_name) + .post_beacon_pool_attestations_v2::(attestations, fork_name) .await .unwrap(); @@ -6375,10 +6391,10 @@ async fn post_beacon_blocks_duplicate() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn beacon_pools_post_attestations_valid_v1() { +async fn beacon_pools_post_attestations_valid() { ApiTester::new() .await - .test_post_beacon_pool_attestations_valid_v1() + .test_post_beacon_pool_attestations_valid() .await; } diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 8d07ef1a12..5c1d4f24e5 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -103,7 +103,7 @@ impl NetworkBeaconProcessor { |committee_cache, _| { let Some(committee) = committee_cache.get_beacon_committee( single_attestation.data.slot, - single_attestation.committee_index as u64, + single_attestation.committee_index, ) else { warn!( self.log, diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index ca7fa7ccdb..a1bc9d025b 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -7,6 +7,7 @@ edition = { workspace = true } [dependencies] derivative = { workspace = true } +either = { workspace = true } eth2_keystore = { workspace = true } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index af8573a578..b86aa62765 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -17,6 +17,7 @@ pub mod types; use self::mixin::{RequestAccept, ResponseOptional}; use self::types::{Error as ResponseError, *}; use derivative::Derivative; +use either::Either; use futures::Stream; use futures_util::StreamExt; 
use lighthouse_network::PeerId; @@ -1324,9 +1325,9 @@ impl BeaconNodeHttpClient { } /// `POST v2/beacon/pool/attestations` - pub async fn post_beacon_pool_attestations_v2( + pub async fn post_beacon_pool_attestations_v2( &self, - attestations: &[SingleAttestation], + attestations: Either>, Vec>, fork_name: ForkName, ) -> Result<(), Error> { let mut path = self.eth_path(V2)?; @@ -1337,13 +1338,26 @@ impl BeaconNodeHttpClient { .push("pool") .push("attestations"); - self.post_with_timeout_and_consensus_header( - path, - &attestations, - self.timeouts.attestation, - fork_name, - ) - .await?; + match attestations { + Either::Right(attestations) => { + self.post_with_timeout_and_consensus_header( + path, + &attestations, + self.timeouts.attestation, + fork_name, + ) + .await?; + } + Either::Left(attestations) => { + self.post_with_timeout_and_consensus_header( + path, + &attestations, + self.timeouts.attestation, + fork_name, + ) + .await?; + } + }; Ok(()) } diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 47e41acb5b..276b27b0f8 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -24,7 +24,7 @@ pub enum Error { IncorrectStateVariant, InvalidCommitteeLength, InvalidCommitteeIndex, - AttesterNotInCommittee(usize), + AttesterNotInCommittee(u64), InvalidCommittee, MissingCommittee, NoCommitteeForSlotAndIndex { slot: Slot, index: CommitteeIndex }, @@ -238,7 +238,7 @@ impl Attestation { pub fn to_single_attestation_with_attester_index( &self, - attester_index: usize, + attester_index: u64, ) -> Result { match self { Self::Base(_) => Err(Error::IncorrectStateVariant), @@ -375,14 +375,14 @@ impl AttestationElectra { pub fn to_single_attestation_with_attester_index( &self, - attester_index: usize, + attester_index: u64, ) -> Result { let Some(committee_index) = self.committee_index() else { return Err(Error::InvalidCommitteeIndex); }; Ok(SingleAttestation { - committee_index: committee_index as 
usize, + committee_index, attester_index, data: self.data.clone(), signature: self.signature.clone(), @@ -579,8 +579,10 @@ impl ForkVersionDeserialize for Vec> { PartialEq, )] pub struct SingleAttestation { - pub committee_index: usize, - pub attester_index: usize, + #[serde(with = "serde_utils::quoted_u64")] + pub committee_index: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub attester_index: u64, pub data: AttestationData, pub signature: AggregateSignature, } @@ -591,7 +593,7 @@ impl SingleAttestation { .iter() .enumerate() .find_map(|(i, &validator_index)| { - if self.attester_index == validator_index { + if self.attester_index as usize == validator_index { return Some(i); } None @@ -600,7 +602,7 @@ impl SingleAttestation { let mut committee_bits: BitVector = BitVector::default(); committee_bits - .set(self.committee_index, true) + .set(self.committee_index as usize, true) .map_err(|_| Error::InvalidCommitteeIndex)?; let mut aggregation_bits = diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 981d6d5653..7a5357c6cc 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -67,7 +67,7 @@ impl SubnetId { ) -> Result { Self::compute_subnet::( attestation.data.slot, - attestation.committee_index as u64, + attestation.committee_index, committee_count_per_slot, spec, ) diff --git a/validator_client/validator_services/Cargo.toml b/validator_client/validator_services/Cargo.toml index 21f0ae2d77..b4495a7c81 100644 --- a/validator_client/validator_services/Cargo.toml +++ b/validator_client/validator_services/Cargo.toml @@ -8,6 +8,7 @@ authors = ["Sigma Prime "] beacon_node_fallback = { workspace = true } bls = { workspace = true } doppelganger_service = { workspace = true } +either = { workspace = true } environment = { workspace = true } eth2 = { workspace = true } futures = { workspace = true } diff --git a/validator_client/validator_services/src/attestation_service.rs 
b/validator_client/validator_services/src/attestation_service.rs index 58c6ea3298..9a6f94d52b 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -1,5 +1,6 @@ use crate::duties_service::{DutiesService, DutyAndProof}; use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; +use either::Either; use environment::RuntimeContext; use futures::future::join_all; use slog::{crit, debug, error, info, trace, warn}; @@ -461,7 +462,7 @@ impl AttestationService { .iter() .zip(validator_indices) .filter_map(|(a, i)| { - match a.to_single_attestation_with_attester_index(*i as usize) { + match a.to_single_attestation_with_attester_index(*i) { Ok(a) => Some(a), Err(e) => { // This shouldn't happen unless BN and VC are out of sync with @@ -479,8 +480,12 @@ impl AttestationService { } }) .collect::>(); + beacon_node - .post_beacon_pool_attestations_v2(&single_attestations, fork_name) + .post_beacon_pool_attestations_v2::( + Either::Right(single_attestations), + fork_name, + ) .await } else { beacon_node From e4183f8e4ddd12a68be054a2fcf2f80956996771 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 31 Jan 2025 14:39:34 +1100 Subject: [PATCH 20/52] Fix mdbook build. (#6891) https://github.com/sigp/lighthouse/actions/runs/13063781937/job/36452383133 `mdbook` ci job above is failing because the latest release now requires a newer version of glibc: > Updated the Linux pre-built binaries which requires a newer version of glibc (2.34). https://github.com/rust-lang/mdBook/pull/2523 https://github.com/rust-lang/mdBook/blob/master/CHANGELOG.md Updating to latest ubuntu to fix this. 
--- .github/workflows/book.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 031a88b03c..e9db3b6ab1 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -11,7 +11,7 @@ concurrency: jobs: build-and-upload-to-s3: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 From 027bb973f80969d5174ce253d89727990d19e9bc Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Fri, 31 Jan 2025 03:00:52 -0300 Subject: [PATCH 21/52] Compute columns in post-PeerDAS checkpoint sync (#6760) Addresses #6026. Post-PeerDAS the DB expects to have data columns for the finalized block. Instead of forcing the user to submit the columns, this PR computes the columns from the blobs that we can already fetch from the checkpointz server or with the existing CLI options. Note 1: (EDIT) Pruning concern addressed Note 2: I have not tested this feature Note 3: @michaelsproul an alternative I recall is to not require the blobs / columns at this point and expect backfill to populate the finalized block --- beacon_node/beacon_chain/src/builder.rs | 28 ++++++++++++++++++++++--- beacon_node/store/src/hot_cold_store.rs | 18 ++++++++++++++++ 2 files changed, 43 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 9d99ff9d8e..8d62478bea 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -9,6 +9,7 @@ use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::graffiti_calculator::{GraffitiCalculator, GraffitiOrigin}; use crate::head_tracker::HeadTracker; +use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, 
MigratorConfig}; use crate::observed_data_sidecars::ObservedDataSidecars; @@ -562,9 +563,30 @@ where .put_block(&weak_subj_block_root, weak_subj_block.clone()) .map_err(|e| format!("Failed to store weak subjectivity block: {e:?}"))?; if let Some(blobs) = weak_subj_blobs { - store - .put_blobs(&weak_subj_block_root, blobs) - .map_err(|e| format!("Failed to store weak subjectivity blobs: {e:?}"))?; + if self + .spec + .is_peer_das_enabled_for_epoch(weak_subj_block.epoch()) + { + // After PeerDAS recompute columns from blobs to not force the checkpointz server + // into exposing another route. + let blobs = blobs + .iter() + .map(|blob_sidecar| &blob_sidecar.blob) + .collect::>(); + let data_columns = + blobs_to_data_column_sidecars(&blobs, &weak_subj_block, &self.kzg, &self.spec) + .map_err(|e| { + format!("Failed to compute weak subjectivity data_columns: {e:?}") + })?; + // TODO(das): only persist the columns under custody + store + .put_data_columns(&weak_subj_block_root, data_columns) + .map_err(|e| format!("Failed to store weak subjectivity data_column: {e:?}"))?; + } else { + store + .put_blobs(&weak_subj_block_root, blobs) + .map_err(|e| format!("Failed to store weak subjectivity blobs: {e:?}"))?; + } } // Stage the database's metadata fields for atomic storage when `build` is called. 
diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 02014a05a3..134be9ec0d 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -863,6 +863,24 @@ impl, Cold: ItemStore> HotColdDB )); } + pub fn put_data_columns( + &self, + block_root: &Hash256, + data_columns: DataColumnSidecarList, + ) -> Result<(), Error> { + for data_column in data_columns { + self.blobs_db.put_bytes( + DBColumn::BeaconDataColumn, + &get_data_column_key(block_root, &data_column.index), + &data_column.as_ssz_bytes(), + )?; + self.block_cache + .lock() + .put_data_column(*block_root, data_column); + } + Ok(()) + } + pub fn data_columns_as_kv_store_ops( &self, block_root: &Hash256, From b6db2135485c30115d6f940f23cfda7a6e692618 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 31 Jan 2025 11:15:40 +0000 Subject: [PATCH 22/52] update MSRV (#6896) --- lighthouse/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 26ee4f310f..c95735d41c 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -4,7 +4,7 @@ version = "6.0.1" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false -rust-version = "1.82.0" +rust-version = "1.83.0" [features] default = ["slasher-lmdb", "beacon-node-leveldb"] From ddb845d5033f3aa1205844bb0287a797c1ee7b5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 31 Jan 2025 12:21:06 +0000 Subject: [PATCH 23/52] update libp2p to 0.55 (#6889) Updates libp2p to `0.55`. 
Will address the deprecations in a subsequent PR --- Cargo.lock | 473 +++++++++++++----- beacon_node/lighthouse_network/Cargo.toml | 4 +- .../lighthouse_network/gossipsub/Cargo.toml | 2 +- .../gossipsub/src/handler.rs | 18 +- .../lighthouse_network/src/discovery/mod.rs | 2 +- .../src/peer_manager/network_behaviour.rs | 5 +- .../lighthouse_network/src/rpc/handler.rs | 2 + .../lighthouse_network/src/service/mod.rs | 2 +- beacon_node/network/Cargo.toml | 2 +- 9 files changed, 351 insertions(+), 159 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 259c9a2293..20d2548d09 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -216,7 +216,7 @@ dependencies = [ "derive_arbitrary", "derive_more 1.0.0", "foldhash", - "getrandom", + "getrandom 0.2.15", "hashbrown 0.15.2", "indexmap 2.7.1", "itoa", @@ -584,6 +584,17 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "async-trait" version = "0.1.85" @@ -619,6 +630,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "attohttpc" version = "0.24.1" @@ -671,7 +688,7 @@ dependencies = [ "http 1.2.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.2", + "hyper 1.6.0", "hyper-util", "itoa", "matchit", @@ -848,7 +865,7 @@ dependencies = [ "genesis", "hex", "http_api", - "hyper 1.5.2", + "hyper 1.6.0", "lighthouse_network", "monitoring_api", "node_test_rig", @@ -1127,9 +1144,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.16.0" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "byte-slice-cast" @@ -1452,9 +1469,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.52" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e" +checksum = "e24a03c8b52922d68a1589ad61032f2c1aa5a8158d2aa0d93c6e9534944bbad6" dependencies = [ "cc", ] @@ -1548,9 +1565,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ "libc", ] @@ -2637,7 +2654,7 @@ dependencies = [ "sha2 0.9.9", "tempfile", "unicode-normalization", - "uuid", + "uuid 0.8.2", "zeroize", ] @@ -2677,7 +2694,7 @@ dependencies = [ "serde_repr", "tempfile", "tiny-bip39", - "uuid", + "uuid 0.8.2", ] [[package]] @@ -2856,7 +2873,7 @@ dependencies = [ "dunce", "ethers-core", "eyre", - "getrandom", + "getrandom 0.2.15", "hex", "proc-macro2", "quote", @@ -2927,7 +2944,7 @@ dependencies = [ "futures-core", "futures-timer", "futures-util", - "getrandom", + "getrandom 0.2.15", "hashers", "hex", "http 0.2.12", @@ -3431,6 +3448,19 @@ dependencies = [ "byteorder", ] +[[package]] +name = "generator" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" +dependencies = [ + "cfg-if", + "libc", + "log", + "rustversion", + "windows 0.58.0", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -3472,10 +3502,22 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] +[[package]] +name = 
"getrandom" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets 0.52.6", +] + [[package]] name = "ghash" version = "0.5.1" @@ -3543,7 +3585,7 @@ dependencies = [ "fnv", "futures", "futures-timer", - "getrandom", + "getrandom 0.2.15", "hashlink 0.9.1", "hex_fmt", "libp2p", @@ -3615,6 +3657,25 @@ dependencies = [ "tracing", ] +[[package]] +name = "h2" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.2.0", + "indexmap 2.7.1", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "half" version = "2.4.1" @@ -3779,10 +3840,11 @@ checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" [[package]] name = "hickory-proto" -version = "0.24.2" +version = "0.25.0-alpha.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447afdcdb8afb9d0a852af6dc65d9b285ce720ed7a59e42a8bf2e931c67bc1b5" +checksum = "d063c0692ee669aa6d261988aa19ca5510f1cc40e4f211024f50c888499a35d7" dependencies = [ + "async-recursion", "async-trait", "cfg-if", "data-encoding", @@ -3795,7 +3857,7 @@ dependencies = [ "once_cell", "rand", "socket2", - "thiserror 1.0.69", + "thiserror 2.0.11", "tinyvec", "tokio", "tracing", @@ -3804,21 +3866,21 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.2" +version = "0.25.0-alpha.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a2e2aba9c389ce5267d31cf1e4dace82390ae276b0b364ea55630b1fa1b44b4" +checksum = "42bc352e4412fb657e795f79b4efcf2bd60b59ee5ca0187f3554194cd1107a27" dependencies = [ "cfg-if", "futures-util", "hickory-proto", "ipconfig", - 
"lru-cache", + "moka", "once_cell", "parking_lot 0.12.3", "rand", "resolv-conf", "smallvec", - "thiserror 1.0.69", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -4020,9 +4082,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.5" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" +checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" [[package]] name = "httpdate" @@ -4046,7 +4108,7 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2", + "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", "httparse", @@ -4062,13 +4124,14 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.2" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" dependencies = [ "bytes", "futures-channel", "futures-util", + "h2 0.4.7", "http 1.2.0", "http-body 1.0.1", "httparse", @@ -4077,6 +4140,7 @@ dependencies = [ "pin-project-lite", "smallvec", "tokio", + "want", ] [[package]] @@ -4113,13 +4177,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", + "futures-channel", "futures-util", "http 1.2.0", "http-body 1.0.1", - "hyper 1.5.2", + "hyper 1.6.0", "pin-project-lite", + "socket2", "tokio", "tower-service", + "tracing", ] [[package]] @@ -4320,21 +4387,44 @@ dependencies = [ "rtnetlink", "system-configuration 0.6.1", "tokio", - "windows", + "windows 0.53.0", ] [[package]] name = "igd-next" -version = "0.14.3" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" +checksum = 
"76b0d7d4541def58a37bf8efc559683f21edce7c82f0d866c93ac21f7e098f93" dependencies = [ "async-trait", "attohttpc", "bytes", "futures", - "http 0.2.12", - "hyper 0.14.32", + "http 1.2.0", + "http-body-util", + "hyper 1.6.0", + "hyper-util", + "log", + "rand", + "tokio", + "url", + "xmltree", +] + +[[package]] +name = "igd-next" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2830127baaaa55dae9aa5ee03158d5aa3687a9c2c11ce66870452580cc695df4" +dependencies = [ + "async-trait", + "attohttpc", + "bytes", + "futures", + "http 1.2.0", + "http-body-util", + "hyper 1.6.0", + "hyper-util", "log", "rand", "tokio", @@ -4821,15 +4911,15 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.54.1" +version = "0.55.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbe80f9c7e00526cd6b838075b9c171919404a4732cb2fa8ece0a093223bfc4" +checksum = "b72dc443ddd0254cb49a794ed6b6728400ee446a0f7ab4a07d0209ee98de20e9" dependencies = [ "bytes", "either", "futures", "futures-timer", - "getrandom", + "getrandom 0.2.15", "libp2p-allow-block-list", "libp2p-connection-limits", "libp2p-core", @@ -4848,38 +4938,36 @@ dependencies = [ "multiaddr", "pin-project", "rw-stream-sink", - "thiserror 1.0.69", + "thiserror 2.0.11", ] [[package]] name = "libp2p-allow-block-list" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" +checksum = "38944b7cb981cc93f2f0fb411ff82d0e983bd226fbcc8d559639a3a73236568b" dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "void", ] [[package]] name = "libp2p-connection-limits" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" +checksum = "efe9323175a17caa8a2ed4feaf8a548eeef5e0b72d03840a0eab4bcb0210ce1c" dependencies 
= [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "void", ] [[package]] name = "libp2p-core" -version = "0.42.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a61f26c83ed111104cd820fe9bc3aaabbac5f1652a1d213ed6e900b7918a1298" +checksum = "193c75710ba43f7504ad8f58a62ca0615b1d7e572cb0f1780bc607252c39e9ef" dependencies = [ "either", "fnv", @@ -4895,19 +4983,17 @@ dependencies = [ "quick-protobuf", "rand", "rw-stream-sink", - "smallvec", - "thiserror 1.0.69", + "thiserror 2.0.11", "tracing", "unsigned-varint 0.8.0", - "void", "web-time", ] [[package]] name = "libp2p-dns" -version = "0.42.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97f37f30d5c7275db282ecd86e54f29dd2176bd3ac656f06abf43bedb21eb8bd" +checksum = "1b780a1150214155b0ed1cdf09fbd2e1b0442604f9146a431d1b21d23eef7bd7" dependencies = [ "async-trait", "futures", @@ -4921,9 +5007,9 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.45.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1711b004a273be4f30202778856368683bd9a83c4c7dcc8f848847606831a4e3" +checksum = "e8c06862544f02d05d62780ff590cc25a75f5c2b9df38ec7a370dcae8bb873cf" dependencies = [ "asynchronous-codec", "either", @@ -4933,13 +5019,11 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "lru", "quick-protobuf", "quick-protobuf-codec", "smallvec", - "thiserror 1.0.69", + "thiserror 2.0.11", "tracing", - "void", ] [[package]] @@ -4966,11 +5050,10 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b8546b6644032565eb29046b42744aee1e9f261ed99671b2c93fb140dba417" +checksum = "11d0ba095e1175d797540e16b62e7576846b883cb5046d4159086837b36846cc" dependencies = [ - "data-encoding", "futures", "hickory-proto", "if-watch", @@ -4982,14 +5065,13 @@ 
dependencies = [ "socket2", "tokio", "tracing", - "void", ] [[package]] name = "libp2p-metrics" -version = "0.15.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" +checksum = "2ce58c64292e87af624fcb86465e7dd8342e46a388d71e8fec0ab37ee789630a" dependencies = [ "futures", "libp2p-core", @@ -5003,9 +5085,9 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.42.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41187ab8f6c835ad864edf94224f666f636ee2d270601422c1441f739e0abccc" +checksum = "8aaa6fee3722e355443058472fc4705d78681bc2d8e447a0bdeb3fecf40cd197" dependencies = [ "asynchronous-codec", "bytes", @@ -5022,13 +5104,12 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.45.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36b137cb1ae86ee39f8e5d6245a296518912014eaa87427d24e6ff58cfc1b28c" +checksum = "afcc133e0f3cea07acde6eb8a9665cb11b600bd61110b010593a0210b8153b16" dependencies = [ "asynchronous-codec", "bytes", - "curve25519-dalek", "futures", "libp2p-core", "libp2p-identity", @@ -5037,10 +5118,9 @@ dependencies = [ "once_cell", "quick-protobuf", "rand", - "sha2 0.10.8", "snow", "static_assertions", - "thiserror 1.0.69", + "thiserror 2.0.11", "tracing", "x25519-dalek", "zeroize", @@ -5048,9 +5128,9 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.42.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b63d926c6be56a2489e0e7316b17fe95a70bc5c4f3e85740bb3e67c0f3c6a44" +checksum = "7e659439578fc6d305da8303834beb9d62f155f40e7f5b9d81c9f2b2c69d1926" dependencies = [ "asynchronous-codec", "bytes", @@ -5064,33 +5144,31 @@ dependencies = [ [[package]] name = "libp2p-quic" -version = "0.11.1" +version = "0.12.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "46352ac5cd040c70e88e7ff8257a2ae2f891a4076abad2c439584a31c15fd24e" +checksum = "41432a159b00424a0abaa2c80d786cddff81055ac24aa127e0cf375f7858d880" dependencies = [ - "bytes", "futures", "futures-timer", "if-watch", "libp2p-core", "libp2p-identity", "libp2p-tls", - "parking_lot 0.12.3", "quinn", "rand", "ring 0.17.8", "rustls 0.23.21", "socket2", - "thiserror 1.0.69", + "thiserror 2.0.11", "tokio", "tracing", ] [[package]] name = "libp2p-swarm" -version = "0.45.1" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7dd6741793d2c1fb2088f67f82cf07261f25272ebe3c0b0c311e0c6b50e851a" +checksum = "803399b4b6f68adb85e63ab573ac568154b193e9a640f03e0f2890eabbcb37f8" dependencies = [ "either", "fnv", @@ -5106,7 +5184,6 @@ dependencies = [ "smallvec", "tokio", "tracing", - "void", "web-time", ] @@ -5124,16 +5201,15 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.42.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad964f312c59dcfcac840acd8c555de8403e295d39edf96f5240048b5fcaa314" +checksum = "65346fb4d36035b23fec4e7be4c320436ba53537ce9b6be1d1db1f70c905cad0" dependencies = [ "futures", "futures-timer", "if-watch", "libc", "libp2p-core", - "libp2p-identity", "socket2", "tokio", "tracing", @@ -5141,9 +5217,9 @@ dependencies = [ [[package]] name = "libp2p-tls" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" +checksum = "dcaebc1069dea12c5b86a597eaaddae0317c2c2cb9ec99dc94f82fd340f5c78b" dependencies = [ "futures", "futures-rustls", @@ -5153,37 +5229,36 @@ dependencies = [ "ring 0.17.8", "rustls 0.23.21", "rustls-webpki 0.101.7", - "thiserror 1.0.69", + "thiserror 2.0.11", "x509-parser", "yasna", ] [[package]] name = "libp2p-upnp" -version = "0.3.0" +version = "0.4.0" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01bf2d1b772bd3abca049214a3304615e6a36fa6ffc742bdd1ba774486200b8f" +checksum = "d457b9ecceb66e7199f049926fad447f1f17f040e8d29d690c086b4cab8ed14a" dependencies = [ "futures", "futures-timer", - "igd-next", + "igd-next 0.15.1", "libp2p-core", "libp2p-swarm", "tokio", "tracing", - "void", ] [[package]] name = "libp2p-yamux" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882" +checksum = "f15df094914eb4af272acf9adaa9e287baa269943f32ea348ba29cfb9bfc60d8" dependencies = [ "either", "futures", "libp2p-core", - "thiserror 1.0.69", + "thiserror 2.0.11", "tracing", "yamux 0.12.1", "yamux 0.13.4", @@ -5378,12 +5453,6 @@ dependencies = [ "target_info", ] -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - [[package]] name = "linux-raw-sys" version = "0.1.4" @@ -5468,6 +5537,19 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + [[package]] name = "lru" version = "0.12.5" @@ -5477,15 +5559,6 @@ dependencies = [ "hashbrown 0.15.2", ] -[[package]] -name = "lru-cache" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "lru_cache" version = "0.1.0" @@ -5713,7 +5786,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", ] @@ -5723,6 +5796,25 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9366861eb2a2c436c20b12c8dbec5f798cea6b47ad99216be0282942e2c81ea0" +[[package]] +name = "moka" +version = "0.12.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "loom", + "parking_lot 0.12.3", + "portable-atomic", + "rustc_version 0.4.1", + "smallvec", + "tagptr", + "thiserror 1.0.69", + "uuid 1.12.1", +] + [[package]] name = "monitoring_api" version = "0.1.0" @@ -5804,9 +5896,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +checksum = "0dab59f8e050d5df8e4dd87d9206fb6f65a483e20ac9fda365ade4fab353196c" dependencies = [ "libc", "log", @@ -5858,9 +5950,9 @@ dependencies = [ [[package]] name = "netlink-proto" -version = "0.11.4" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2741a6c259755922e3ed29ebce3b299cc2160c4acae94b465b5938ab02c2bbe" +checksum = "72452e012c2f8d612410d89eea01e2d9b56205274abb35d53f60200b2ec41d60" dependencies = [ "bytes", "futures", @@ -5905,7 +5997,7 @@ dependencies = [ "genesis", "gossipsub", "hex", - "igd-next", + "igd-next 0.16.0", "itertools 0.10.5", "kzg", "lighthouse_network", @@ -6162,9 +6254,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.68" +version = "0.10.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" +checksum = 
"f5e534d133a060a3c19daec1eb3e98ec6f4685978834f2dbadfe2ec215bab64e" dependencies = [ "bitflags 2.8.0", "cfg-if", @@ -6188,9 +6280,9 @@ dependencies = [ [[package]] name = "openssl-probe" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-src" @@ -6601,6 +6693,12 @@ dependencies = [ "universal-hash", ] +[[package]] +name = "portable-atomic" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" + [[package]] name = "postgres-protocol" version = "0.6.7" @@ -6933,7 +7031,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", - "getrandom", + "getrandom 0.2.15", "rand", "ring 0.17.8", "rustc-hash 2.1.0", @@ -7030,7 +7128,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", ] [[package]] @@ -7107,7 +7205,7 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom", + "getrandom 0.2.15", "libredox", "thiserror 1.0.69", ] @@ -7167,7 +7265,7 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2", + "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", "hyper 0.14.32", @@ -7272,7 +7370,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom", + "getrandom 0.2.15", "libc", "spin 0.9.8", "untrusted 0.9.0", @@ -7542,9 +7640,9 @@ 
dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" +checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" dependencies = [ "web-time", ] @@ -7601,9 +7699,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" [[package]] name = "safe_arith" @@ -7833,9 +7931,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.137" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "930cfb6e6abf99298aaad7d29abbef7a9999a9a8806a40088f55f0dcec03146b" +checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" dependencies = [ "itoa", "memchr", @@ -8635,6 +8733,12 @@ dependencies = [ "types", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "take_mut" version = "0.2.2" @@ -8676,13 +8780,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.15.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704" +checksum = "38c246215d7d24f48ae091a2902398798e05d978b24315d6efbc00ede9a8bb91" dependencies = [ "cfg-if", "fastrand", - "getrandom", + "getrandom 0.3.1", "once_cell", "rustix 0.38.44", "windows-sys 0.59.0", @@ -9119,7 +9223,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.24", + "winnow 0.6.25", ] [[package]] @@ -9417,9 +9521,9 @@ 
checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" [[package]] name = "unicode-ident" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" +checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" [[package]] name = "unicode-normalization" @@ -9530,10 +9634,19 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom", + "getrandom 0.2.15", "serde", ] +[[package]] +name = "uuid" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3758f5e68192bb96cc8f9b7e2c2cfdabb435499a28499a42f8f984092adad4b" +dependencies = [ + "getrandom 0.2.15", +] + [[package]] name = "validator_client" version = "0.3.5" @@ -9549,7 +9662,7 @@ dependencies = [ "eth2", "fdlimit", "graffiti_file", - "hyper 1.5.2", + "hyper 1.6.0", "initialized_validators", "metrics", "monitoring_api", @@ -9835,6 +9948,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + [[package]] name = "wasite" version = "0.1.0" @@ -9955,7 +10077,7 @@ dependencies = [ "env_logger 0.9.3", "eth2", "http_api", - "hyper 1.5.2", + "hyper 1.6.0", "log", "logging", "network", @@ -10104,6 +10226,16 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core 0.58.0", + "windows-targets 0.52.6", +] + [[package]] name = "windows-acl" version = "0.3.0" @@ -10131,10 +10263,45 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dcc5b895a6377f1ab9fa55acedab1fd5ac0db66ad1e6c7f47e28a22e446a5dd" dependencies = [ - "windows-result", + "windows-result 0.1.2", "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result 0.2.0", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "windows-result" version = "0.1.2" @@ -10144,6 +10311,25 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result 0.2.0", + 
"windows-targets 0.52.6", +] + [[package]] name = "windows-sys" version = "0.45.0" @@ -10369,9 +10555,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.24" +version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8d71a593cc5c42ad7876e2c1fda56f314f3754c084128833e64f1345ff8a03a" +checksum = "ad699df48212c6cc6eb4435f35500ac6fd3b9913324f938aea302022ce19d310" dependencies = [ "memchr", ] @@ -10386,6 +10572,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags 2.8.0", +] + [[package]] name = "write16" version = "1.0.0" diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 485f32b37a..3c89ece442 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -20,7 +20,7 @@ futures = { workspace = true } gossipsub = { workspace = true } hex = { workspace = true } itertools = { workspace = true } -libp2p-mplex = "0.42" +libp2p-mplex = "0.43" lighthouse_version = { workspace = true } lru = { workspace = true } lru_cache = { workspace = true } @@ -50,7 +50,7 @@ unused_port = { workspace = true } void = "1.0.2" [dependencies.libp2p] -version = "0.54" +version = "0.55" default-features = false features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic", "upnp"] diff --git a/beacon_node/lighthouse_network/gossipsub/Cargo.toml b/beacon_node/lighthouse_network/gossipsub/Cargo.toml index 61f5730c08..239caae47a 100644 --- a/beacon_node/lighthouse_network/gossipsub/Cargo.toml +++ b/beacon_node/lighthouse_network/gossipsub/Cargo.toml @@ -26,7 +26,7 @@ futures-timer = "3.0.2" getrandom = "0.2.12" hashlink = { workspace = true } hex_fmt = "0.3.0" 
-libp2p = { version = "0.54", default-features = false } +libp2p = { version = "0.55", default-features = false } prometheus-client = "0.22.0" quick-protobuf = "0.8" quick-protobuf-codec = "0.3" diff --git a/beacon_node/lighthouse_network/gossipsub/src/handler.rs b/beacon_node/lighthouse_network/gossipsub/src/handler.rs index d89013eb2f..0f25db6e3d 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/handler.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/handler.rs @@ -194,7 +194,6 @@ impl EnabledHandler { &mut self, FullyNegotiatedOutbound { protocol, .. }: FullyNegotiatedOutbound< ::OutboundProtocol, - ::OutboundOpenInfo, >, ) { let (substream, peer_kind) = protocol; @@ -217,7 +216,7 @@ impl EnabledHandler { ) -> Poll< ConnectionHandlerEvent< ::OutboundProtocol, - ::OutboundOpenInfo, + (), ::ToBehaviour, >, > { @@ -423,7 +422,7 @@ impl ConnectionHandler for Handler { type OutboundOpenInfo = (); type OutboundProtocol = ProtocolConfig; - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { match self { Handler::Enabled(handler) => { SubstreamProtocol::new(either::Either::Left(handler.listen_protocol.clone()), ()) @@ -458,9 +457,7 @@ impl ConnectionHandler for Handler { fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { match self { Handler::Enabled(handler) => handler.poll(cx), Handler::Disabled(DisabledHandler::ProtocolUnsupported { peer_kind_sent }) => { @@ -479,12 +476,7 @@ impl ConnectionHandler for Handler { fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match self { Handler::Enabled(handler) => { @@ -521,7 +513,7 @@ impl ConnectionHandler for Handler { }) => match protocol { Either::Left(protocol) => handler.on_fully_negotiated_inbound(protocol), #[allow(unreachable_patterns)] - Either::Right(v) => 
void::unreachable(v), + Either::Right(v) => libp2p::core::util::unreachable(v), }, ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { handler.on_fully_negotiated_outbound(fully_negotiated_outbound) diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 578bb52b51..33c7775ae2 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -994,7 +994,7 @@ impl NetworkBehaviour for Discovery { &mut self, _peer_id: PeerId, _connection_id: ConnectionId, - _event: void::Void, + _event: std::convert::Infallible, ) { } diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 9fd059df85..abafb200be 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -37,7 +37,10 @@ impl NetworkBehaviour for PeerManager { // no events from the dummy handler } - fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll> { // perform the heartbeat when necessary while self.heartbeat.poll_tick(cx).is_ready() { self.heartbeat(); diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index cb57a640bc..03203fcade 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -353,6 +353,7 @@ where !matches!(self.state, HandlerState::Deactivated) } + #[allow(deprecated)] fn poll( &mut self, cx: &mut Context<'_>, @@ -814,6 +815,7 @@ where Poll::Pending } + #[allow(deprecated)] fn on_connection_event( &mut self, event: ConnectionEvent< diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 
a18daa5791..354def79b0 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1846,7 +1846,7 @@ impl Network { None } #[allow(unreachable_patterns)] - BehaviourEvent::ConnectionLimits(le) => void::unreachable(le), + BehaviourEvent::ConnectionLimits(le) => libp2p::core::util::unreachable(le), }, SwarmEvent::ConnectionEstablished { .. } => None, SwarmEvent::ConnectionClosed { .. } => None, diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 44f6c54bbc..09179c4a51 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -31,7 +31,7 @@ execution_layer = { workspace = true } fnv = { workspace = true } futures = { workspace = true } hex = { workspace = true } -igd-next = "0.14" +igd-next = { version = "0.16", features = ["aio_tokio"] } itertools = { workspace = true } lighthouse_network = { workspace = true } logging = { workspace = true } From 6b40b985375ae97efc3f4049d7d981754ee4eb8b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 3 Feb 2025 15:27:07 +1100 Subject: [PATCH 24/52] Cargo update for openssl vuln (#6901) Run `cargo update` to address [RUSTSEC-2025-0004](https://rustsec.org/advisories/RUSTSEC-2025-0004), a vulnerability in `openssl`. I don't think we are affected, but this PR is required for us to pass `cargo audit` and unblock CI. --- Cargo.lock | 380 ++++++++++++++++++++++++++++++----------------------- Makefile | 2 +- 2 files changed, 217 insertions(+), 165 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 20d2548d09..af27fb11c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "Inflector" @@ -46,7 +46,7 @@ dependencies = [ "eth2_keystore", "eth2_wallet", "filesystem", - "rand", + "rand 0.8.5", "regex", "rpassword", "serde", @@ -135,7 +135,7 @@ dependencies = [ "cfg-if", "once_cell", "version_check", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -204,9 +204,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec878088ec6283ce1e90d280316aadd3d6ce3de06ff63d68953c855e7e447e92" +checksum = "bc1360603efdfba91151e623f13a4f4d3dc4af4adc1cbd90bf37c81e84db4c77" dependencies = [ "alloy-rlp", "arbitrary", @@ -225,7 +225,7 @@ dependencies = [ "paste", "proptest", "proptest-derive", - "rand", + "rand 0.8.5", "ruint", "rustc-hash 2.1.0", "serde", @@ -252,7 +252,7 @@ checksum = "a40e1ef334153322fd878d07e86af7a529bcb86b2439525920a88eba87bcf943" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -467,7 +467,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -477,7 +477,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -522,7 +522,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "synstructure", ] @@ -534,7 +534,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -592,18 +592,18 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] name = "async-trait" -version = "0.1.85" +version = "0.1.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" +checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -666,7 +666,7 @@ checksum = "e12882f59de5360c748c4cbf569a042d5fb0eb515f7bea9c1f470b47f6ffbd73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -821,7 +821,7 @@ dependencies = [ "operation_pool", "parking_lot 0.12.3", "proto_array", - "rand", + "rand 0.8.5", "rayon", "safe_arith", "sensitive_url", @@ -947,7 +947,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.96", + "syn 2.0.98", "which", ] @@ -1048,7 +1048,7 @@ dependencies = [ "ethereum_ssz", "fixed_bytes", "hex", - "rand", + "rand 0.8.5", "safe_arith", "serde", "tree_hash", @@ -1078,7 +1078,7 @@ dependencies = [ "ff 0.13.0", "group 0.13.0", "pairing", - "rand_core", + "rand_core 0.6.4", "serde", "subtle", ] @@ -1245,9 +1245,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.10" +version = "1.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" +checksum = "e4730490333d58093109dc02c23174c3f4d490998c3fed3cc8e82d57afedb9cf" dependencies = [ "jobserver", "libc", @@ -1401,7 +1401,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1716,7 +1716,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "subtle", "zeroize", ] @@ -1728,7 
+1728,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "subtle", "zeroize", ] @@ -1740,7 +1740,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "typenum", ] @@ -1816,7 +1816,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1864,7 +1864,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1886,7 +1886,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1938,7 +1938,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1145d32e826a7748b69ee8fc62d3e6355ff7f1051df53141e7048162fc90481b" dependencies = [ "data-encoding", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2052,7 +2052,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2065,7 +2065,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2085,15 +2085,15 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "unicode-xid", ] [[package]] name = "diesel" -version = "2.2.6" +version = "2.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf1bedf64cdb9643204a36dd15b19a6ce8e7aa7f7b105868e9f1fad5ffa7d12" +checksum = 
"04001f23ba8843dc315804fa324000376084dfb1c30794ff68dd279e6e5696d5" dependencies = [ "bitflags 2.8.0", "byteorder", @@ -2113,7 +2113,7 @@ dependencies = [ "dsl_auto_type", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2133,7 +2133,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" dependencies = [ - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2231,7 +2231,7 @@ dependencies = [ "more-asserts", "multiaddr", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "smallvec", "socket2", "tokio", @@ -2248,7 +2248,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2270,16 +2270,16 @@ dependencies = [ [[package]] name = "dsl_auto_type" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d9abe6314103864cc2d8901b7ae224e0ab1a103a0a416661b4097b0779b607" +checksum = "139ae9aca7527f85f26dd76483eb38533fd84bd571065da1739656ef71c5ff5b" dependencies = [ "darling 0.20.10", "either", "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2338,7 +2338,7 @@ checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core", + "rand_core 0.6.4", "serde", "sha2 0.10.8", "subtle", @@ -2396,7 +2396,7 @@ dependencies = [ "ff 0.12.1", "generic-array", "group 0.12.1", - "rand_core", + "rand_core 0.6.4", "sec1 0.3.0", "subtle", "zeroize", @@ -2416,7 +2416,7 @@ dependencies = [ "group 0.13.0", "pem-rfc7468", "pkcs8 0.10.2", - "rand_core", + "rand_core 0.6.4", "sec1 0.7.3", "subtle", "zeroize", @@ -2444,7 +2444,7 @@ dependencies = [ "hex", "k256 0.13.4", "log", - "rand", + "rand 0.8.5", "serde", "sha3 0.10.8", "zeroize", @@ -2459,7 +2459,7 @@ dependencies = [ "heck 
0.5.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2646,7 +2646,7 @@ dependencies = [ "hex", "hmac 0.11.0", "pbkdf2 0.8.0", - "rand", + "rand 0.8.5", "scrypt", "serde", "serde_json", @@ -2688,7 +2688,7 @@ dependencies = [ "eth2_key_derivation", "eth2_keystore", "hex", - "rand", + "rand 0.8.5", "serde", "serde_json", "serde_repr", @@ -2840,7 +2840,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2919,7 +2919,7 @@ dependencies = [ "k256 0.11.6", "once_cell", "open-fastrlp", - "rand", + "rand 0.8.5", "rlp", "rlp-derive", "serde", @@ -3056,7 +3056,7 @@ dependencies = [ "metrics", "parking_lot 0.12.3", "pretty_reqwest_error", - "rand", + "rand 0.8.5", "reqwest", "sensitive_url", "serde", @@ -3146,7 +3146,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -3157,7 +3157,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ "bitvec 1.0.1", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -3198,7 +3198,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ "byteorder", - "rand", + "rand 0.8.5", "rustc-hex", "static_assertions", ] @@ -3210,7 +3210,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "byteorder", - "rand", + "rand 0.8.5", "rustc-hex", "static_assertions", ] @@ -3385,7 +3385,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3395,7 
+3395,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.21", + "rustls 0.23.22", "rustls-pki-types", ] @@ -3551,7 +3551,7 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3593,7 +3593,7 @@ dependencies = [ "quick-protobuf", "quick-protobuf-codec", "quickcheck", - "rand", + "rand 0.8.5", "regex", "serde", "sha2 0.10.8", @@ -3621,7 +3621,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff 0.12.1", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -3632,8 +3632,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff 0.13.0", - "rand", - "rand_core", + "rand 0.8.5", + "rand_core 0.6.4", "rand_xorshift", "subtle", ] @@ -3855,7 +3855,7 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand", + "rand 0.8.5", "socket2", "thiserror 2.0.11", "tinyvec", @@ -3877,7 +3877,7 @@ dependencies = [ "moka", "once_cell", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "resolv-conf", "smallvec", "thiserror 2.0.11", @@ -4038,7 +4038,7 @@ dependencies = [ "operation_pool", "parking_lot 0.12.3", "proto_array", - "rand", + "rand 0.8.5", "safe_arith", "sensitive_url", "serde", @@ -4327,7 +4327,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4405,7 +4405,7 @@ dependencies = [ "hyper 1.6.0", "hyper-util", "log", - "rand", + "rand 0.8.5", "tokio", "url", "xmltree", @@ -4426,7 +4426,7 @@ dependencies = [ "hyper 1.6.0", "hyper-util", "log", - "rand", + "rand 0.8.5", "tokio", 
"url", "xmltree", @@ -4485,7 +4485,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4528,7 +4528,7 @@ dependencies = [ "lockfile", "metrics", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "reqwest", "serde", "serde_json", @@ -4981,7 +4981,7 @@ dependencies = [ "parking_lot 0.12.3", "pin-project", "quick-protobuf", - "rand", + "rand 0.8.5", "rw-stream-sink", "thiserror 2.0.11", "tracing", @@ -5040,7 +5040,7 @@ dependencies = [ "multihash", "p256", "quick-protobuf", - "rand", + "rand 0.8.5", "sec1 0.7.3", "sha2 0.10.8", "thiserror 1.0.69", @@ -5060,7 +5060,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "rand", + "rand 0.8.5", "smallvec", "socket2", "tokio", @@ -5096,7 +5096,7 @@ dependencies = [ "libp2p-identity", "nohash-hasher", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "smallvec", "tracing", "unsigned-varint 0.8.0", @@ -5117,7 +5117,7 @@ dependencies = [ "multihash", "once_cell", "quick-protobuf", - "rand", + "rand 0.8.5", "snow", "static_assertions", "thiserror 2.0.11", @@ -5155,9 +5155,9 @@ dependencies = [ "libp2p-identity", "libp2p-tls", "quinn", - "rand", + "rand 0.8.5", "ring 0.17.8", - "rustls 0.23.21", + "rustls 0.23.22", "socket2", "thiserror 2.0.11", "tokio", @@ -5180,7 +5180,7 @@ dependencies = [ "lru", "multistream-select", "once_cell", - "rand", + "rand 0.8.5", "smallvec", "tokio", "tracing", @@ -5196,7 +5196,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -5227,7 +5227,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.8", - "rustls 0.23.21", + "rustls 0.23.22", "rustls-webpki 0.101.7", "thiserror 2.0.11", "x509-parser", @@ -5287,7 +5287,7 @@ dependencies = [ "libsecp256k1-core", "libsecp256k1-gen-ecmult", "libsecp256k1-gen-genmult", - "rand", + "rand 0.8.5", "serde", "sha2 0.9.9", "typenum", @@ -5420,7 +5420,7 @@ 
dependencies = [ "prometheus-client", "quickcheck", "quickcheck_macros", - "rand", + "rand 0.8.5", "regex", "serde", "sha2 0.9.9", @@ -6007,7 +6007,7 @@ dependencies = [ "metrics", "operation_pool", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "serde_json", "slog", "slog-async", @@ -6132,7 +6132,7 @@ dependencies = [ "num-integer", "num-iter", "num-traits", - "rand", + "rand 0.8.5", "serde", "smallvec", "zeroize", @@ -6254,9 +6254,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.69" +version = "0.10.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5e534d133a060a3c19daec1eb3e98ec6f4685978834f2dbadfe2ec215bab64e" +checksum = "61cfb4e166a8bb8c9b55c500bc2308550148ece889be90f609377e58140f42c6" dependencies = [ "bitflags 2.8.0", "cfg-if", @@ -6275,7 +6275,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -6295,9 +6295,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.104" +version = "0.9.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" +checksum = "8b22d5b84be05a8d6947c7cb71f7c849aa0f112acd4bf51c2a7c1c988ac0a9dc" dependencies = [ "cc", "libc", @@ -6319,7 +6319,7 @@ dependencies = [ "maplit", "metrics", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "rayon", "serde", "state_processing", @@ -6468,7 +6468,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -6580,7 +6580,7 @@ checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -6701,9 +6701,9 @@ checksum = 
"280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" [[package]] name = "postgres-protocol" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acda0ebdebc28befa84bee35e651e4c5f09073d668c7aed4cf7e23c3cda84b23" +checksum = "76ff0abab4a9b844b93ef7b81f1efc0a366062aaef2cd702c76256b5dc075c54" dependencies = [ "base64 0.22.1", "byteorder", @@ -6712,16 +6712,16 @@ dependencies = [ "hmac 0.12.1", "md-5", "memchr", - "rand", + "rand 0.9.0", "sha2 0.10.8", "stringprep", ] [[package]] name = "postgres-types" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f66ea23a2d0e5734297357705193335e0a957696f34bed2f2faefacb2fec336f" +checksum = "613283563cd90e1dfc3518d548caee47e0e725455ed619881f5cf21f36de4b48" dependencies = [ "bytes", "fallible-iterator", @@ -6740,15 +6740,16 @@ version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ - "zerocopy", + "zerocopy 0.7.35", ] [[package]] name = "pq-sys" -version = "0.6.3" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6cc05d7ea95200187117196eee9edd0644424911821aeb28a18ce60ea0b8793" +checksum = "30b51d65ebe1cb1f40641b15abae017fed35ccdda46e3dab1ff8768f625a3222" dependencies = [ + "libc", "vcpkg", ] @@ -6767,7 +6768,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" dependencies = [ "proc-macro2", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -6822,7 +6823,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.22.22", + "toml_edit 0.22.23", ] [[package]] @@ -6884,7 +6885,7 @@ checksum = 
"440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -6898,8 +6899,8 @@ dependencies = [ "bitflags 2.8.0", "lazy_static", "num-traits", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_xorshift", "regex-syntax 0.8.5", "rusty-fork", @@ -6915,7 +6916,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -6991,7 +6992,7 @@ checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ "env_logger 0.8.4", "log", - "rand", + "rand 0.8.5", ] [[package]] @@ -7017,7 +7018,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.1.0", - "rustls 0.23.21", + "rustls 0.23.22", "socket2", "thiserror 2.0.11", "tokio", @@ -7032,10 +7033,10 @@ checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", "getrandom 0.2.15", - "rand", + "rand 0.8.5", "ring 0.17.8", "rustc-hash 2.1.0", - "rustls 0.23.21", + "rustls 0.23.22", "rustls-pki-types", "slab", "thiserror 2.0.11", @@ -7107,11 +7108,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", "serde", ] +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.0", + "zerocopy 0.8.14", +] + [[package]] name = "rand_chacha" version = "0.3.1" @@ -7119,7 +7131,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ 
"ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.0", ] [[package]] @@ -7131,13 +7153,23 @@ dependencies = [ "getrandom 0.2.15", ] +[[package]] +name = "rand_core" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b08f3c9802962f7e1b25113931d94f43ed9725bebc59db9d0c3e9a23b67e15ff" +dependencies = [ + "getrandom 0.3.1", + "zerocopy 0.8.14", +] + [[package]] name = "rand_xorshift" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -7460,7 +7492,7 @@ dependencies = [ "parity-scale-codec 3.6.12", "primitive-types 0.12.2", "proptest", - "rand", + "rand 0.8.5", "rlp", "ruint-macro", "serde", @@ -7608,9 +7640,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.21" +version = "0.23.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f287924602bf649d949c63dc8ac8b235fa5387d394020705b80c4eb597ce5b8" +checksum = "9fb9263ab4eb695e42321db096e3b8fbd715a59b154d5c88d82db2175b681ba7" dependencies = [ "once_cell", "ring 0.17.8", @@ -7746,7 +7778,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -7926,7 +7958,7 @@ checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -7959,7 +7991,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -8116,7 +8148,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ "digest 0.10.7", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -8126,7 +8158,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -8210,7 +8242,7 @@ dependencies = [ "maplit", "metrics", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "rayon", "redb", "safe_arith", @@ -8394,7 +8426,7 @@ dependencies = [ "blake2", "chacha20poly1305", "curve25519-dalek", - "rand_core", + "rand_core 0.6.4", "ring 0.17.8", "rustc_version 0.4.1", "sha2 0.10.8", @@ -8484,7 +8516,7 @@ dependencies = [ "itertools 0.10.5", "merkle_proof", "metrics", - "rand", + "rand 0.8.5", "rayon", "safe_arith", "smallvec", @@ -8529,7 +8561,7 @@ dependencies = [ "lru", "metrics", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "redb", "safe_arith", "serde", @@ -8633,9 +8665,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.96" +version = "2.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" +checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" dependencies = [ "proc-macro2", "quote", @@ -8662,7 +8694,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -8849,7 +8881,7 @@ dependencies = [ "hex", "hmac 0.12.1", "log", - "rand", + "rand 0.8.5", "serde", "serde_json", "sha2 0.10.8", @@ -8881,7 +8913,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -8892,7 +8924,7 @@ checksum = 
"26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -8997,7 +9029,7 @@ dependencies = [ "hmac 0.12.1", "once_cell", "pbkdf2 0.11.0", - "rand", + "rand 0.8.5", "rustc-hash 1.1.0", "sha2 0.10.8", "thiserror 1.0.69", @@ -9085,7 +9117,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -9100,9 +9132,9 @@ dependencies = [ [[package]] name = "tokio-postgres" -version = "0.7.12" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b5d3742945bc7d7f210693b0c58ae542c6fd47b17adbbda0885f3dcb34a6bdb" +checksum = "6c95d533c83082bb6490e0189acaa0bbeef9084e60471b696ca6988cd0541fb0" dependencies = [ "async-trait", "byteorder", @@ -9117,7 +9149,7 @@ dependencies = [ "pin-project-lite", "postgres-protocol", "postgres-types", - "rand", + "rand 0.9.0", "socket2", "tokio", "tokio-util", @@ -9190,7 +9222,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.22", + "toml_edit 0.22.23", ] [[package]] @@ -9215,15 +9247,15 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.22" +version = "0.22.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +checksum = "02a8b472d1a3d7c18e2d61a489aee3453fd9031c33e4f55bd533f4a7adca1bee" dependencies = [ "indexmap 2.7.1", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.25", + "winnow 0.7.0", ] [[package]] @@ -9286,7 +9318,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -9377,7 +9409,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -9442,7 +9474,7 @@ 
dependencies = [ "milhouse", "parking_lot 0.12.3", "paste", - "rand", + "rand 0.8.5", "rand_xorshift", "rayon", "regex", @@ -9693,7 +9725,7 @@ dependencies = [ "filesystem", "hex", "lockfile", - "rand", + "rand 0.8.5", "tempfile", "tree_hash", "types", @@ -9722,7 +9754,7 @@ dependencies = [ "lighthouse_version", "logging", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "sensitive_url", "serde", "signing_method", @@ -9985,7 +10017,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "wasm-bindgen-shared", ] @@ -10020,7 +10052,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10082,7 +10114,7 @@ dependencies = [ "logging", "network", "r2d2", - "rand", + "rand 0.8.5", "reqwest", "serde", "serde_json", @@ -10288,7 +10320,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -10299,7 +10331,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -10555,9 +10587,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.25" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad699df48212c6cc6eb4435f35500ac6fd3b9913324f938aea302022ce19d310" +checksum = "7e49d2d35d3fad69b39b94139037ecfb4f359f08958b9c11e7315ce770462419" dependencies = [ "memchr", ] @@ -10634,7 +10666,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" dependencies = [ "curve25519-dalek", - "rand_core", + "rand_core 0.6.4", "serde", "zeroize", ] @@ -10667,7 +10699,7 @@ dependencies = [ "futures-util", "libc", "log", - "rand", + "rand 0.8.5", ] 
[[package]] @@ -10707,7 +10739,7 @@ dependencies = [ "nohash-hasher", "parking_lot 0.12.3", "pin-project", - "rand", + "rand 0.8.5", "static_assertions", ] @@ -10722,7 +10754,7 @@ dependencies = [ "nohash-hasher", "parking_lot 0.12.3", "pin-project", - "rand", + "rand 0.8.5", "static_assertions", "web-time", ] @@ -10756,7 +10788,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "synstructure", ] @@ -10767,7 +10799,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", - "zerocopy-derive", + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy" +version = "0.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a367f292d93d4eab890745e75a778da40909cab4d6ff8173693812f79c4a2468" +dependencies = [ + "zerocopy-derive 0.8.14", ] [[package]] @@ -10778,7 +10819,18 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3931cb58c62c13adec22e38686b559c86a30565e16ad6e8510a337cedc611e1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", ] [[package]] @@ -10798,7 +10850,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "synstructure", ] @@ -10820,7 +10872,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -10842,7 +10894,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", 
- "syn 2.0.96", + "syn 2.0.98", ] [[package]] diff --git a/Makefile b/Makefile index 81477634fe..f621f38a63 100644 --- a/Makefile +++ b/Makefile @@ -250,7 +250,7 @@ install-audit: cargo install --force cargo-audit audit-CI: - cargo audit --ignore RUSTSEC-2024-0421 + cargo audit # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. vendor: From a088b0b6c423cafac0a0f339ea14aa1ce7c0e069 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Sun, 2 Feb 2025 21:14:11 -0800 Subject: [PATCH 25/52] Fix subnet unsubscription time (#6890) Hopefully fixes https://github.com/sigp/lighthouse/issues/6732 In our `scheduled_subscriptions`, we were setting unsubscription slot to be `current_slot + 1`. Given that we were subscribing to the subnet at `duty.slot - 1`, the unsubscription slot ended up being `duty.slot`. So we were unsubscribing to the subnet at the beginning of the duty slot which is insane. Fixes the `scheduled_subscriptions` to unsubscribe at `duty.slot + 1`. --- beacon_node/network/src/subnet_service/mod.rs | 13 +++- .../network/src/subnet_service/tests/mod.rs | 59 ++++++++----------- 2 files changed, 35 insertions(+), 37 deletions(-) diff --git a/beacon_node/network/src/subnet_service/mod.rs b/beacon_node/network/src/subnet_service/mod.rs index 33ae567eb3..de90e22254 100644 --- a/beacon_node/network/src/subnet_service/mod.rs +++ b/beacon_node/network/src/subnet_service/mod.rs @@ -216,6 +216,12 @@ impl SubnetService { || self.permanent_attestation_subscriptions.contains(subnet) } + /// Returns whether we are subscribed to a permanent subnet for testing purposes. + #[cfg(test)] + pub(crate) fn is_subscribed_permanent(&self, subnet: &Subnet) -> bool { + self.permanent_attestation_subscriptions.contains(subnet) + } + /// Processes a list of validator subscriptions. 
/// /// This is fundamentally called form the HTTP API when a validator requests duties from us @@ -629,9 +635,10 @@ impl Stream for SubnetService { // expire subscription. match self.scheduled_subscriptions.poll_next_unpin(cx) { Poll::Ready(Some(Ok(exact_subnet))) => { - let ExactSubnet { subnet, .. } = exact_subnet; - let current_slot = self.beacon_chain.slot_clock.now().unwrap_or_default(); - if let Err(e) = self.subscribe_to_subnet_immediately(subnet, current_slot + 1) { + let ExactSubnet { subnet, slot } = exact_subnet; + // Set the `end_slot` for the subscription to be `duty.slot + 1` so that we unsubscribe + // only at the end of the duty slot. + if let Err(e) = self.subscribe_to_subnet_immediately(subnet, slot + 1) { debug!(self.log, "Failed to subscribe to short lived subnet"; "subnet" => ?subnet, "err" => e); } self.waker diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 7283b4af31..0f3343df63 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -7,9 +7,6 @@ use beacon_chain::{ }; use genesis::{generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use lighthouse_network::NetworkConfig; -use logging::test_logger; -use slog::{o, Drain, Logger}; -use sloggers::{null::NullLoggerBuilder, Build}; use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::sync::{Arc, LazyLock}; use std::time::{Duration, SystemTime}; @@ -21,10 +18,6 @@ use types::{ SyncCommitteeSubscription, SyncSubnetId, ValidatorSubscription, }; -// Set to enable/disable logging -// const TEST_LOG_LEVEL: Option = Some(slog::Level::Debug); -const TEST_LOG_LEVEL: Option = None; - const SLOT_DURATION_MILLIS: u64 = 400; type TestBeaconChainType = Witness< @@ -46,7 +39,7 @@ impl TestBeaconChain { let keypairs = generate_deterministic_keypairs(1); - let log = get_logger(TEST_LOG_LEVEL); + let log = logging::test_logger(); let 
store = HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone(), log.clone()).unwrap(); @@ -98,28 +91,10 @@ pub fn recent_genesis_time() -> u64 { .as_secs() } -fn get_logger(log_level: Option) -> Logger { - if let Some(level) = log_level { - let drain = { - let decorator = slog_term::TermDecorator::new().build(); - let decorator = - logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).chan_size(2048).build(); - drain.filter_level(level) - }; - - Logger::root(drain.fuse(), o!()) - } else { - let builder = NullLoggerBuilder; - builder.build().expect("should build logger") - } -} - static CHAIN: LazyLock = LazyLock::new(TestBeaconChain::new_with_system_clock); fn get_subnet_service() -> SubnetService { - let log = test_logger(); + let log = logging::test_logger(); let config = NetworkConfig::default(); let beacon_chain = CHAIN.chain.clone(); @@ -501,8 +476,6 @@ mod test { let committee_count = 1; // Makes 3 validator subscriptions to the same subnet but at different slots. - // There should be just 1 unsubscription event for each of the later slots subscriptions - // (subscription_slot2 and subscription_slot3). 
let subscription_slot1 = 0; let subscription_slot2 = MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD + 4; let subscription_slot3 = subscription_slot2 * 2; @@ -585,7 +558,7 @@ mod test { let expected_unsubscription = SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id1)); - if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { + if !subnet_service.is_subscribed_permanent(&Subnet::Attestation(subnet_id1)) { assert_eq!(expected_subscription, events[0]); assert_eq!(expected_unsubscription, events[2]); } @@ -607,9 +580,18 @@ mod test { assert_eq!(no_events, []); - let second_subscribe_event = get_events(&mut subnet_service, None, 2).await; + let subscription_end_slot = current_slot + subscription_slot2 + 2; // +1 to get to the end of the duty slot, +1 for the slot to complete + let wait_slots = subnet_service + .beacon_chain + .slot_clock + .duration_to_slot(subscription_end_slot) + .unwrap() + .as_millis() as u64 + / SLOT_DURATION_MILLIS; + + let second_subscribe_event = get_events(&mut subnet_service, None, wait_slots as u32).await; // If the permanent and short lived subnets are different, we should get an unsubscription event. 
- if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { + if !subnet_service.is_subscribed_permanent(&Subnet::Attestation(subnet_id1)) { assert_eq!( [ expected_subscription.clone(), @@ -633,9 +615,18 @@ mod test { assert_eq!(no_events, []); - let third_subscribe_event = get_events(&mut subnet_service, None, 2).await; + let subscription_end_slot = current_slot + subscription_slot3 + 2; // +1 to get to the end of the duty slot, +1 for the slot to complete + let wait_slots = subnet_service + .beacon_chain + .slot_clock + .duration_to_slot(subscription_end_slot) + .unwrap() + .as_millis() as u64 + / SLOT_DURATION_MILLIS; - if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { + let third_subscribe_event = get_events(&mut subnet_service, None, wait_slots as u32).await; + + if !subnet_service.is_subscribed_permanent(&Subnet::Attestation(subnet_id1)) { assert_eq!( [expected_subscription, expected_unsubscription], third_subscribe_event[..] From 55d1e754b4ba3952a23874e29cf8565a079d88e2 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 3 Feb 2025 03:07:39 -0300 Subject: [PATCH 26/52] Subscribe to PeerDAS topics on Fulu fork (#6849) `TODO(das)` now that PeerDAS is scheduled in a hard fork we can subscribe to its topics on the fork activation. In current stable we subscribe to PeerDAS topics as soon as the node starts if PeerDAS is scheduled. This PR adds another todo to unsubscribe to blob topics at the fork. This other PR included solution for that, but I can include it in a separate PR - https://github.com/sigp/lighthouse/pull/5899/files Include PeerDAS topics as part of Fulu fork in `fork_core_topics`. 
--- .../lighthouse_network/src/service/mod.rs | 8 +++- .../lighthouse_network/src/types/globals.rs | 9 ++++ .../lighthouse_network/src/types/mod.rs | 4 +- .../lighthouse_network/src/types/topics.rs | 47 ++++++++++++++++--- beacon_node/network/src/service.rs | 44 ++--------------- 5 files changed, 62 insertions(+), 50 deletions(-) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 354def79b0..8586fd9cd3 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -708,11 +708,17 @@ impl Network { } // Subscribe to core topics for the new fork - for kind in fork_core_topics::(&new_fork, &self.fork_context.spec) { + for kind in fork_core_topics::( + &new_fork, + &self.fork_context.spec, + &self.network_globals.as_topic_config(), + ) { let topic = GossipTopic::new(kind, GossipEncoding::default(), new_fork_digest); self.subscribe(topic); } + // TODO(das): unsubscribe from blob topics at the Fulu fork + // Register the new topics for metrics let topics_to_keep_metrics_for = attestation_sync_committee_topics::() .map(|gossip_kind| { diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index c9e84e2dd1..2800b75133 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -1,4 +1,5 @@ //! A collection of variables that are accessible outside of the network thread itself. 
+use super::TopicConfig; use crate::peer_manager::peerdb::PeerDB; use crate::rpc::{MetaData, MetaDataV3}; use crate::types::{BackFillState, SyncState}; @@ -183,6 +184,14 @@ impl NetworkGlobals { .collect::>() } + /// Returns the TopicConfig to compute the set of Gossip topics for a given fork + pub fn as_topic_config(&self) -> TopicConfig { + TopicConfig { + subscribe_all_data_column_subnets: self.config.subscribe_all_data_column_subnets, + sampling_subnets: &self.sampling_subnets, + } + } + /// TESTING ONLY. Build a dummy NetworkGlobals instance. pub fn new_test_globals( trusted_peers: Vec, diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index a1eedaef74..58ba7588b9 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -17,6 +17,6 @@ pub use subnet::{Subnet, SubnetDiscovery}; pub use sync_state::{BackFillState, SyncState}; pub use topics::{ attestation_sync_committee_topics, core_topics_to_subscribe, fork_core_topics, - subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, ALTAIR_CORE_TOPICS, - BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, + subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, TopicConfig, + ALTAIR_CORE_TOPICS, BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, }; diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 2c79f93423..171dab09a3 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -1,5 +1,6 @@ use gossipsub::{IdentTopic as Topic, TopicHash}; use serde::{Deserialize, Serialize}; +use std::collections::HashSet; use strum::AsRefStr; use types::{ChainSpec, DataColumnSubnetId, EthSpec, ForkName, SubnetId, SyncSubnetId, Unsigned}; @@ -41,8 +42,18 @@ pub const LIGHT_CLIENT_GOSSIP_TOPICS: [GossipKind; 2] = [ 
GossipKind::LightClientOptimisticUpdate, ]; +#[derive(Debug)] +pub struct TopicConfig<'a> { + pub subscribe_all_data_column_subnets: bool, + pub sampling_subnets: &'a HashSet, +} + /// Returns the core topics associated with each fork that are new to the previous fork -pub fn fork_core_topics(fork_name: &ForkName, spec: &ChainSpec) -> Vec { +pub fn fork_core_topics( + fork_name: &ForkName, + spec: &ChainSpec, + topic_config: &TopicConfig, +) -> Vec { match fork_name { ForkName::Base => BASE_CORE_TOPICS.to_vec(), ForkName::Altair => ALTAIR_CORE_TOPICS.to_vec(), @@ -64,7 +75,21 @@ pub fn fork_core_topics(fork_name: &ForkName, spec: &ChainSpec) -> V } electra_blob_topics } - ForkName::Fulu => vec![], + ForkName::Fulu => { + let mut topics = vec![]; + if topic_config.subscribe_all_data_column_subnets { + for column_subnet in 0..spec.data_column_sidecar_subnet_count { + topics.push(GossipKind::DataColumnSidecar(DataColumnSubnetId::new( + column_subnet, + ))); + } + } else { + for column_subnet in topic_config.sampling_subnets { + topics.push(GossipKind::DataColumnSidecar(*column_subnet)); + } + } + topics + } } } @@ -84,10 +109,11 @@ pub fn attestation_sync_committee_topics() -> impl Iterator( mut current_fork: ForkName, spec: &ChainSpec, + topic_config: &TopicConfig, ) -> Vec { - let mut topics = fork_core_topics::(¤t_fork, spec); + let mut topics = fork_core_topics::(¤t_fork, spec, topic_config); while let Some(previous_fork) = current_fork.previous_fork() { - let previous_fork_topics = fork_core_topics::(&previous_fork, spec); + let previous_fork_topics = fork_core_topics::(&previous_fork, spec, topic_config); topics.extend(previous_fork_topics); current_fork = previous_fork; } @@ -475,8 +501,15 @@ mod tests { type E = MainnetEthSpec; let spec = E::default_spec(); let mut all_topics = Vec::new(); - let mut electra_core_topics = fork_core_topics::(&ForkName::Electra, &spec); - let mut deneb_core_topics = fork_core_topics::(&ForkName::Deneb, &spec); + let topic_config 
= TopicConfig { + subscribe_all_data_column_subnets: false, + sampling_subnets: &HashSet::from_iter([1, 2].map(DataColumnSubnetId::new)), + }; + let mut fulu_core_topics = fork_core_topics::(&ForkName::Fulu, &spec, &topic_config); + let mut electra_core_topics = + fork_core_topics::(&ForkName::Electra, &spec, &topic_config); + let mut deneb_core_topics = fork_core_topics::(&ForkName::Deneb, &spec, &topic_config); + all_topics.append(&mut fulu_core_topics); all_topics.append(&mut electra_core_topics); all_topics.append(&mut deneb_core_topics); all_topics.extend(CAPELLA_CORE_TOPICS); @@ -484,7 +517,7 @@ mod tests { all_topics.extend(BASE_CORE_TOPICS); let latest_fork = *ForkName::list_all().last().unwrap(); - let core_topics = core_topics_to_subscribe::(latest_fork, &spec); + let core_topics = core_topics_to_subscribe::(latest_fork, &spec, &topic_config); // Need to check all the topics exist in an order independent manner for topic in all_topics { assert!(core_topics.contains(&topic)); diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 49f73bf9c8..1b2a681c64 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -33,8 +33,8 @@ use task_executor::ShutdownReason; use tokio::sync::mpsc; use tokio::time::Sleep; use types::{ - ChainSpec, DataColumnSubnetId, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, - SyncSubnetId, Unsigned, ValidatorSubscription, + ChainSpec, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, + Unsigned, ValidatorSubscription, }; mod tests; @@ -181,8 +181,6 @@ pub struct NetworkService { next_fork_subscriptions: Pin>>, /// A delay that expires when we need to unsubscribe from old fork topics. next_unsubscribe: Pin>>, - /// Subscribe to all the data column subnets. - subscribe_all_data_column_subnets: bool, /// Subscribe to all the subnets once synced. subscribe_all_subnets: bool, /// Shutdown beacon node after sync is complete. 
@@ -349,7 +347,6 @@ impl NetworkService { next_fork_update, next_fork_subscriptions, next_unsubscribe, - subscribe_all_data_column_subnets: config.subscribe_all_data_column_subnets, subscribe_all_subnets: config.subscribe_all_subnets, shutdown_after_sync: config.shutdown_after_sync, metrics_enabled: config.metrics_enabled, @@ -717,6 +714,7 @@ impl NetworkService { for topic_kind in core_topics_to_subscribe::( self.fork_context.current_fork(), &self.fork_context.spec, + &self.network_globals.as_topic_config(), ) { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new( @@ -751,10 +749,6 @@ impl NetworkService { } } - if self.fork_context.spec.is_peer_das_scheduled() { - self.subscribe_to_peer_das_topics(&mut subscribed_topics); - } - // If we are to subscribe to all subnets we do it here if self.subscribe_all_subnets { for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { @@ -801,37 +795,6 @@ impl NetworkService { } } - /// Keeping these separate from core topics because it has custom logic: - /// 1. Data column subscription logic depends on subscription configuration. - /// 2. Data column topic subscriptions will be dynamic based on validator balances due to - /// validator custody. - /// - /// TODO(das): The downside with not including it in core fork topic is - we subscribe to - /// PeerDAS topics on startup if Fulu is scheduled, rather than waiting until the fork. - /// If this is an issue we could potentially consider adding the logic to - /// `network.subscribe_new_fork_topics()`. 
- fn subscribe_to_peer_das_topics(&mut self, subscribed_topics: &mut Vec) { - let column_subnets_to_subscribe = if self.subscribe_all_data_column_subnets { - &(0..self.fork_context.spec.data_column_sidecar_subnet_count) - .map(DataColumnSubnetId::new) - .collect() - } else { - &self.network_globals.sampling_subnets - }; - - for column_subnet in column_subnets_to_subscribe.iter() { - for fork_digest in self.required_gossip_fork_digests() { - let gossip_kind = Subnet::DataColumn(*column_subnet).into(); - let topic = GossipTopic::new(gossip_kind, GossipEncoding::default(), fork_digest); - if self.libp2p.subscribe(topic.clone()) { - subscribed_topics.push(topic); - } else { - warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); - } - } - } - } - /// Handle a message sent to the network service. async fn on_validator_subscription_msg(&mut self, msg: ValidatorSubscriptionMessage) { match msg { @@ -947,6 +910,7 @@ impl NetworkService { let core_topics = core_topics_to_subscribe::( self.fork_context.current_fork(), &self.fork_context.spec, + &self.network_globals.as_topic_config(), ); let core_topics: HashSet<&GossipKind> = HashSet::from_iter(&core_topics); let subscriptions = self.network_globals.gossipsub_subscriptions.read(); From 1e2b547b359b25b2ef012493e83413691d4883ab Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 3 Feb 2025 09:07:42 +0300 Subject: [PATCH 27/52] Add builder SSZ flow (#6859) --- Cargo.lock | 2 + beacon_node/builder_client/Cargo.toml | 2 + beacon_node/builder_client/src/lib.rs | 179 +++++++++++++++++- beacon_node/execution_layer/src/lib.rs | 17 +- .../src/test_utils/mock_builder.rs | 95 ++++++++-- beacon_node/store/src/hot_cold_store.rs | 4 +- common/eth2/src/lib.rs | 3 +- common/eth2/src/types.rs | 61 +++++- consensus/types/src/builder_bid.rs | 58 +++++- consensus/types/src/execution_payload.rs | 9 +- .../types/src/fork_versioned_response.rs | 5 + consensus/types/src/lib.rs | 4 +- consensus/types/src/signed_beacon_block.rs | 
21 +- testing/ef_tests/src/cases/operations.rs | 8 +- 14 files changed, 406 insertions(+), 62 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index af27fb11c8..cf7793fbb1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1136,10 +1136,12 @@ name = "builder_client" version = "0.1.0" dependencies = [ "eth2", + "ethereum_ssz", "lighthouse_version", "reqwest", "sensitive_url", "serde", + "serde_json", ] [[package]] diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index 3531e81c84..1920bd0ebb 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -6,7 +6,9 @@ authors = ["Sean Anderson "] [dependencies] eth2 = { workspace = true } +ethereum_ssz = { workspace = true } lighthouse_version = { workspace = true } reqwest = { workspace = true } sensitive_url = { workspace = true } serde = { workspace = true } +serde_json = { workspace = true } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 91ee00a65f..5f64ac7e43 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -1,16 +1,24 @@ use eth2::types::builder_bid::SignedBuilderBid; +use eth2::types::fork_versioned_response::EmptyMetadata; use eth2::types::{ - EthSpec, ExecutionBlockHash, ForkVersionedResponse, PublicKeyBytes, - SignedValidatorRegistrationData, Slot, + ContentType, EthSpec, ExecutionBlockHash, ForkName, ForkVersionDecode, ForkVersionDeserialize, + ForkVersionedResponse, PublicKeyBytes, SignedValidatorRegistrationData, Slot, }; use eth2::types::{FullPayloadContents, SignedBlindedBeaconBlock}; pub use eth2::Error; -use eth2::{ok_or_error, StatusCode, CONSENSUS_VERSION_HEADER}; -use reqwest::header::{HeaderMap, HeaderValue}; +use eth2::{ + ok_or_error, StatusCode, CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, + JSON_CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER, +}; +use reqwest::header::{HeaderMap, HeaderValue, ACCEPT}; use reqwest::{IntoUrl, 
Response}; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde::Serialize; +use ssz::Encode; +use std::str::FromStr; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; use std::time::Duration; pub const DEFAULT_TIMEOUT_MILLIS: u64 = 15000; @@ -49,6 +57,7 @@ pub struct BuilderHttpClient { server: SensitiveUrl, timeouts: Timeouts, user_agent: String, + ssz_enabled: Arc, } impl BuilderHttpClient { @@ -64,6 +73,7 @@ impl BuilderHttpClient { server, timeouts: Timeouts::new(builder_header_timeout), user_agent, + ssz_enabled: Arc::new(false.into()), }) } @@ -71,6 +81,78 @@ impl BuilderHttpClient { &self.user_agent } + fn fork_name_from_header(&self, headers: &HeaderMap) -> Result, String> { + headers + .get(CONSENSUS_VERSION_HEADER) + .map(|fork_name| { + fork_name + .to_str() + .map_err(|e| e.to_string()) + .and_then(ForkName::from_str) + }) + .transpose() + } + + fn content_type_from_header(&self, headers: &HeaderMap) -> ContentType { + let Some(content_type) = headers.get(CONTENT_TYPE_HEADER).map(|content_type| { + let content_type = content_type.to_str(); + match content_type { + Ok(SSZ_CONTENT_TYPE_HEADER) => ContentType::Ssz, + _ => ContentType::Json, + } + }) else { + return ContentType::Json; + }; + content_type + } + + async fn get_with_header< + T: DeserializeOwned + ForkVersionDecode + ForkVersionDeserialize, + U: IntoUrl, + >( + &self, + url: U, + timeout: Duration, + headers: HeaderMap, + ) -> Result, Error> { + let response = self + .get_response_with_header(url, Some(timeout), headers) + .await?; + + let headers = response.headers().clone(); + let response_bytes = response.bytes().await?; + + let Ok(Some(fork_name)) = self.fork_name_from_header(&headers) else { + // if no fork version specified, attempt to fallback to JSON + self.ssz_enabled.store(false, Ordering::SeqCst); + return serde_json::from_slice(&response_bytes).map_err(Error::InvalidJson); + }; + + let content_type = 
self.content_type_from_header(&headers); + + match content_type { + ContentType::Ssz => { + self.ssz_enabled.store(true, Ordering::SeqCst); + T::from_ssz_bytes_by_fork(&response_bytes, fork_name) + .map(|data| ForkVersionedResponse { + version: Some(fork_name), + metadata: EmptyMetadata {}, + data, + }) + .map_err(Error::InvalidSsz) + } + ContentType::Json => { + self.ssz_enabled.store(false, Ordering::SeqCst); + serde_json::from_slice(&response_bytes).map_err(Error::InvalidJson) + } + } + } + + /// Return `true` if the most recently received response from the builder had SSZ Content-Type. + pub fn is_ssz_enabled(&self) -> bool { + self.ssz_enabled.load(Ordering::SeqCst) + } + async fn get_with_timeout( &self, url: U, @@ -83,6 +165,21 @@ impl BuilderHttpClient { .map_err(Into::into) } + /// Perform a HTTP GET request, returning the `Response` for further processing. + async fn get_response_with_header( + &self, + url: U, + timeout: Option, + headers: HeaderMap, + ) -> Result { + let mut builder = self.client.get(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let response = builder.headers(headers).send().await.map_err(Error::from)?; + ok_or_error(response).await + } + /// Perform a HTTP GET request, returning the `Response` for further processing. 
async fn get_response_with_timeout( &self, @@ -112,6 +209,32 @@ impl BuilderHttpClient { ok_or_error(response).await } + async fn post_ssz_with_raw_response( + &self, + url: U, + ssz_body: Vec, + mut headers: HeaderMap, + timeout: Option, + ) -> Result { + let mut builder = self.client.post(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + + headers.insert( + CONTENT_TYPE_HEADER, + HeaderValue::from_static(SSZ_CONTENT_TYPE_HEADER), + ); + + let response = builder + .headers(headers) + .body(ssz_body) + .send() + .await + .map_err(Error::from)?; + ok_or_error(response).await + } + async fn post_with_raw_response( &self, url: U, @@ -152,6 +275,42 @@ impl BuilderHttpClient { Ok(()) } + /// `POST /eth/v1/builder/blinded_blocks` with SSZ serialized request body + pub async fn post_builder_blinded_blocks_ssz( + &self, + blinded_block: &SignedBlindedBeaconBlock, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + let body = blinded_block.as_ssz_bytes(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("builder") + .push("blinded_blocks"); + + let mut headers = HeaderMap::new(); + if let Ok(value) = HeaderValue::from_str(&blinded_block.fork_name_unchecked().to_string()) { + headers.insert(CONSENSUS_VERSION_HEADER, value); + } + + let result = self + .post_ssz_with_raw_response( + path, + body, + headers, + Some(self.timeouts.post_blinded_blocks), + ) + .await? 
+ .bytes() + .await?; + + FullPayloadContents::from_ssz_bytes_by_fork(&result, blinded_block.fork_name_unchecked()) + .map_err(Error::InvalidSsz) + } + /// `POST /eth/v1/builder/blinded_blocks` pub async fn post_builder_blinded_blocks( &self, @@ -202,7 +361,17 @@ impl BuilderHttpClient { .push(format!("{parent_hash:?}").as_str()) .push(pubkey.as_hex_string().as_str()); - let resp = self.get_with_timeout(path, self.timeouts.get_header).await; + let mut headers = HeaderMap::new(); + if let Ok(ssz_content_type_header) = HeaderValue::from_str(&format!( + "{}; q=1.0,{}; q=0.9", + SSZ_CONTENT_TYPE_HEADER, JSON_CONTENT_TYPE_HEADER + )) { + headers.insert(ACCEPT, ssz_content_type_header); + }; + + let resp = self + .get_with_header(path, self.timeouts.get_header, headers) + .await; if matches!(resp, Err(Error::StatusCode(StatusCode::NO_CONTENT))) { Ok(None) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index d5fef4c5aa..4e0fe1de16 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1900,11 +1900,18 @@ impl ExecutionLayer { if let Some(builder) = self.builder() { let (payload_result, duration) = timed_future(metrics::POST_BLINDED_PAYLOAD_BUILDER, async { - builder - .post_builder_blinded_blocks(block) - .await - .map_err(Error::Builder) - .map(|d| d.data) + if builder.is_ssz_enabled() { + builder + .post_builder_blinded_blocks_ssz(block) + .await + .map_err(Error::Builder) + } else { + builder + .post_builder_blinded_blocks(block) + .await + .map_err(Error::Builder) + .map(|d| d.data) + } }) .await; diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 3540909fe4..f07ee7ac6f 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,15 +1,20 @@ use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, 
DEFAULT_JWT_SECRET}; use crate::{Config, ExecutionLayer, PayloadAttributes, PayloadParameters}; +use bytes::Bytes; use eth2::types::PublishBlockRequest; use eth2::types::{ BlobsBundle, BlockId, BroadcastValidation, EventKind, EventTopic, FullPayloadContents, ProposerData, StateId, ValidatorId, }; -use eth2::{BeaconNodeHttpClient, Timeouts, CONSENSUS_VERSION_HEADER}; +use eth2::{ + BeaconNodeHttpClient, Timeouts, CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, + SSZ_CONTENT_TYPE_HEADER, +}; use fork_choice::ForkchoiceUpdateParameters; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; use slog::{debug, error, info, warn, Logger}; +use ssz::Encode; use std::collections::HashMap; use std::fmt::Debug; use std::future::Future; @@ -26,11 +31,12 @@ use types::builder_bid::{ }; use types::{ Address, BeaconState, ChainSpec, Epoch, EthSpec, ExecPayload, ExecutionPayload, - ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionedResponse, Hash256, - PublicKeyBytes, Signature, SignedBlindedBeaconBlock, SignedRoot, - SignedValidatorRegistrationData, Slot, Uint256, + ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionDecode, + ForkVersionedResponse, Hash256, PublicKeyBytes, Signature, SignedBlindedBeaconBlock, + SignedRoot, SignedValidatorRegistrationData, Slot, Uint256, }; use types::{ExecutionBlockHash, SecretKey}; +use warp::reply::{self, Reply}; use warp::{Filter, Rejection}; pub const DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42); @@ -955,6 +961,33 @@ pub fn serve( ) .boxed(); + let blinded_block_ssz = prefix + .and(warp::path("blinded_blocks")) + .and(warp::body::bytes()) + .and(warp::header::header::(CONSENSUS_VERSION_HEADER)) + .and(warp::path::end()) + .and(ctx_filter.clone()) + .and_then( + |block_bytes: Bytes, fork_name: ForkName, builder: MockBuilder| async move { + let block = + SignedBlindedBeaconBlock::::from_ssz_bytes_by_fork(&block_bytes, fork_name) + .map_err(|e| warp::reject::custom(Custom(format!("{:?}", 
e))))?; + let payload = builder + .submit_blinded_block(block) + .await + .map_err(|e| warp::reject::custom(Custom(e)))?; + + Ok::<_, warp::reject::Rejection>( + warp::http::Response::builder() + .status(200) + .body(payload.as_ssz_bytes()) + .map(add_ssz_content_type_header) + .map(|res| add_consensus_version_header(res, fork_name)) + .unwrap(), + ) + }, + ); + let blinded_block = prefix .and(warp::path("blinded_blocks")) @@ -1007,35 +1040,47 @@ pub fn serve( ) .and(warp::path::end()) .and(ctx_filter.clone()) + .and(warp::header::optional::("accept")) .and_then( |slot: Slot, parent_hash: ExecutionBlockHash, pubkey: PublicKeyBytes, - builder: MockBuilder| async move { + builder: MockBuilder, + accept_header: Option| async move { let fork_name = builder.fork_name_at_slot(slot); let signed_bid = builder .get_header(slot, parent_hash, pubkey) .await .map_err(|e| warp::reject::custom(Custom(e)))?; - - let resp: ForkVersionedResponse<_> = ForkVersionedResponse { - version: Some(fork_name), - metadata: Default::default(), - data: signed_bid, - }; - let json_bid = serde_json::to_string(&resp) - .map_err(|_| reject("coudn't serialize signed bid"))?; - Ok::<_, Rejection>( - warp::http::Response::builder() - .status(200) - .body(json_bid) - .unwrap(), - ) + let accept_header = accept_header.unwrap_or(eth2::types::Accept::Any); + match accept_header { + eth2::types::Accept::Ssz => Ok::<_, Rejection>( + warp::http::Response::builder() + .status(200) + .body(signed_bid.as_ssz_bytes()) + .map(add_ssz_content_type_header) + .map(|res| add_consensus_version_header(res, fork_name)) + .unwrap(), + ), + eth2::types::Accept::Json | eth2::types::Accept::Any => { + let resp: ForkVersionedResponse<_> = ForkVersionedResponse { + version: Some(fork_name), + metadata: Default::default(), + data: signed_bid, + }; + Ok::<_, Rejection>(warp::reply::json(&resp).into_response()) + } + } }, ); let routes = warp::post() - .and(validators.or(blinded_block)) + // Routes which expect 
`application/octet-stream` go within this `and`. + .and( + warp::header::exact(CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER) + .and(blinded_block_ssz), + ) + .or(validators.or(blinded_block)) .or(warp::get().and(status).or(header)) .map(|reply| warp::reply::with_header(reply, "Server", "lighthouse-mock-builder-server")); @@ -1048,3 +1093,13 @@ pub fn serve( fn reject(msg: &'static str) -> Rejection { warp::reject::custom(Custom(msg.to_string())) } + +/// Add the 'Content-Type application/octet-stream` header to a response. +fn add_ssz_content_type_header(reply: T) -> warp::reply::Response { + reply::with_header(reply, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER).into_response() +} + +/// Add the `Eth-Consensus-Version` header to a response. +fn add_consensus_version_header(reply: T, fork_name: ForkName) -> warp::reply::Response { + reply::with_header(reply, CONSENSUS_VERSION_HEADER, fork_name.to_string()).into_response() +} diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 134be9ec0d..128c03f771 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -666,7 +666,9 @@ impl, Cold: ItemStore> HotColdDB .hot_db .get_bytes(ExecutionPayload::::db_column(), key)? 
{ - Some(bytes) => Ok(Some(ExecutionPayload::from_ssz_bytes(&bytes, fork_name)?)), + Some(bytes) => Ok(Some(ExecutionPayload::from_ssz_bytes_by_fork( + &bytes, fork_name, + )?)), None => Ok(None), } } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index b86aa62765..73e9d57abc 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -49,6 +49,7 @@ pub const CONSENSUS_BLOCK_VALUE_HEADER: &str = "Eth-Consensus-Block-Value"; pub const CONTENT_TYPE_HEADER: &str = "Content-Type"; pub const SSZ_CONTENT_TYPE_HEADER: &str = "application/octet-stream"; +pub const JSON_CONTENT_TYPE_HEADER: &str = "application/json"; #[derive(Debug)] pub enum Error { @@ -112,9 +113,9 @@ impl Error { Error::InvalidSignatureHeader => None, Error::MissingSignatureHeader => None, Error::InvalidJson(_) => None, + Error::InvalidSsz(_) => None, Error::InvalidServerSentEvent(_) => None, Error::InvalidHeaders(_) => None, - Error::InvalidSsz(_) => None, Error::TokenReadError(..) => None, Error::NoServerPubkey | Error::NoToken => None, } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index c6e95e1ba3..59374f629d 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1664,7 +1664,7 @@ impl FullBlockContents { } /// SSZ decode with fork variant determined by slot. - pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { + pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { let slot_len = ::ssz_fixed_len(); let slot_bytes = bytes .get(0..slot_len) @@ -1678,10 +1678,7 @@ impl FullBlockContents { } /// SSZ decode with fork variant passed in explicitly. - pub fn from_ssz_bytes_for_fork( - bytes: &[u8], - fork_name: ForkName, - ) -> Result { + pub fn from_ssz_bytes_for_fork(bytes: &[u8], fork_name: ForkName) -> Result { if fork_name.deneb_enabled() { let mut builder = ssz::SszDecoderBuilder::new(bytes); @@ -1836,7 +1833,7 @@ impl PublishBlockRequest { } /// SSZ decode with fork variant determined by `fork_name`. 
- pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { + pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { if fork_name.deneb_enabled() { let mut builder = ssz::SszDecoderBuilder::new(bytes); builder.register_anonymous_variable_length_item()?; @@ -1845,7 +1842,7 @@ impl PublishBlockRequest { let mut decoder = builder.build()?; let block = decoder.decode_next_with(|bytes| { - SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + SignedBeaconBlock::from_ssz_bytes_by_fork(bytes, fork_name) })?; let kzg_proofs = decoder.decode_next()?; let blobs = decoder.decode_next()?; @@ -1854,7 +1851,7 @@ impl PublishBlockRequest { Some((kzg_proofs, blobs)), )) } else { - SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + SignedBeaconBlock::from_ssz_bytes_by_fork(bytes, fork_name) .map(|block| PublishBlockRequest::Block(Arc::new(block))) } } @@ -1946,6 +1943,24 @@ pub enum FullPayloadContents { PayloadAndBlobs(ExecutionPayloadAndBlobs), } +impl ForkVersionDecode for FullPayloadContents { + fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result { + if fork_name.deneb_enabled() { + Ok(Self::PayloadAndBlobs( + ExecutionPayloadAndBlobs::from_ssz_bytes_by_fork(bytes, fork_name)?, + )) + } else if fork_name.bellatrix_enabled() { + Ok(Self::Payload(ExecutionPayload::from_ssz_bytes_by_fork( + bytes, fork_name, + )?)) + } else { + Err(ssz::DecodeError::BytesInvalid(format!( + "FullPayloadContents decoding for {fork_name} not implemented" + ))) + } + } +} + impl FullPayloadContents { pub fn new( execution_payload: ExecutionPayload, @@ -2012,6 +2027,36 @@ pub struct ExecutionPayloadAndBlobs { pub blobs_bundle: BlobsBundle, } +impl ForkVersionDecode for ExecutionPayloadAndBlobs { + fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result { + let mut builder = ssz::SszDecoderBuilder::new(bytes); + builder.register_anonymous_variable_length_item()?; + builder.register_type::>()?; + let mut decoder = 
builder.build()?; + + if fork_name.deneb_enabled() { + let execution_payload = decoder.decode_next_with(|bytes| { + ExecutionPayload::from_ssz_bytes_by_fork(bytes, fork_name) + })?; + let blobs_bundle = decoder.decode_next()?; + Ok(Self { + execution_payload, + blobs_bundle, + }) + } else { + Err(DecodeError::BytesInvalid(format!( + "ExecutionPayloadAndBlobs decoding for {fork_name} not implemented" + ))) + } + } +} + +#[derive(Debug)] +pub enum ContentType { + Json, + Ssz, +} + #[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Encode, Decode)] #[serde(bound = "E: EthSpec")] pub struct BlobsBundle { diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index ac53c41216..49911c3909 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -3,25 +3,37 @@ use crate::{ ChainSpec, EthSpec, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, - ForkVersionDeserialize, SignedRoot, Uint256, + ForkVersionDecode, ForkVersionDeserialize, SignedRoot, Uint256, }; use bls::PublicKeyBytes; use bls::Signature; use serde::{Deserialize, Deserializer, Serialize}; +use ssz::Decode; +use ssz_derive::{Decode, Encode}; use superstruct::superstruct; use tree_hash_derive::TreeHash; #[superstruct( variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( - derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone), + derive( + PartialEq, + Debug, + Encode, + Serialize, + Deserialize, + TreeHash, + Decode, + Clone + ), serde(bound = "E: EthSpec", deny_unknown_fields) ), map_ref_into(ExecutionPayloadHeaderRef), map_ref_mut_into(ExecutionPayloadHeaderRefMut) )] -#[derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone)] +#[derive(PartialEq, Debug, Encode, Serialize, Deserialize, TreeHash, Clone)] 
#[serde(bound = "E: EthSpec", deny_unknown_fields, untagged)] +#[ssz(enum_behaviour = "transparent")] #[tree_hash(enum_behaviour = "transparent")] pub struct BuilderBid { #[superstruct(only(Bellatrix), partial_getter(rename = "header_bellatrix"))] @@ -65,16 +77,54 @@ impl<'a, E: EthSpec> BuilderBidRefMut<'a, E> { } } +impl ForkVersionDecode for BuilderBid { + /// SSZ decode with explicit fork variant. + fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result { + let builder_bid = match fork_name { + ForkName::Altair | ForkName::Base => { + return Err(ssz::DecodeError::BytesInvalid(format!( + "unsupported fork for ExecutionPayloadHeader: {fork_name}", + ))) + } + ForkName::Bellatrix => { + BuilderBid::Bellatrix(BuilderBidBellatrix::from_ssz_bytes(bytes)?) + } + ForkName::Capella => BuilderBid::Capella(BuilderBidCapella::from_ssz_bytes(bytes)?), + ForkName::Deneb => BuilderBid::Deneb(BuilderBidDeneb::from_ssz_bytes(bytes)?), + ForkName::Electra => BuilderBid::Electra(BuilderBidElectra::from_ssz_bytes(bytes)?), + ForkName::Fulu => BuilderBid::Fulu(BuilderBidFulu::from_ssz_bytes(bytes)?), + }; + Ok(builder_bid) + } +} + impl SignedRoot for BuilderBid {} /// Validator registration, for use in interacting with servers implementing the builder API. -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +#[derive(PartialEq, Debug, Encode, Serialize, Deserialize, Clone)] #[serde(bound = "E: EthSpec")] pub struct SignedBuilderBid { pub message: BuilderBid, pub signature: Signature, } +impl ForkVersionDecode for SignedBuilderBid { + /// SSZ decode with explicit fork variant. 
+ fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result { + let mut builder = ssz::SszDecoderBuilder::new(bytes); + + builder.register_anonymous_variable_length_item()?; + builder.register_type::()?; + + let mut decoder = builder.build()?; + let message = decoder + .decode_next_with(|bytes| BuilderBid::from_ssz_bytes_by_fork(bytes, fork_name))?; + let signature = decoder.decode_next()?; + + Ok(Self { message, signature }) + } +} + impl ForkVersionDeserialize for BuilderBid { fn deserialize_by_fork<'de, D: Deserializer<'de>>( value: serde_json::value::Value, diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 2df66343af..5d756c8529 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -40,7 +40,7 @@ pub type Withdrawals = VariableList::MaxWithdrawal map_ref_into(ExecutionPayloadHeader) )] #[derive( - Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary, + Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, )] #[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(bound = "E: EthSpec", untagged)] @@ -102,8 +102,9 @@ impl<'a, E: EthSpec> ExecutionPayloadRef<'a, E> { } } -impl ExecutionPayload { - pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { +impl ForkVersionDecode for ExecutionPayload { + /// SSZ decode with explicit fork variant. + fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result { match fork_name { ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid(format!( "unsupported fork for ExecutionPayload: {fork_name}", @@ -117,7 +118,9 @@ impl ExecutionPayload { ForkName::Fulu => ExecutionPayloadFulu::from_ssz_bytes(bytes).map(Self::Fulu), } } +} +impl ExecutionPayload { #[allow(clippy::arithmetic_side_effects)] /// Returns the maximum size of an execution payload. 
pub fn max_execution_payload_bellatrix_size() -> usize { diff --git a/consensus/types/src/fork_versioned_response.rs b/consensus/types/src/fork_versioned_response.rs index cd78b5b3ca..7e4efd05d6 100644 --- a/consensus/types/src/fork_versioned_response.rs +++ b/consensus/types/src/fork_versioned_response.rs @@ -4,6 +4,11 @@ use serde::{Deserialize, Deserializer, Serialize}; use serde_json::value::Value; use std::sync::Arc; +pub trait ForkVersionDecode: Sized { + /// SSZ decode with explicit fork variant. + fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result; +} + pub trait ForkVersionDeserialize: Sized + DeserializeOwned { fn deserialize_by_fork<'de, D: Deserializer<'de>>( value: Value, diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 11d1f5271b..73a50b4ef3 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -178,7 +178,9 @@ pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; pub use crate::fork_name::{ForkName, InconsistentFork}; -pub use crate::fork_versioned_response::{ForkVersionDeserialize, ForkVersionedResponse}; +pub use crate::fork_versioned_response::{ + ForkVersionDecode, ForkVersionDeserialize, ForkVersionedResponse, +}; pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::{ diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index d9bf9bf55d..eb5925a29b 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -86,6 +86,17 @@ pub struct SignedBeaconBlock = FullP pub signature: Signature, } +impl> ForkVersionDecode + for SignedBeaconBlock +{ + /// SSZ decode with explicit fork variant. 
+ fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result { + Self::from_ssz_bytes_with(bytes, |bytes| { + BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + }) + } +} + pub type SignedBlindedBeaconBlock = SignedBeaconBlock>; impl> SignedBeaconBlock { @@ -108,16 +119,6 @@ impl> SignedBeaconBlock Self::from_ssz_bytes_with(bytes, |bytes| BeaconBlock::from_ssz_bytes(bytes, spec)) } - /// SSZ decode with explicit fork variant. - pub fn from_ssz_bytes_for_fork( - bytes: &[u8], - fork_name: ForkName, - ) -> Result { - Self::from_ssz_bytes_with(bytes, |bytes| { - BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) - }) - } - /// SSZ decode which attempts to decode all variants (slow). pub fn any_from_ssz_bytes(bytes: &[u8]) -> Result { Self::from_ssz_bytes_with(bytes, BeaconBlock::any_from_ssz_bytes) diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index adb5bee768..7178edb151 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -25,9 +25,9 @@ use std::fmt::Debug; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconBlockBody, BeaconBlockBodyBellatrix, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconState, - BlindedPayload, ConsolidationRequest, Deposit, DepositRequest, ExecutionPayload, FullPayload, - ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, SyncAggregate, - WithdrawalRequest, + BlindedPayload, ConsolidationRequest, Deposit, DepositRequest, ExecutionPayload, + ForkVersionDecode, FullPayload, ProposerSlashing, SignedBlsToExecutionChange, + SignedVoluntaryExit, SyncAggregate, WithdrawalRequest, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -398,7 +398,7 @@ impl Operation for WithdrawalsPayload { fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file_with(path, |bytes| { - ExecutionPayload::from_ssz_bytes(bytes, fork_name) + 
ExecutionPayload::from_ssz_bytes_by_fork(bytes, fork_name) }) .map(|payload| WithdrawalsPayload { payload: payload.into(), From 95cec45c3840864c37327abee2d06ad56a6a2ad0 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 3 Feb 2025 03:07:45 -0300 Subject: [PATCH 28/52] Use data column batch verification consistently (#6851) Resolve a `TODO(das)` to use KZG batch verification in `put_rpc_custody_columns` Uses `verify_kzg_for_data_column_list_with_scoring` in all paths that send more than one column. To use batch verification and have attributability of which peer is sending a bad column. Needs to move `verify_kzg_for_data_column_list_with_scoring` into the type's module to convert to the KZG verified type. --- .../src/data_availability_checker.rs | 57 +++++-------------- .../src/data_availability_checker/error.rs | 2 +- .../src/data_column_verification.rs | 44 ++++++++++++++ .../network/src/sync/block_lookups/mod.rs | 12 +++- 4 files changed, 69 insertions(+), 46 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index aa4689121c..f10d59ca1a 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -27,8 +27,8 @@ mod overflow_lru_cache; mod state_lru_cache; use crate::data_column_verification::{ - verify_kzg_for_data_column, verify_kzg_for_data_column_list, CustodyDataColumn, - GossipVerifiedDataColumn, KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn, + verify_kzg_for_data_column_list_with_scoring, CustodyDataColumn, GossipVerifiedDataColumn, + KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn, }; use crate::metrics::{ KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS, KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES, @@ -230,19 +230,14 @@ impl DataAvailabilityChecker { block_root: Hash256, custody_columns: DataColumnSidecarList, ) -> Result, 
AvailabilityCheckError> { - // TODO(das): report which column is invalid for proper peer scoring - // TODO(das): batch KZG verification here, but fallback into checking each column - // individually to report which column(s) are invalid. - let verified_custody_columns = custody_columns + // Attributes fault to the specific peer that sent an invalid column + let kzg_verified_columns = KzgVerifiedDataColumn::from_batch(custody_columns, &self.kzg) + .map_err(AvailabilityCheckError::InvalidColumn)?; + + let verified_custody_columns = kzg_verified_columns .into_iter() - .map(|column| { - let index = column.index; - Ok(KzgVerifiedCustodyDataColumn::from_asserted_custody( - KzgVerifiedDataColumn::new(column, &self.kzg) - .map_err(|e| AvailabilityCheckError::InvalidColumn(index, e))?, - )) - }) - .collect::, AvailabilityCheckError>>()?; + .map(KzgVerifiedCustodyDataColumn::from_asserted_custody) + .collect::>(); self.availability_cache.put_kzg_verified_data_columns( block_root, @@ -365,7 +360,8 @@ impl DataAvailabilityChecker { .iter() .map(|custody_column| custody_column.as_data_column()), &self.kzg, - )?; + ) + .map_err(AvailabilityCheckError::InvalidColumn)?; Ok(MaybeAvailableBlock::Available(AvailableBlock { block_root, block, @@ -432,8 +428,9 @@ impl DataAvailabilityChecker { // verify kzg for all data columns at once if !all_data_columns.is_empty() { - // TODO: Need to also attribute which specific block is faulty - verify_kzg_for_data_column_list_with_scoring(all_data_columns.iter(), &self.kzg)?; + // Attributes fault to the specific peer that sent an invalid column + verify_kzg_for_data_column_list_with_scoring(all_data_columns.iter(), &self.kzg) + .map_err(AvailabilityCheckError::InvalidColumn)?; } for block in blocks { @@ -716,32 +713,6 @@ async fn availability_cache_maintenance_service( } } -fn verify_kzg_for_data_column_list_with_scoring<'a, E: EthSpec, I>( - data_column_iter: I, - kzg: &'a Kzg, -) -> Result<(), AvailabilityCheckError> -where - I: Iterator>> + 
Clone, -{ - let Err(batch_err) = verify_kzg_for_data_column_list(data_column_iter.clone(), kzg) else { - return Ok(()); - }; - - let data_columns = data_column_iter.collect::>(); - // Find which column is invalid. If len is 1 or 0 continue to default case below. - // If len > 1 at least one column MUST fail. - if data_columns.len() > 1 { - for data_column in data_columns { - if let Err(e) = verify_kzg_for_data_column(data_column.clone(), kzg) { - return Err(AvailabilityCheckError::InvalidColumn(data_column.index, e)); - } - } - } - - // len 0 should never happen - Err(AvailabilityCheckError::InvalidColumn(0, batch_err)) -} - /// A fully available block that is ready to be imported into fork choice. #[derive(Clone, Debug, PartialEq)] pub struct AvailableBlock { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index cfdb3cfe91..1ab85ab105 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -4,7 +4,7 @@ use types::{BeaconStateError, ColumnIndex, Hash256}; #[derive(Debug)] pub enum Error { InvalidBlobs(KzgError), - InvalidColumn(ColumnIndex, KzgError), + InvalidColumn(Vec<(ColumnIndex, KzgError)>), ReconstructColumnsError(KzgError), KzgCommitmentMismatch { blob_commitment: KzgCommitment, diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 565e76704e..1262fcdeb8 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -239,6 +239,18 @@ impl KzgVerifiedDataColumn { pub fn new(data_column: Arc>, kzg: &Kzg) -> Result { verify_kzg_for_data_column(data_column, kzg) } + + pub fn from_batch( + data_columns: Vec>>, + kzg: &Kzg, + ) -> Result, Vec<(ColumnIndex, KzgError)>> { + 
verify_kzg_for_data_column_list_with_scoring(data_columns.iter(), kzg)?; + Ok(data_columns + .into_iter() + .map(|column| Self { data: column }) + .collect()) + } + pub fn to_data_column(self) -> Arc> { self.data } @@ -378,6 +390,38 @@ where Ok(()) } +/// Complete kzg verification for a list of `DataColumnSidecar`s. +/// +/// If there's at least one invalid column, it re-verifies all columns individually to identify the +/// first column that is invalid. This is necessary to attribute fault to the specific peer that +/// sent bad data. The re-verification cost should not be significant. If a peer sends invalid data it +/// will be quickly banned. +pub fn verify_kzg_for_data_column_list_with_scoring<'a, E: EthSpec, I>( + data_column_iter: I, + kzg: &'a Kzg, +) -> Result<(), Vec<(ColumnIndex, KzgError)>> +where + I: Iterator>> + Clone, +{ + if verify_kzg_for_data_column_list(data_column_iter.clone(), kzg).is_ok() { + return Ok(()); + }; + + // Find all columns that are invalid and identify by index. 
If we hit this condition there + // should be at least one invalid column + let errors = data_column_iter + .filter_map(|data_column| { + if let Err(e) = verify_kzg_for_data_column(data_column.clone(), kzg) { + Some((data_column.index, e)) + } else { + None + } + }) + .collect::>(); + + Err(errors) +} + pub fn validate_data_column_sidecar_for_gossip( data_column: Arc>, subnet: u64, diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index ac4df42a4e..2172c8dcd8 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -36,6 +36,7 @@ use beacon_chain::data_availability_checker::{ use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError}; pub use common::RequestState; use fnv::FnvHashMap; +use itertools::Itertools; use lighthouse_network::service::api_types::SingleLookupReqId; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; @@ -644,8 +645,15 @@ impl BlockLookups { // but future errors may follow the same pattern. Generalize this // pattern with https://github.com/sigp/lighthouse/pull/6321 BlockError::AvailabilityCheck( - AvailabilityCheckError::InvalidColumn(index, _), - ) => peer_group.of_index(index as usize).collect(), + AvailabilityCheckError::InvalidColumn(errors), + ) => errors + .iter() + // Collect all peers that sent a column that was invalid. Must + // run .unique as a single peer can send multiple invalid + // columns. 
Penalize once to avoid insta-bans + .flat_map(|(index, _)| peer_group.of_index((*index) as usize)) + .unique() + .collect(), _ => peer_group.all().collect(), }; for peer in peers_to_penalize { From 7e4b27c922a9c6b96ce6f7d6512328d3ee53f40a Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 3 Feb 2025 23:08:31 +0300 Subject: [PATCH 29/52] Migrate validator client to clap derive (#6300) Partially #5900 Migrate the validator client cli to clap derive --- Cargo.lock | 1 + account_manager/src/lib.rs | 12 - account_manager/src/validator/mod.rs | 11 - account_manager/src/wallet/mod.rs | 11 - beacon_node/src/cli.rs | 9 - book/src/help_vc.md | 23 +- boot_node/src/cli.rs | 9 - database_manager/src/cli.rs | 10 - lighthouse/src/cli.rs | 5 +- lighthouse/src/main.rs | 86 +- lighthouse/tests/validator_client.rs | 13 +- .../beacon_node_fallback/Cargo.toml | 1 + .../src/beacon_node_health.rs | 39 +- .../beacon_node_fallback/src/lib.rs | 17 +- validator_client/src/cli.rs | 950 +++++++++--------- validator_client/src/config.rs | 205 ++-- validator_client/src/lib.rs | 10 +- validator_manager/src/lib.rs | 14 +- 18 files changed, 653 insertions(+), 773 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cf7793fbb1..cba98c6991 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -884,6 +884,7 @@ dependencies = [ name = "beacon_node_fallback" version = "0.1.0" dependencies = [ + "clap", "environment", "eth2", "futures", diff --git a/account_manager/src/lib.rs b/account_manager/src/lib.rs index 534939cf6b..44ec638a09 100644 --- a/account_manager/src/lib.rs +++ b/account_manager/src/lib.rs @@ -2,11 +2,8 @@ mod common; pub mod validator; pub mod wallet; -use clap::Arg; -use clap::ArgAction; use clap::ArgMatches; use clap::Command; -use clap_utils::FLAG_HEADER; use environment::Environment; use types::EthSpec; @@ -21,15 +18,6 @@ pub fn cli_app() -> Command { .visible_aliases(["a", "am", "account"]) .about("Utilities for generating and managing Ethereum 2.0 accounts.") .display_order(0) - 
.arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER), - ) .subcommand(wallet::cli_app()) .subcommand(validator::cli_app()) } diff --git a/account_manager/src/validator/mod.rs b/account_manager/src/validator/mod.rs index 61584cbfbb..b699301cde 100644 --- a/account_manager/src/validator/mod.rs +++ b/account_manager/src/validator/mod.rs @@ -8,7 +8,6 @@ pub mod slashing_protection; use crate::{VALIDATOR_DIR_FLAG, VALIDATOR_DIR_FLAG_ALIAS}; use clap::{Arg, ArgAction, ArgMatches, Command}; -use clap_utils::FLAG_HEADER; use directory::{parse_path_or_default_with_flag, DEFAULT_VALIDATOR_DIR}; use environment::Environment; use std::path::PathBuf; @@ -20,16 +19,6 @@ pub fn cli_app() -> Command { Command::new(CMD) .display_order(0) .about("Provides commands for managing Eth2 validators.") - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER) - .global(true), - ) .arg( Arg::new(VALIDATOR_DIR_FLAG) .long(VALIDATOR_DIR_FLAG) diff --git a/account_manager/src/wallet/mod.rs b/account_manager/src/wallet/mod.rs index c34f0363a4..f6f3bb0419 100644 --- a/account_manager/src/wallet/mod.rs +++ b/account_manager/src/wallet/mod.rs @@ -4,7 +4,6 @@ pub mod recover; use crate::WALLETS_DIR_FLAG; use clap::{Arg, ArgAction, ArgMatches, Command}; -use clap_utils::FLAG_HEADER; use directory::{parse_path_or_default_with_flag, DEFAULT_WALLET_DIR}; use std::fs::create_dir_all; use std::path::PathBuf; @@ -15,16 +14,6 @@ pub fn cli_app() -> Command { Command::new(CMD) .about("Manage wallets, from which validator keys can be derived.") .display_order(0) - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER) - .global(true) - ) .arg( 
Arg::new(WALLETS_DIR_FLAG) .long(WALLETS_DIR_FLAG) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 1339c15825..ce4c5f9476 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -18,15 +18,6 @@ pub fn cli_app() -> Command { /* * Configuration directory locations. */ - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER) - ) .arg( Arg::new("network-dir") .long("network-dir") diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 71e21d68c9..948a09f44d 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -18,16 +18,16 @@ Options: certificate path. --broadcast Comma-separated list of beacon API topics to broadcast to all beacon - nodes. Possible values are: none, attestations, blocks, subscriptions, - sync-committee. Default (when flag is omitted) is to broadcast - subscriptions only. + nodes. Default (when flag is omitted) is to broadcast subscriptions + only. [possible values: none, attestations, blocks, subscriptions, + sync-committee] --builder-boost-factor Defines the boost factor, a percentage multiplier to apply to the builder's payload value when choosing between a builder payload header and payload from the local execution node. - --builder-registration-timestamp-override + --builder-registration-timestamp-override This flag takes a unix timestamp value that will be used to override - the timestamp used in the builder api registration + the timestamp used in the builder api registration. -d, --datadir Used to specify a custom root data directory for lighthouse keys and databases. Defaults to $HOME/.lighthouse/{network} where network is @@ -41,7 +41,7 @@ Options: The gas limit to be used in all builder proposals for all validators managed by this validator client. Note this will not necessarily be used if the gas limit set here moves too far from the previous block's - gas limit. 
[default: 30,000,000] + gas limit. [default: 30000000] --genesis-state-url A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server URLs can generally be used with @@ -68,7 +68,8 @@ Options: is supplied, the CORS allowed origin is set to the listen address of this server (e.g., http://localhost:5062). --http-port - Set the listen TCP port for the RESTful HTTP API server. + Set the listen TCP port for the RESTful HTTP API server. [default: + 5062] --http-token-path Path to file containing the HTTP API token for validator client authentication. If not specified, defaults to @@ -96,6 +97,7 @@ Options: set to 0, background file logging is disabled. [default: 200] --metrics-address
    Set the listen address for the Prometheus metrics HTTP server. + [default: 127.0.0.1] --metrics-allow-origin Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not recommended in production). If no value @@ -103,6 +105,7 @@ Options: this server (e.g., http://localhost:5064). --metrics-port Set the listen TCP port for the Prometheus metrics HTTP server. + [default: 5064] --monitoring-endpoint
    Enables the monitoring service for sending system metrics to a remote endpoint. This can be used to monitor your setup on certain services @@ -113,7 +116,7 @@ Options: provide an untrusted URL. --monitoring-endpoint-period Defines how many seconds to wait between each message sent to the - monitoring-endpoint. Default: 60s + monitoring-endpoint. [default: 60] --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] @@ -145,8 +148,8 @@ Options: each validator along with the common slashing protection database and the validator_definitions.yml --web3-signer-keep-alive-timeout - Keep-alive timeout for each web3signer connection. Set to 'null' to - never timeout [default: 20000] + Keep-alive timeout for each web3signer connection. Set to '0' to never + timeout. [default: 20000] --web3-signer-max-idle-connections Maximum number of idle connections to maintain per web3signer host. Default is unlimited. diff --git a/boot_node/src/cli.rs b/boot_node/src/cli.rs index 440a9d27e2..0f274885d1 100644 --- a/boot_node/src/cli.rs +++ b/boot_node/src/cli.rs @@ -13,15 +13,6 @@ pub fn cli_app() -> Command { surface compared to a full beacon node.") .styles(get_color_style()) .display_order(0) - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER) - ) .arg( Arg::new("enr-address") .long("enr-address") diff --git a/database_manager/src/cli.rs b/database_manager/src/cli.rs index 9db807df2c..c62da1206f 100644 --- a/database_manager/src/cli.rs +++ b/database_manager/src/cli.rs @@ -66,16 +66,6 @@ pub struct DatabaseManager { )] pub backend: store::config::DatabaseBackend, - #[clap( - long, - global = true, - help = "Prints help information", - action = clap::ArgAction::HelpLong, - display_order = 0, - help_heading = FLAG_HEADER - )] - help: Option, - #[clap(subcommand)] pub subcommand: 
DatabaseManagerSubcommand, } diff --git a/lighthouse/src/cli.rs b/lighthouse/src/cli.rs index 90d3e811eb..ed665d2a47 100644 --- a/lighthouse/src/cli.rs +++ b/lighthouse/src/cli.rs @@ -1,9 +1,12 @@ use clap::Parser; use database_manager::cli::DatabaseManager; use serde::{Deserialize, Serialize}; +use validator_client::cli::ValidatorClient; #[derive(Parser, Clone, Deserialize, Serialize, Debug)] pub enum LighthouseSubcommands { #[clap(name = "database_manager")] - DatabaseManager(DatabaseManager), + DatabaseManager(Box), + #[clap(name = "validator_client")] + ValidatorClient(Box), } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index dd7401d49e..d7a14e3809 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -399,10 +399,10 @@ fn main() { .action(ArgAction::HelpLong) .display_order(0) .help_heading(FLAG_HEADER) + .global(true) ) .subcommand(beacon_node::cli_app()) .subcommand(boot_node::cli_app()) - .subcommand(validator_client::cli_app()) .subcommand(account_manager::cli_app()) .subcommand(validator_manager::cli_app()); @@ -673,12 +673,49 @@ fn run( return Ok(()); } - if let Ok(LighthouseSubcommands::DatabaseManager(db_manager_config)) = - LighthouseSubcommands::from_arg_matches(matches) - { - info!(log, "Running database manager for {} network", network_name); - database_manager::run(matches, &db_manager_config, environment)?; - return Ok(()); + match LighthouseSubcommands::from_arg_matches(matches) { + Ok(LighthouseSubcommands::DatabaseManager(db_manager_config)) => { + info!(log, "Running database manager for {} network", network_name); + database_manager::run(matches, &db_manager_config, environment)?; + return Ok(()); + } + Ok(LighthouseSubcommands::ValidatorClient(validator_client_config)) => { + let context = environment.core_context(); + let log = context.log().clone(); + let executor = context.executor.clone(); + let config = validator_client::Config::from_cli( + matches, + &validator_client_config, + context.log(), + ) + 
.map_err(|e| format!("Unable to initialize validator config: {}", e))?; + // Dump configs if `dump-config` or `dump-chain-config` flags are set + clap_utils::check_dump_configs::<_, E>(matches, &config, &context.eth2_config.spec)?; + + let shutdown_flag = matches.get_flag("immediate-shutdown"); + if shutdown_flag { + info!(log, "Validator client immediate shutdown triggered."); + return Ok(()); + } + + executor.clone().spawn( + async move { + if let Err(e) = ProductionValidatorClient::new(context, config) + .and_then(|mut vc| async move { vc.start_service().await }) + .await + { + crit!(log, "Failed to start validator client"; "reason" => e); + // Ignore the error since it always occurs during normal operation when + // shutting down. + let _ = executor + .shutdown_sender() + .try_send(ShutdownReason::Failure("Failed to start validator client")); + } + }, + "validator_client", + ); + } + Err(_) => (), }; info!(log, "Lighthouse started"; "version" => VERSION); @@ -733,38 +770,9 @@ fn run( "beacon_node", ); } - Some(("validator_client", matches)) => { - let context = environment.core_context(); - let log = context.log().clone(); - let executor = context.executor.clone(); - let config = validator_client::Config::from_cli(matches, context.log()) - .map_err(|e| format!("Unable to initialize validator config: {}", e))?; - // Dump configs if `dump-config` or `dump-chain-config` flags are set - clap_utils::check_dump_configs::<_, E>(matches, &config, &context.eth2_config.spec)?; - - let shutdown_flag = matches.get_flag("immediate-shutdown"); - if shutdown_flag { - info!(log, "Validator client immediate shutdown triggered."); - return Ok(()); - } - - executor.clone().spawn( - async move { - if let Err(e) = ProductionValidatorClient::new(context, config) - .and_then(|mut vc| async move { vc.start_service().await }) - .await - { - crit!(log, "Failed to start validator client"; "reason" => e); - // Ignore the error since it always occurs during normal operation when - // 
shutting down. - let _ = executor - .shutdown_sender() - .try_send(ShutdownReason::Failure("Failed to start validator client")); - } - }, - "validator_client", - ); - } + // TODO(clap-derive) delete this once we've fully migrated to clap derive. + // Qt the moment this needs to exist so that we dont trigger a crit. + Some(("validator_client", _)) => (), _ => { crit!(log, "No subcommand supplied. See --help ."); return Err("No subcommand supplied.".into()); diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 1945399c86..f28e7d9829 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -407,6 +407,13 @@ fn metrics_port_flag() { .with_config(|config| assert_eq!(config.http_metrics.listen_port, 9090)); } #[test] +fn metrics_port_flag_default() { + CommandLineTest::new() + .flag("metrics", None) + .run() + .with_config(|config| assert_eq!(config.http_metrics.listen_port, 5064)); +} +#[test] fn metrics_allow_origin_flag() { CommandLineTest::new() .flag("metrics", None) @@ -458,7 +465,7 @@ fn no_doppelganger_protection_flag() { fn no_gas_limit_flag() { CommandLineTest::new() .run() - .with_config(|config| assert!(config.validator_store.gas_limit.is_none())); + .with_config(|config| assert!(config.validator_store.gas_limit == Some(30_000_000))); } #[test] fn gas_limit_flag() { @@ -560,7 +567,7 @@ fn broadcast_flag() { }); // Other valid variants CommandLineTest::new() - .flag("broadcast", Some("blocks, subscriptions")) + .flag("broadcast", Some("blocks,subscriptions")) .run() .with_config(|config| { assert_eq!( @@ -605,7 +612,7 @@ fn beacon_nodes_sync_tolerances_flag() { } #[test] -#[should_panic(expected = "Unknown API topic")] +#[should_panic(expected = "invalid value")] fn wrong_broadcast_flag() { CommandLineTest::new() .flag("broadcast", Some("foo, subscriptions")) diff --git a/validator_client/beacon_node_fallback/Cargo.toml b/validator_client/beacon_node_fallback/Cargo.toml index 
c15ded43d7..2c30290110 100644 --- a/validator_client/beacon_node_fallback/Cargo.toml +++ b/validator_client/beacon_node_fallback/Cargo.toml @@ -9,6 +9,7 @@ name = "beacon_node_fallback" path = "src/lib.rs" [dependencies] +clap = { workspace = true } environment = { workspace = true } eth2 = { workspace = true } futures = { workspace = true } diff --git a/validator_client/beacon_node_fallback/src/beacon_node_health.rs b/validator_client/beacon_node_fallback/src/beacon_node_health.rs index e5b0487656..80d3fb7efd 100644 --- a/validator_client/beacon_node_fallback/src/beacon_node_health.rs +++ b/validator_client/beacon_node_fallback/src/beacon_node_health.rs @@ -1,11 +1,9 @@ use super::CandidateError; use eth2::BeaconNodeHttpClient; -use itertools::Itertools; use serde::{Deserialize, Serialize}; use slog::{warn, Logger}; use std::cmp::Ordering; use std::fmt::{Debug, Display, Formatter}; -use std::str::FromStr; use types::Slot; /// Sync distances between 0 and DEFAULT_SYNC_TOLERANCE are considered `synced`. @@ -53,29 +51,6 @@ impl Default for BeaconNodeSyncDistanceTiers { } } -impl FromStr for BeaconNodeSyncDistanceTiers { - type Err = String; - - fn from_str(s: &str) -> Result { - let values: (u64, u64, u64) = s - .split(',') - .map(|s| { - s.parse() - .map_err(|e| format!("Invalid sync distance modifier: {e:?}")) - }) - .collect::, _>>()? - .into_iter() - .collect_tuple() - .ok_or("Invalid number of sync distance modifiers".to_string())?; - - Ok(BeaconNodeSyncDistanceTiers { - synced: Slot::new(values.0), - small: Slot::new(values.0 + values.1), - medium: Slot::new(values.0 + values.1 + values.2), - }) - } -} - impl BeaconNodeSyncDistanceTiers { /// Takes a given sync distance and determines its tier based on the `sync_tolerance` defined by /// the CLI. 
@@ -90,6 +65,17 @@ impl BeaconNodeSyncDistanceTiers { SyncDistanceTier::Large } } + + pub fn from_vec(tiers: &[u64]) -> Result { + if tiers.len() != 3 { + return Err("Invalid number of sync distance modifiers".to_string()); + } + Ok(BeaconNodeSyncDistanceTiers { + synced: Slot::new(tiers[0]), + small: Slot::new(tiers[0] + tiers[1]), + medium: Slot::new(tiers[0] + tiers[1] + tiers[2]), + }) + } } /// Execution Node health metrics. @@ -320,7 +306,6 @@ mod tests { SyncDistanceTier, }; use crate::Config; - use std::str::FromStr; use types::Slot; #[test] @@ -423,7 +408,7 @@ mod tests { // medium 9..=12 // large: 13.. - let distance_tiers = BeaconNodeSyncDistanceTiers::from_str("4,4,4").unwrap(); + let distance_tiers = BeaconNodeSyncDistanceTiers::from_vec(&[4, 4, 4]).unwrap(); let synced_low = new_distance_tier(0, &distance_tiers); let synced_high = new_distance_tier(4, &distance_tiers); diff --git a/validator_client/beacon_node_fallback/src/lib.rs b/validator_client/beacon_node_fallback/src/lib.rs index beae176193..839e60d011 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -7,6 +7,7 @@ use beacon_node_health::{ check_node_health, BeaconNodeHealth, BeaconNodeSyncDistanceTiers, ExecutionEngineHealth, IsOptimistic, SyncDistanceTier, }; +use clap::ValueEnum; use environment::RuntimeContext; use eth2::BeaconNodeHttpClient; use futures::future; @@ -20,7 +21,8 @@ use std::future::Future; use std::marker::PhantomData; use std::sync::Arc; use std::time::{Duration, Instant}; -use strum::{EnumString, EnumVariantNames}; +use std::vec::Vec; +use strum::EnumVariantNames; use tokio::{sync::RwLock, time::sleep}; use types::{ChainSpec, Config as ConfigSpec, EthSpec, Slot}; use validator_metrics::{inc_counter_vec, ENDPOINT_ERRORS, ENDPOINT_REQUESTS}; @@ -727,9 +729,10 @@ async fn sort_nodes_by_health(nodes: &mut Vec } /// Serves as a cue for `BeaconNodeFallback` to tell which requests need to be broadcasted. 
-#[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize, EnumString, EnumVariantNames)] +#[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize, EnumVariantNames, ValueEnum)] #[strum(serialize_all = "kebab-case")] pub enum ApiTopic { + None, Attestations, Blocks, Subscriptions, @@ -749,7 +752,6 @@ mod tests { use crate::beacon_node_health::BeaconNodeHealthTier; use eth2::SensitiveUrl; use eth2::Timeouts; - use std::str::FromStr; use strum::VariantNames; use types::{MainnetEthSpec, Slot}; @@ -758,10 +760,13 @@ mod tests { #[test] fn api_topic_all() { let all = ApiTopic::all(); - assert_eq!(all.len(), ApiTopic::VARIANTS.len()); - assert!(ApiTopic::VARIANTS + // ignore NONE variant + let mut variants = ApiTopic::VARIANTS.to_vec(); + variants.retain(|s| *s != "none"); + assert_eq!(all.len(), variants.len()); + assert!(variants .iter() - .map(|topic| ApiTopic::from_str(topic).unwrap()) + .map(|topic| ApiTopic::from_str(topic, true).unwrap()) .eq(all.into_iter())); } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index b2d1ebb3c2..dfcd2064e5 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -1,490 +1,478 @@ -use clap::{builder::ArgPredicate, Arg, ArgAction, Command}; -use clap_utils::{get_color_style, FLAG_HEADER}; +use beacon_node_fallback::ApiTopic; +use clap::builder::ArgPredicate; +pub use clap::{FromArgMatches, Parser}; +use clap_utils::get_color_style; +use clap_utils::FLAG_HEADER; +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; +use types::Address; -pub fn cli_app() -> Command { - Command::new("validator_client") - .visible_aliases(["v", "vc", "validator"]) - .styles(get_color_style()) - .display_order(0) - .about( - "When connected to a beacon node, performs the duties of a staked \ +#[derive(Parser, Clone, Deserialize, Serialize, Debug)] +#[clap( + name = "validator_client", + visible_aliases = &["v", "vc", "validator"], + about = "When connected to a beacon node, performs the 
duties of a staked \ validator (e.g., proposing blocks and attestations).", - ) - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER) - ) - .arg( - Arg::new("beacon-nodes") - .long("beacon-nodes") - .value_name("NETWORK_ADDRESSES") - .help("Comma-separated addresses to one or more beacon node HTTP APIs. \ - Default is http://localhost:5052." - ) - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("proposer-nodes") - .long("proposer-nodes") - .value_name("NETWORK_ADDRESSES") - .help("Comma-separated addresses to one or more beacon node HTTP APIs. \ - These specify nodes that are used to send beacon block proposals. A failure will revert back to the standard beacon nodes specified in --beacon-nodes." - ) - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("broadcast") - .long("broadcast") - .value_name("API_TOPICS") - .help("Comma-separated list of beacon API topics to broadcast to all beacon nodes. \ - Possible values are: none, attestations, blocks, subscriptions, \ - sync-committee. Default (when flag is omitted) is to broadcast \ - subscriptions only." - ) - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("validators-dir") - .long("validators-dir") - .alias("validator-dir") - .value_name("VALIDATORS_DIR") - .help( - "The directory which contains the validator keystores, deposit data for \ - each validator along with the common slashing protection database \ - and the validator_definitions.yml" - ) - .action(ArgAction::Set) - .conflicts_with("datadir") - .display_order(0) - ) - .arg( - Arg::new("secrets-dir") - .long("secrets-dir") - .value_name("SECRETS_DIRECTORY") - .help( - "The directory which contains the password to unlock the validator \ - voting keypairs. 
Each password should be contained in a file where the \ - name is the 0x-prefixed hex representation of the validators voting public \ - key. Defaults to ~/.lighthouse/{network}/secrets.", - ) - .action(ArgAction::Set) - .conflicts_with("datadir") - .display_order(0) - ) - .arg( - Arg::new("init-slashing-protection") - .long("init-slashing-protection") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .help( - "If present, do not require the slashing protection database to exist before \ - running. You SHOULD NOT use this flag unless you're certain that a new \ - slashing protection database is required. Usually, your database \ - will have been initialized when you imported your validator keys. If you \ - misplace your database and then run with this flag you risk being slashed." - ) - .display_order(0) - ) - .arg( - Arg::new("disable-auto-discover") - .long("disable-auto-discover") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .help( - "If present, do not attempt to discover new validators in the validators-dir. Validators \ - will need to be manually added to the validator_definitions.yml file." - ) - .display_order(0) - ) - .arg( - Arg::new("use-long-timeouts") - .long("use-long-timeouts") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .help("If present, the validator client will use longer timeouts for requests \ - made to the beacon node. This flag is generally not recommended, \ - longer timeouts can cause missed duties when fallbacks are used.") - .display_order(0) - ) - .arg( - Arg::new("beacon-nodes-tls-certs") - .long("beacon-nodes-tls-certs") - .value_name("CERTIFICATE-FILES") - .action(ArgAction::Set) - .help("Comma-separated paths to custom TLS certificates to use when connecting \ - to a beacon node (and/or proposer node). These certificates must be in PEM format and are used \ - in addition to the OS trust store. 
Commas must only be used as a \ - delimiter, and must not be part of the certificate path.") - .display_order(0) - ) - // This overwrites the graffiti configured in the beacon node. - .arg( - Arg::new("graffiti") - .long("graffiti") - .help("Specify your custom graffiti to be included in blocks.") - .value_name("GRAFFITI") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("graffiti-file") - .long("graffiti-file") - .help("Specify a graffiti file to load validator graffitis from.") - .value_name("GRAFFITI-FILE") - .action(ArgAction::Set) - .conflicts_with("graffiti") - .display_order(0) - ) - .arg( - Arg::new("suggested-fee-recipient") - .long("suggested-fee-recipient") - .help("Once the merge has happened, this address will receive transaction fees \ - from blocks proposed by this validator client. If a fee recipient is \ - configured in the validator definitions it takes priority over this value.") - .value_name("FEE-RECIPIENT") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("distributed") - .long("distributed") - .help("Enables functionality required for running the validator in a distributed validator cluster.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - /* REST API related arguments */ - .arg( - Arg::new("http") - .long("http") - .help("Enable the RESTful HTTP API server. Disabled by default.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - /* - * Note: The HTTP server is **not** encrypted (i.e., not HTTPS) and therefore it is - * unsafe to publish on a public network. - * - * If the `--http-address` flag is used, the `--unencrypted-http-transport` flag - * must also be used in order to make it clear to the user that this is unsafe. - */ - .arg( - Arg::new("http-address") - .long("http-address") - .requires("http") - .value_name("ADDRESS") - .help("Set the address for the HTTP address. 
The HTTP server is not encrypted \ - and therefore it is unsafe to publish on a public network. When this \ - flag is used, it additionally requires the explicit use of the \ - `--unencrypted-http-transport` flag to ensure the user is aware of the \ - risks involved. For access via the Internet, users should apply \ - transport-layer security like a HTTPS reverse-proxy or SSH tunnelling.") - .requires("unencrypted-http-transport") - .display_order(0) - ) - .arg( - Arg::new("unencrypted-http-transport") - .long("unencrypted-http-transport") - .help("This is a safety flag to ensure that the user is aware that the http \ - transport is unencrypted and using a custom HTTP address is unsafe.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .requires("http-address") - .display_order(0) - ) - .arg( - Arg::new("http-port") - .long("http-port") - .requires("http") - .value_name("PORT") - .help("Set the listen TCP port for the RESTful HTTP API server.") - .default_value_if("http", ArgPredicate::IsPresent, "5062") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("http-allow-origin") - .long("http-allow-origin") - .requires("http") - .value_name("ORIGIN") - .help("Set the value of the Access-Control-Allow-Origin response HTTP header. \ + styles = get_color_style(), + next_line_help = true, + term_width = 80, + disable_help_flag = true, + disable_help_subcommand = true, + display_order = 0, +)] +pub struct ValidatorClient { + #[clap( + long, + value_name = "NETWORK_ADDRESSES", + value_delimiter = ',', + help = "Comma-separated addresses to one or more beacon node HTTP APIs. \ + Default is http://localhost:5052.", + display_order = 0 + )] + pub beacon_nodes: Option>, + + #[clap( + long, + value_name = "NETWORK_ADDRESSES", + value_delimiter = ',', + help = "Comma-separated addresses to one or more beacon node HTTP APIs. \ + These specify nodes that are used to send beacon block proposals. 
\ + A failure will revert back to the standard beacon nodes specified in --beacon-nodes.", + display_order = 0 + )] + pub proposer_nodes: Option>, + + #[clap( + long, + value_name = "API_TOPICS", + value_delimiter = ',', + help = "Comma-separated list of beacon API topics to broadcast to all beacon nodes. \ + Default (when flag is omitted) is to broadcast subscriptions only.", + display_order = 0 + )] + pub broadcast: Option>, + + #[clap( + long, + alias = "validator-dir", + value_name = "VALIDATORS_DIR", + conflicts_with = "datadir", + help = "The directory which contains the validator keystores, deposit data for \ + each validator along with the common slashing protection database \ + and the validator_definitions.yml", + display_order = 0 + )] + pub validators_dir: Option, + + #[clap( + long, + value_name = "SECRETS_DIRECTORY", + conflicts_with = "datadir", + help = "The directory which contains the password to unlock the validator \ + voting keypairs. Each password should be contained in a file where the \ + name is the 0x-prefixed hex representation of the validators voting public \ + key. Defaults to ~/.lighthouse/{network}/secrets.", + display_order = 0 + )] + pub secrets_dir: Option, + + #[clap( + long, + help = "If present, do not require the slashing protection database to exist before \ + running. You SHOULD NOT use this flag unless you're certain that a new \ + slashing protection database is required. Usually, your database \ + will have been initialized when you imported your validator keys. If you \ + misplace your database and then run with this flag you risk being slashed.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub init_slashing_protection: bool, + + #[clap( + long, + help = "If present, do not attempt to discover new validators in the validators-dir. 
Validators \ + will need to be manually added to the validator_definitions.yml file.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub disable_auto_discover: bool, + + #[clap( + long, + help = "If present, the validator client will use longer timeouts for requests \ + made to the beacon node. This flag is generally not recommended, \ + longer timeouts can cause missed duties when fallbacks are used.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub use_long_timeouts: bool, + + #[clap( + long, + value_name = "CERTIFICATE-FILES", + value_delimiter = ',', + help = "Comma-separated paths to custom TLS certificates to use when connecting \ + to a beacon node (and/or proposer node). These certificates must be in PEM format and are used \ + in addition to the OS trust store. Commas must only be used as a \ + delimiter, and must not be part of the certificate path.", + display_order = 0 + )] + pub beacon_nodes_tls_certs: Option>, + + // This overwrites the graffiti configured in the beacon node. + #[clap( + long, + value_name = "GRAFFITI", + help = "Specify your custom graffiti to be included in blocks.", + display_order = 0 + )] + pub graffiti: Option, + + #[clap( + long, + value_name = "GRAFFITI-FILE", + conflicts_with = "graffiti", + help = "Specify a graffiti file to load validator graffitis from.", + display_order = 0 + )] + pub graffiti_file: Option, + + #[clap( + long, + value_name = "FEE-RECIPIENT", + help = "Once the merge has happened, this address will receive transaction fees \ + from blocks proposed by this validator client. If a fee recipient is \ + configured in the validator definitions it takes priority over this value.", + display_order = 0 + )] + pub suggested_fee_recipient: Option
    , + + #[clap( + long, + help = "Enables functionality required for running the validator in a distributed validator cluster.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub distributed: bool, + + /* REST API related arguments */ + #[clap( + long, + help = "Enable the RESTful HTTP API server. Disabled by default.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub http: bool, + + /* + * Note: The HTTP server is **not** encrypted (i.e., not HTTPS) and therefore it is + * unsafe to publish on a public network. + * + * If the `--http-address` flag is used, the `--unencrypted-http-transport` flag + * must also be used in order to make it clear to the user that this is unsafe. + */ + #[clap( + long, + value_name = "ADDRESS", + requires = "unencrypted_http_transport", + help = "Set the address for the HTTP address. The HTTP server is not encrypted \ + and therefore it is unsafe to publish on a public network. When this \ + flag is used, it additionally requires the explicit use of the \ + `--unencrypted-http-transport` flag to ensure the user is aware of the \ + risks involved. For access via the Internet, users should apply \ + transport-layer security like a HTTPS reverse-proxy or SSH tunnelling.", + display_order = 0 + )] + pub http_address: Option, + + #[clap( + long, + requires = "http_address", + help = "This is a safety flag to ensure that the user is aware that the http \ + transport is unencrypted and using a custom HTTP address is unsafe.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub unencrypted_http_transport: bool, + + #[clap( + long, + value_name = "PORT", + default_value_t = 5062, + help = "Set the listen TCP port for the RESTful HTTP API server.", + display_order = 0 + )] + pub http_port: u16, + + #[clap( + long, + value_name = "ORIGIN", + help = "Set the value of the Access-Control-Allow-Origin response HTTP header. \ Use * to allow any origin (not recommended in production). 
\ If no value is supplied, the CORS allowed origin is set to the listen \ - address of this server (e.g., http://localhost:5062).") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("http-allow-keystore-export") - .long("http-allow-keystore-export") - .requires("http") - .help("If present, allow access to the DELETE /lighthouse/keystores HTTP \ - API method, which allows exporting keystores and passwords to HTTP API \ - consumers who have access to the API token. This method is useful for \ - exporting validators, however it should be used with caution since it \ - exposes private key data to authorized users.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - .arg( - Arg::new("http-store-passwords-in-secrets-dir") - .long("http-store-passwords-in-secrets-dir") - .requires("http") - .help("If present, any validators created via the HTTP will have keystore \ - passwords stored in the secrets-dir rather than the validator \ - definitions file.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - .arg( - Arg::new("http-token-path") - .long("http-token-path") - .requires("http") - .value_name("HTTP_TOKEN_PATH") - .help( - "Path to file containing the HTTP API token for validator client authentication. \ - If not specified, defaults to {validators-dir}/api-token.txt." - ) - .action(ArgAction::Set) - .display_order(0) - ) - /* Prometheus metrics HTTP server related arguments */ - .arg( - Arg::new("metrics") - .long("metrics") - .help("Enable the Prometheus metrics HTTP server. 
Disabled by default.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - .arg( - Arg::new("metrics-address") - .long("metrics-address") - .requires("metrics") - .value_name("ADDRESS") - .help("Set the listen address for the Prometheus metrics HTTP server.") - .default_value_if("metrics", ArgPredicate::IsPresent, "127.0.0.1") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("metrics-port") - .long("metrics-port") - .requires("metrics") - .value_name("PORT") - .help("Set the listen TCP port for the Prometheus metrics HTTP server.") - .default_value_if("metrics", ArgPredicate::IsPresent, "5064") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("metrics-allow-origin") - .long("metrics-allow-origin") - .requires("metrics") - .value_name("ORIGIN") - .help("Set the value of the Access-Control-Allow-Origin response HTTP header. \ - Use * to allow any origin (not recommended in production). \ - If no value is supplied, the CORS allowed origin is set to the listen \ - address of this server (e.g., http://localhost:5064).") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("enable-high-validator-count-metrics") - .long("enable-high-validator-count-metrics") - .help("Enable per validator metrics for > 64 validators. \ - Note: This flag is automatically enabled for <= 64 validators. \ - Enabling this metric for higher validator counts will lead to higher volume \ - of prometheus metrics being collected.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - /* - * Explorer metrics - */ - .arg( - Arg::new("monitoring-endpoint") - .long("monitoring-endpoint") - .value_name("ADDRESS") - .help("Enables the monitoring service for sending system metrics to a remote endpoint. 
\ + address of this server (e.g., http://localhost:5062).", + display_order = 0 + )] + pub http_allow_origin: Option, + + #[clap( + long, + requires = "http", + help = "If present, allow access to the DELETE /lighthouse/keystores HTTP \ + API method, which allows exporting keystores and passwords to HTTP API \ + consumers who have access to the API token. This method is useful for \ + exporting validators, however it should be used with caution since it \ + exposes private key data to authorized users.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub http_allow_keystore_export: bool, + + #[clap( + long, + requires = "http", + help = "If present, any validators created via the HTTP will have keystore \ + passwords stored in the secrets-dir rather than the validator \ + definitions file.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub http_store_passwords_in_secrets_dir: bool, + + #[clap( + long, + requires = "http", + help = "Path to file containing the HTTP API token for validator client authentication. \ + If not specified, defaults to {validators-dir}/api-token.txt.", + display_order = 0 + )] + pub http_token_path: Option, + + /* Prometheus metrics HTTP server related arguments */ + #[clap( + long, + help = "Enable the Prometheus metrics HTTP server. Disabled by default.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub metrics: bool, + + #[clap( + long, + value_name = "ADDRESS", + requires = "metrics", + default_value_if("metrics", ArgPredicate::IsPresent, "127.0.0.1"), + help = "Set the listen address for the Prometheus metrics HTTP server. 
[default: 127.0.0.1]", + display_order = 0 + )] + pub metrics_address: Option, + + #[clap( + long, + value_name = "PORT", + requires = "metrics", + default_value_t = 5064, + help = "Set the listen TCP port for the Prometheus metrics HTTP server.", + display_order = 0 + )] + pub metrics_port: u16, + + #[clap( + long, + value_name = "ORIGIN", + requires = "metrics", + help = "Set the value of the Access-Control-Allow-Origin response HTTP header. \ + Use * to allow any origin (not recommended in production). \ + If no value is supplied, the CORS allowed origin is set to the listen \ + address of this server (e.g., http://localhost:5064).", + display_order = 0 + )] + pub metrics_allow_origin: Option, + + #[clap( + long, + help = "Enable per validator metrics for > 64 validators. \ + Note: This flag is automatically enabled for <= 64 validators. \ + Enabling this metric for higher validator counts will lead to higher volume \ + of prometheus metrics being collected.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub enable_high_validator_count_metrics: bool, + + /* Explorer metrics */ + #[clap( + long, + value_name = "ADDRESS", + help = "Enables the monitoring service for sending system metrics to a remote endpoint. \ This can be used to monitor your setup on certain services (e.g. beaconcha.in). \ This flag sets the endpoint where the beacon node metrics will be sent. \ Note: This will send information to a remote sever which may identify and associate your \ validators, IP address and other personal information. Always use a HTTPS connection \ - and never provide an untrusted URL.") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("monitoring-endpoint-period") - .long("monitoring-endpoint-period") - .value_name("SECONDS") - .help("Defines how many seconds to wait between each message sent to \ - the monitoring-endpoint. 
Default: 60s") - .requires("monitoring-endpoint") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("enable-doppelganger-protection") - .long("enable-doppelganger-protection") - .value_name("ENABLE_DOPPELGANGER_PROTECTION") - .help("If this flag is set, Lighthouse will delay startup for three epochs and \ - monitor for messages on the network by any of the validators managed by this \ - client. This will result in three (possibly four) epochs worth of missed \ - attestations. If an attestation is detected during this period, it means it is \ - very likely that you are running a second validator client with the same keys. \ - This validator client will immediately shutdown if this is detected in order \ - to avoid potentially committing a slashable offense. Use this flag in order to \ - ENABLE this functionality, without this flag Lighthouse will begin attesting \ - immediately.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - .arg( - Arg::new("builder-proposals") - .long("builder-proposals") - .alias("private-tx-proposals") - .help("If this flag is set, Lighthouse will query the Beacon Node for only block \ - headers during proposals and will sign over headers. Useful for outsourcing \ - execution payload construction during proposals.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - .arg( - Arg::new("builder-registration-timestamp-override") - .long("builder-registration-timestamp-override") - .alias("builder-registration-timestamp-override") - .help("This flag takes a unix timestamp value that will be used to override the \ - timestamp used in the builder api registration") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("gas-limit") - .long("gas-limit") - .value_name("INTEGER") - .action(ArgAction::Set) - .help("The gas limit to be used in all builder proposals for all validators managed \ - by this validator client. 
Note this will not necessarily be used if the gas limit \ - set here moves too far from the previous block's gas limit. [default: 30,000,000]") - .requires("builder-proposals") - .display_order(0) - ) - .arg( - Arg::new("disable-latency-measurement-service") - .long("disable-latency-measurement-service") - .help("Disables the service that periodically attempts to measure latency to BNs.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - .arg( - Arg::new("validator-registration-batch-size") - .long("validator-registration-batch-size") - .value_name("INTEGER") - .help("Defines the number of validators per \ - validator/register_validator request sent to the BN. This value \ - can be reduced to avoid timeouts from builders.") - .default_value("500") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("builder-boost-factor") - .long("builder-boost-factor") - .value_name("UINT64") - .help("Defines the boost factor, \ - a percentage multiplier to apply to the builder's payload value \ - when choosing between a builder payload header and payload from \ - the local execution node.") - .conflicts_with("prefer-builder-proposals") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("prefer-builder-proposals") - .long("prefer-builder-proposals") - .help("If this flag is set, Lighthouse will always prefer blocks \ - constructed by builders, regardless of payload value.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - .arg( - Arg::new("beacon-nodes-sync-tolerances") - .long("beacon-nodes-sync-tolerances") - .value_name("SYNC_TOLERANCES") - .help("A comma-separated list of 3 values which sets the size of each sync distance range when \ - determining the health of each connected beacon node. \ - The first value determines the `Synced` range. \ - If a connected beacon node is synced to within this number of slots it is considered 'Synced'. 
\ - The second value determines the `Small` sync distance range. \ - This range starts immediately after the `Synced` range. \ - The third value determines the `Medium` sync distance range. \ - This range starts immediately after the `Small` range. \ - Any sync distance value beyond that is considered `Large`. \ - For example, a value of `8,8,48` would have ranges like the following: \ - `Synced`: 0..=8 \ - `Small`: 9..=16 \ - `Medium`: 17..=64 \ - `Large`: 65.. \ - These values are used to determine what ordering beacon node fallbacks are used in. \ - Generally, `Synced` nodes are preferred over `Small` and so on. \ - Nodes in the `Synced` range will tie-break based on their ordering in `--beacon-nodes`. \ - This ensures the primary beacon node is prioritised. \ - [default: 8,8,48]") - .action(ArgAction::Set) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - .arg( - Arg::new("disable-slashing-protection-web3signer") - .long("disable-slashing-protection-web3signer") - .help("Disable Lighthouse's slashing protection for all web3signer keys. This can \ - reduce the I/O burden on the VC but is only safe if slashing protection \ - is enabled on the remote signer and is implemented correctly. DO NOT ENABLE \ - THIS FLAG UNLESS YOU ARE CERTAIN THAT SLASHING PROTECTION IS ENABLED ON \ - THE REMOTE SIGNER. YOU WILL GET SLASHED IF YOU USE THIS FLAG WITHOUT \ - ENABLING WEB3SIGNER'S SLASHING PROTECTION.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - /* - * Experimental/development options. - */ - .arg( - Arg::new("web3-signer-keep-alive-timeout") - .long("web3-signer-keep-alive-timeout") - .value_name("MILLIS") - .default_value("20000") - .help("Keep-alive timeout for each web3signer connection. 
Set to 'null' to never \ - timeout") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("web3-signer-max-idle-connections") - .long("web3-signer-max-idle-connections") - .value_name("COUNT") - .help("Maximum number of idle connections to maintain per web3signer host. Default \ - is unlimited.") - .action(ArgAction::Set) - .display_order(0) - ) + and never provide an untrusted URL.", + display_order = 0 + )] + pub monitoring_endpoint: Option, + + #[clap( + long, + value_name = "SECONDS", + requires = "monitoring_endpoint", + default_value_t = 60, + help = "Defines how many seconds to wait between each message sent to \ + the monitoring-endpoint.", + display_order = 0 + )] + pub monitoring_endpoint_period: u64, + + #[clap( + long, + value_name = "BOOLEAN", + help = "If this flag is set, Lighthouse will delay startup for three epochs and \ + monitor for messages on the network by any of the validators managed by this \ + client. This will result in three (possibly four) epochs worth of missed \ + attestations. If an attestation is detected during this period, it means it is \ + very likely that you are running a second validator client with the same keys. \ + This validator client will immediately shutdown if this is detected in order \ + to avoid potentially committing a slashable offense. Use this flag in order to \ + ENABLE this functionality, without this flag Lighthouse will begin attesting \ + immediately.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub enable_doppelganger_protection: bool, + + #[clap( + long, + alias = "private-tx-proposals", + help = "If this flag is set, Lighthouse will query the Beacon Node for only block \ + headers during proposals and will sign over headers. 
Useful for outsourcing \ + execution payload construction during proposals.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub builder_proposals: bool, + + #[clap( + long, + value_name = "UNIX-TIMESTAMP", + help = "This flag takes a unix timestamp value that will be used to override the \ + timestamp used in the builder api registration.", + display_order = 0 + )] + pub builder_registration_timestamp_override: Option, + + #[clap( + long, + value_name = "INTEGER", + default_value_t = 30_000_000, + requires = "builder_proposals", + help = "The gas limit to be used in all builder proposals for all validators managed \ + by this validator client. Note this will not necessarily be used if the gas limit \ + set here moves too far from the previous block's gas limit.", + display_order = 0 + )] + pub gas_limit: u64, + + #[clap( + long, + value_name = "BOOLEAN", + help = "Disables the service that periodically attempts to measure latency to BNs.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub disable_latency_measurement_service: bool, + + #[clap( + long, + value_name = "INTEGER", + default_value_t = 500, + help = "Defines the number of validators per \ + validator/register_validator request sent to the BN. 
This value \ + can be reduced to avoid timeouts from builders.", + display_order = 0 + )] + pub validator_registration_batch_size: usize, + + #[clap( + long, + value_name = "UINT64", + help = "Defines the boost factor, \ + a percentage multiplier to apply to the builder's payload value \ + when choosing between a builder payload header and payload from \ + the local execution node.", + conflicts_with = "prefer_builder_proposals", + display_order = 0 + )] + pub builder_boost_factor: Option, + + #[clap( + long, + help = "If this flag is set, Lighthouse will always prefer blocks \ + constructed by builders, regardless of payload value.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub prefer_builder_proposals: bool, + + #[clap( + long, + help = "A comma-separated list of 3 values which sets the size of each sync distance range when \ + determining the health of each connected beacon node. \ + The first value determines the `Synced` range. \ + If a connected beacon node is synced to within this number of slots it is considered 'Synced'. \ + The second value determines the `Small` sync distance range. \ + This range starts immediately after the `Synced` range. \ + The third value determines the `Medium` sync distance range. \ + This range starts immediately after the `Small` range. \ + Any sync distance value beyond that is considered `Large`. \ + For example, a value of `8,8,48` would have ranges like the following: \ + `Synced`: 0..=8 \ + `Small`: 9..=16 \ + `Medium`: 17..=64 \ + `Large`: 65.. \ + These values are used to determine what ordering beacon node fallbacks are used in. \ + Generally, `Synced` nodes are preferred over `Small` and so on. \ + Nodes in the `Synced` range will tie-break based on their ordering in `--beacon-nodes`. 
\ + This ensures the primary beacon node is prioritised.", + display_order = 0, + value_delimiter = ',', + default_value = "8,8,48", + help_heading = FLAG_HEADER, + value_name = "SYNC_TOLERANCES" + )] + pub beacon_nodes_sync_tolerances: Vec, + + #[clap( + long, + help = "Disable Lighthouse's slashing protection for all web3signer keys. This can \ + reduce the I/O burden on the VC but is only safe if slashing protection \ + is enabled on the remote signer and is implemented correctly. DO NOT ENABLE \ + THIS FLAG UNLESS YOU ARE CERTAIN THAT SLASHING PROTECTION IS ENABLED ON \ + THE REMOTE SIGNER. YOU WILL GET SLASHED IF YOU USE THIS FLAG WITHOUT \ + ENABLING WEB3SIGNER'S SLASHING PROTECTION.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub disable_slashing_protection_web3signer: bool, + + /* Experimental/development options */ + #[clap( + long, + value_name = "MILLIS", + default_value_t = 20000, + help = "Keep-alive timeout for each web3signer connection. Set to '0' to never \ + timeout.", + display_order = 0 + )] + pub web3_signer_keep_alive_timeout: u64, + + #[clap( + long, + value_name = "COUNT", + help = "Maximum number of idle connections to maintain per web3signer host. 
Default \ + is unlimited.", + display_order = 0 + )] + pub web3_signer_max_idle_connections: Option, } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index bb72ef81c8..2a848e2022 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -1,6 +1,8 @@ -use beacon_node_fallback::{beacon_node_health::BeaconNodeSyncDistanceTiers, ApiTopic}; +use crate::cli::ValidatorClient; +use beacon_node_fallback::beacon_node_health::BeaconNodeSyncDistanceTiers; +use beacon_node_fallback::ApiTopic; use clap::ArgMatches; -use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, parse_optional, parse_required}; +use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, parse_required}; use directory::{ get_network_dir, DEFAULT_HARDCODED_NETWORK, DEFAULT_ROOT_DIR, DEFAULT_SECRET_DIR, DEFAULT_VALIDATOR_DIR, @@ -14,9 +16,8 @@ use slog::{info, warn, Logger}; use std::fs; use std::net::IpAddr; use std::path::PathBuf; -use std::str::FromStr; use std::time::Duration; -use types::{Address, GRAFFITI_BYTES_LEN}; +use types::GRAFFITI_BYTES_LEN; use validator_http_api::{self, PK_FILENAME}; use validator_http_metrics; use validator_store::Config as ValidatorStoreConfig; @@ -132,7 +133,11 @@ impl Default for Config { impl Config { /// Returns a `Default` implementation of `Self` with some parameters modified by the supplied /// `cli_args`. 
- pub fn from_cli(cli_args: &ArgMatches, log: &Logger) -> Result { + pub fn from_cli( + cli_args: &ArgMatches, + validator_client_config: &ValidatorClient, + log: &Logger, + ) -> Result { let mut config = Config::default(); let default_root_dir = dirs::home_dir() @@ -145,11 +150,12 @@ impl Config { validator_dir = Some(base_dir.join(DEFAULT_VALIDATOR_DIR)); secrets_dir = Some(base_dir.join(DEFAULT_SECRET_DIR)); } - if cli_args.get_one::("validators-dir").is_some() { - validator_dir = Some(parse_required(cli_args, "validators-dir")?); + + if let Some(validator_dir_path) = validator_client_config.validators_dir.as_ref() { + validator_dir = Some(validator_dir_path.clone()); } - if cli_args.get_one::("secrets-dir").is_some() { - secrets_dir = Some(parse_required(cli_args, "secrets-dir")?); + if let Some(secrets_dir_path) = validator_client_config.secrets_dir.as_ref() { + secrets_dir = Some(secrets_dir_path.clone()); } config.validator_dir = validator_dir.unwrap_or_else(|| { @@ -169,35 +175,36 @@ impl Config { .map_err(|e| format!("Failed to create {:?}: {:?}", config.validator_dir, e))?; } - if let Some(beacon_nodes) = parse_optional::(cli_args, "beacon-nodes")? { + if let Some(beacon_nodes) = validator_client_config.beacon_nodes.as_ref() { config.beacon_nodes = beacon_nodes - .split(',') - .map(SensitiveUrl::parse) + .iter() + .map(|s| SensitiveUrl::parse(s)) .collect::>() .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?; } - if let Some(proposer_nodes) = parse_optional::(cli_args, "proposer-nodes")? 
{ + + if let Some(proposer_nodes) = validator_client_config.proposer_nodes.as_ref() { config.proposer_nodes = proposer_nodes - .split(',') - .map(SensitiveUrl::parse) + .iter() + .map(|s| SensitiveUrl::parse(s)) .collect::>() .map_err(|e| format!("Unable to parse proposer node URL: {:?}", e))?; } - config.disable_auto_discover = cli_args.get_flag("disable-auto-discover"); - config.init_slashing_protection = cli_args.get_flag("init-slashing-protection"); - config.use_long_timeouts = cli_args.get_flag("use-long-timeouts"); + config.disable_auto_discover = validator_client_config.disable_auto_discover; + config.init_slashing_protection = validator_client_config.init_slashing_protection; + config.use_long_timeouts = validator_client_config.use_long_timeouts; - if let Some(graffiti_file_path) = cli_args.get_one::("graffiti-file") { + if let Some(graffiti_file_path) = validator_client_config.graffiti_file.as_ref() { let mut graffiti_file = GraffitiFile::new(graffiti_file_path.into()); graffiti_file .read_graffiti_file() .map_err(|e| format!("Error reading graffiti file: {:?}", e))?; config.graffiti_file = Some(graffiti_file); - info!(log, "Successfully loaded graffiti file"; "path" => graffiti_file_path); + info!(log, "Successfully loaded graffiti file"; "path" => graffiti_file_path.to_str()); } - if let Some(input_graffiti) = cli_args.get_one::("graffiti") { + if let Some(input_graffiti) = validator_client_config.graffiti.as_ref() { let graffiti_bytes = input_graffiti.as_bytes(); if graffiti_bytes.len() > GRAFFITI_BYTES_LEN { return Err(format!( @@ -216,55 +223,40 @@ impl Config { } } - if let Some(input_fee_recipient) = - parse_optional::
    (cli_args, "suggested-fee-recipient")? - { + if let Some(input_fee_recipient) = validator_client_config.suggested_fee_recipient { config.validator_store.fee_recipient = Some(input_fee_recipient); } - if let Some(tls_certs) = parse_optional::(cli_args, "beacon-nodes-tls-certs")? { - config.beacon_nodes_tls_certs = Some(tls_certs.split(',').map(PathBuf::from).collect()); + if let Some(tls_certs) = validator_client_config.beacon_nodes_tls_certs.as_ref() { + config.beacon_nodes_tls_certs = Some(tls_certs.iter().map(PathBuf::from).collect()); } - if cli_args.get_flag("distributed") { - config.distributed = true; - } + config.distributed = validator_client_config.distributed; - if let Some(broadcast_topics) = cli_args.get_one::("broadcast") { - config.broadcast_topics = broadcast_topics - .split(',') - .filter(|t| *t != "none") - .map(|t| { - t.trim() - .parse::() - .map_err(|_| format!("Unknown API topic to broadcast: {t}")) - }) - .collect::>()?; + if let Some(mut broadcast_topics) = validator_client_config.broadcast.clone() { + broadcast_topics.retain(|topic| *topic != ApiTopic::None); + config.broadcast_topics = broadcast_topics; } /* * Beacon node fallback */ - if let Some(sync_tolerance) = cli_args.get_one::("beacon-nodes-sync-tolerances") { - config.beacon_node_fallback.sync_tolerances = - BeaconNodeSyncDistanceTiers::from_str(sync_tolerance)?; - } else { - config.beacon_node_fallback.sync_tolerances = BeaconNodeSyncDistanceTiers::default(); - } + config.beacon_node_fallback.sync_tolerances = BeaconNodeSyncDistanceTiers::from_vec( + &validator_client_config.beacon_nodes_sync_tolerances, + )?; /* * Web3 signer */ - if let Some(s) = parse_optional::(cli_args, "web3-signer-keep-alive-timeout")? 
{ - config.initialized_validators.web3_signer_keep_alive_timeout = if s == "null" { - None - } else { - Some(Duration::from_millis( - s.parse().map_err(|_| "invalid timeout value".to_string())?, - )) - } + if validator_client_config.web3_signer_keep_alive_timeout == 0 { + config.initialized_validators.web3_signer_keep_alive_timeout = None + } else { + config.initialized_validators.web3_signer_keep_alive_timeout = Some( + Duration::from_millis(validator_client_config.web3_signer_keep_alive_timeout), + ); } - if let Some(n) = parse_optional::(cli_args, "web3-signer-max-idle-connections")? { + + if let Some(n) = validator_client_config.web3_signer_max_idle_connections { config .initialized_validators .web3_signer_max_idle_connections = Some(n); @@ -274,12 +266,10 @@ impl Config { * Http API server */ - if cli_args.get_flag("http") { - config.http_api.enabled = true; - } + config.http_api.enabled = validator_client_config.http; - if let Some(address) = cli_args.get_one::("http-address") { - if cli_args.get_flag("unencrypted-http-transport") { + if let Some(address) = &validator_client_config.http_address { + if validator_client_config.unencrypted_http_transport { config.http_api.listen_addr = address .parse::() .map_err(|_| "http-address is not a valid IP address.")?; @@ -291,13 +281,9 @@ impl Config { } } - if let Some(port) = cli_args.get_one::("http-port") { - config.http_api.listen_port = port - .parse::() - .map_err(|_| "http-port is not a valid u16.")?; - } + config.http_api.listen_port = validator_client_config.http_port; - if let Some(allow_origin) = cli_args.get_one::("http-allow-origin") { + if let Some(allow_origin) = validator_client_config.http_allow_origin.as_ref() { // Pre-validate the config value to give feedback to the user on node startup, instead of // as late as when the first API response is produced. 
hyper::header::HeaderValue::from_str(allow_origin) @@ -306,15 +292,11 @@ impl Config { config.http_api.allow_origin = Some(allow_origin.to_string()); } - if cli_args.get_flag("http-allow-keystore-export") { - config.http_api.allow_keystore_export = true; - } + config.http_api.allow_keystore_export = validator_client_config.http_allow_keystore_export; + config.http_api.store_passwords_in_secrets_dir = + validator_client_config.http_store_passwords_in_secrets_dir; - if cli_args.get_flag("http-store-passwords-in-secrets-dir") { - config.http_api.store_passwords_in_secrets_dir = true; - } - - if let Some(http_token_path) = cli_args.get_one::("http-token-path") { + if let Some(http_token_path) = &validator_client_config.http_token_path { config.http_api.http_token_path = PathBuf::from(http_token_path); } else { // For backward compatibility, default to the path under the validator dir if not provided. @@ -325,27 +307,19 @@ impl Config { * Prometheus metrics HTTP server */ - if cli_args.get_flag("metrics") { - config.http_metrics.enabled = true; - } + config.http_metrics.enabled = validator_client_config.metrics; + config.enable_high_validator_count_metrics = + validator_client_config.enable_high_validator_count_metrics; - if cli_args.get_flag("enable-high-validator-count-metrics") { - config.enable_high_validator_count_metrics = true; - } - - if let Some(address) = cli_args.get_one::("metrics-address") { - config.http_metrics.listen_addr = address + if let Some(metrics_address) = &validator_client_config.metrics_address { + config.http_metrics.listen_addr = metrics_address .parse::() .map_err(|_| "metrics-address is not a valid IP address.")?; } - if let Some(port) = cli_args.get_one::("metrics-port") { - config.http_metrics.listen_port = port - .parse::() - .map_err(|_| "metrics-port is not a valid u16.")?; - } + config.http_metrics.listen_port = validator_client_config.metrics_port; - if let Some(allow_origin) = cli_args.get_one::("metrics-allow-origin") { + if let 
Some(allow_origin) = validator_client_config.metrics_allow_origin.as_ref() { // Pre-validate the config value to give feedback to the user on node startup, instead of // as late as when the first API response is produced. hyper::header::HeaderValue::from_str(allow_origin) @@ -361,9 +335,8 @@ impl Config { /* * Explorer metrics */ - if let Some(monitoring_endpoint) = cli_args.get_one::("monitoring-endpoint") { - let update_period_secs = - clap_utils::parse_optional(cli_args, "monitoring-endpoint-period")?; + if let Some(monitoring_endpoint) = validator_client_config.monitoring_endpoint.as_ref() { + let update_period_secs = Some(validator_client_config.monitoring_endpoint_period); config.monitoring_api = Some(monitoring_api::Config { db_path: None, freezer_db_path: None, @@ -372,56 +345,34 @@ impl Config { }); } - if cli_args.get_flag("enable-doppelganger-protection") { - config.enable_doppelganger_protection = true; - } + config.enable_doppelganger_protection = + validator_client_config.enable_doppelganger_protection; + config.validator_store.builder_proposals = validator_client_config.builder_proposals; + config.validator_store.prefer_builder_proposals = + validator_client_config.prefer_builder_proposals; + config.validator_store.gas_limit = Some(validator_client_config.gas_limit); - if cli_args.get_flag("builder-proposals") { - config.validator_store.builder_proposals = true; - } - - if cli_args.get_flag("prefer-builder-proposals") { - config.validator_store.prefer_builder_proposals = true; - } - - config.validator_store.gas_limit = cli_args - .get_one::("gas-limit") - .map(|gas_limit| { - gas_limit - .parse::() - .map_err(|_| "gas-limit is not a valid u64.") - }) - .transpose()?; - - if let Some(registration_timestamp_override) = - cli_args.get_one::("builder-registration-timestamp-override") - { - config.builder_registration_timestamp_override = Some( - registration_timestamp_override - .parse::() - .map_err(|_| "builder-registration-timestamp-override is not a 
valid u64.")?, - ); - } - - config.validator_store.builder_boost_factor = - parse_optional(cli_args, "builder-boost-factor")?; + config.builder_registration_timestamp_override = + validator_client_config.builder_registration_timestamp_override; + config.validator_store.builder_boost_factor = validator_client_config.builder_boost_factor; config.enable_latency_measurement_service = - !cli_args.get_flag("disable-latency-measurement-service"); + !validator_client_config.disable_latency_measurement_service; config.validator_registration_batch_size = - parse_required(cli_args, "validator-registration-batch-size")?; + validator_client_config.validator_registration_batch_size; + if config.validator_registration_batch_size == 0 { return Err("validator-registration-batch-size cannot be 0".to_string()); } config.validator_store.enable_web3signer_slashing_protection = - if cli_args.get_flag("disable-slashing-protection-web3signer") { + if validator_client_config.disable_slashing_protection_web3signer { warn!( log, "Slashing protection for remote keys disabled"; "info" => "ensure slashing protection on web3signer is enabled or you WILL \ - get slashed" + get slashed" ); false } else { diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 8ebfe98b15..5f69bf125e 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -1,9 +1,9 @@ -mod cli; +pub mod cli; pub mod config; mod latency; mod notifier; -pub use cli::cli_app; +use crate::cli::ValidatorClient; pub use config::Config; use initialized_validators::InitializedValidators; use metrics::set_gauge; @@ -11,11 +11,10 @@ use monitoring_api::{MonitoringHttpClient, ProcessType}; use sensitive_url::SensitiveUrl; use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; +use account_utils::validator_definitions::ValidatorDefinitions; use beacon_node_fallback::{ start_fallback_updater_service, BeaconNodeFallback, CandidateBeaconNode, }; - -use 
account_utils::validator_definitions::ValidatorDefinitions; use clap::ArgMatches; use doppelganger_service::DoppelgangerService; use environment::RuntimeContext; @@ -96,8 +95,9 @@ impl ProductionValidatorClient { pub async fn new_from_cli( context: RuntimeContext, cli_args: &ArgMatches, + validator_client_config: &ValidatorClient, ) -> Result { - let config = Config::from_cli(cli_args, context.log()) + let config = Config::from_cli(cli_args, validator_client_config, context.log()) .map_err(|e| format!("Unable to initialize config: {}", e))?; Self::new(context, config).await } diff --git a/validator_manager/src/lib.rs b/validator_manager/src/lib.rs index 8e43cd5977..9beccd3bde 100644 --- a/validator_manager/src/lib.rs +++ b/validator_manager/src/lib.rs @@ -1,5 +1,5 @@ -use clap::{Arg, ArgAction, ArgMatches, Command}; -use clap_utils::{get_color_style, FLAG_HEADER}; +use clap::{ArgMatches, Command}; +use clap_utils::get_color_style; use common::write_to_json_file; use environment::Environment; use serde::Serialize; @@ -46,16 +46,6 @@ pub fn cli_app() -> Command { .display_order(0) .styles(get_color_style()) .about("Utilities for managing a Lighthouse validator client via the HTTP API.") - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER) - .global(true), - ) .subcommand(create_validators::cli_app()) .subcommand(import_validators::cli_app()) .subcommand(move_validators::cli_app()) From df131b2a6c58609d3f772cd1ac89487a0d4247d0 Mon Sep 17 00:00:00 2001 From: kevaundray Date: Tue, 4 Feb 2025 00:47:11 +0000 Subject: [PATCH 30/52] chore: update peerDAS KZG library to 0.5.3 (#6906) This optimizes the time it takes to load the context, so that tests do not time out --- Cargo.lock | 29 ++++++++++++----------------- Cargo.toml | 9 +-------- crypto/kzg/Cargo.toml | 6 ------ 3 files changed, 13 insertions(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock 
index cba98c6991..5707d21953 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1577,9 +1577,9 @@ dependencies = [ [[package]] name = "crate_crypto_internal_eth_kzg_bls12_381" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23be5253f1bd7fd411721a58712308c4747d0a41d040bbf8ebb78d52909a480" +checksum = "48603155907d588e487aea229f61a28d9a918c95c9aa987055ba29502225810b" dependencies = [ "blst", "blstrs", @@ -1591,9 +1591,9 @@ dependencies = [ [[package]] name = "crate_crypto_internal_eth_kzg_erasure_codes" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2067ce20ef380ff33a93ce0af62bea22d35531b7f3586224d8d5176ec6cf578" +checksum = "cdf616e4b4f1799191bb1e70b8a29f65e95ab5d74c59972a34998de488d01efd" dependencies = [ "crate_crypto_internal_eth_kzg_bls12_381", "crate_crypto_internal_eth_kzg_polynomial", @@ -1601,24 +1601,24 @@ dependencies = [ [[package]] name = "crate_crypto_internal_eth_kzg_maybe_rayon" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "558f50324ff016e5fe93113c78a72776d790d52f244ae9602a8013a67a189b66" +checksum = "f1ddd0330f34f0b92a9f0b29bc3f8494b30d596ab8b951233ec90b2d72ab132c" [[package]] name = "crate_crypto_internal_eth_kzg_polynomial" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e051c4f5aa5696bd7c504930485436ec62bf14f30a4c2d78504f3f8ec6a3daf" +checksum = "7488314261926373e1c20121c404fabf5b57ca09f48eddc7fef38be1df79a006" dependencies = [ "crate_crypto_internal_eth_kzg_bls12_381", ] [[package]] name = "crate_crypto_kzg_multi_open_fk20" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66ed6bf8993d9f3b361da4ed38f067503e08c0b948af0d6f4bb941dd647c0f2c" +checksum = "d24efdb64e7518848f11069dd9de23bd04455146a9fd5486345d99ed8bfdb049" 
dependencies = [ "crate_crypto_internal_eth_kzg_bls12_381", "crate_crypto_internal_eth_kzg_maybe_rayon", @@ -4759,11 +4759,6 @@ version = "0.1.0" dependencies = [ "arbitrary", "c-kzg", - "crate_crypto_internal_eth_kzg_bls12_381", - "crate_crypto_internal_eth_kzg_erasure_codes", - "crate_crypto_internal_eth_kzg_maybe_rayon", - "crate_crypto_internal_eth_kzg_polynomial", - "crate_crypto_kzg_multi_open_fk20", "criterion", "derivative", "ethereum_hashing", @@ -7525,9 +7520,9 @@ dependencies = [ [[package]] name = "rust_eth_kzg" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3291fd0d9c629a56537d74bbc1e7bcaf5be610f2f7b55af85c4fea843c6aeca3" +checksum = "a237a478ee68e491a0f40bbcbb958b79ba9b37aacce459f7ab3ba78f3cbfa9d0" dependencies = [ "crate_crypto_internal_eth_kzg_bls12_381", "crate_crypto_internal_eth_kzg_erasure_codes", diff --git a/Cargo.toml b/Cargo.toml index e30b6aa2b6..86186da17d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -132,14 +132,7 @@ delay_map = "0.4" derivative = "2" dirs = "3" either = "1.9" -# TODO: rust_eth_kzg is pinned for now while a perf regression is investigated -# The crate_crypto_* dependencies can be removed from this file completely once we update -rust_eth_kzg = "=0.5.1" -crate_crypto_internal_eth_kzg_bls12_381 = "=0.5.1" -crate_crypto_internal_eth_kzg_erasure_codes = "=0.5.1" -crate_crypto_internal_eth_kzg_maybe_rayon = "=0.5.1" -crate_crypto_internal_eth_kzg_polynomial = "=0.5.1" -crate_crypto_kzg_multi_open_fk20 = "=0.5.1" +rust_eth_kzg = "0.5.3" discv5 = { version = "0.9", features = ["libp2p"] } env_logger = "0.9" ethereum_hashing = "0.7.0" diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index e26fe59413..bfe0f19cd0 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -8,12 +8,6 @@ edition = "2021" [dependencies] arbitrary = { workspace = true } c-kzg = { workspace = true } -# Required to maintain the pin from 
https://github.com/sigp/lighthouse/pull/6608 -crate_crypto_internal_eth_kzg_bls12_381 = { workspace = true } -crate_crypto_internal_eth_kzg_erasure_codes = { workspace = true } -crate_crypto_internal_eth_kzg_maybe_rayon = { workspace = true } -crate_crypto_internal_eth_kzg_polynomial = { workspace = true } -crate_crypto_kzg_multi_open_fk20 = { workspace = true } derivative = { workspace = true } ethereum_hashing = { workspace = true } ethereum_serde_utils = { workspace = true } From d1061dcf59102d8dbc4787bd1b7b524e5d525d78 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 4 Feb 2025 12:28:37 +1100 Subject: [PATCH 31/52] UX Network Fixes (#6796) There were two things I came across during some recent testing, that this PR addresses. 1 - The default port for IPv6 was set to 9090, which is confusing. I've set this to match its ipv4 counterpart (i.e 9000 and 9001). This makes more sense and is easier to firewall, for those firewalls that support both versions for a single rule. 2 - Watching the NAT status of lighthouse, I notice we only set the field to 1 once the NAT is passed. We don't give it a default 0 (false). So we only see results when its successful. On peer disconnects, i've piggy-backed a loop of the connected peers to also watch and check for NAT status updates. --- beacon_node/src/cli.rs | 3 +-- beacon_node/src/config.rs | 14 ++++++++++---- book/src/advanced_networking.md | 8 ++++---- book/src/help_bn.md | 4 ++-- 4 files changed, 17 insertions(+), 12 deletions(-) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index ce4c5f9476..29faa7f220 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -176,8 +176,7 @@ pub fn cli_app() -> Command { .long("port6") .value_name("PORT") .help("The TCP/UDP ports to listen on over IPv6 when listening over both IPv4 and \ - IPv6. Defaults to 9090 when required. The Quic UDP port will be set to this value + 1.") - .default_value("9090") + IPv6. Defaults to --port. 
The Quic UDP port will be set to this value + 1.") .action(ArgAction::Set) .display_order(0) ) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 6d3c18d363..0f8f3a8012 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -936,12 +936,11 @@ pub fn parse_listening_addresses( .expect("--port has a default value") .parse::() .map_err(|parse_error| format!("Failed to parse --port as an integer: {parse_error}"))?; - let port6 = cli_args + let maybe_port6 = cli_args .get_one::("port6") .map(|s| str::parse::(s)) .transpose() - .map_err(|parse_error| format!("Failed to parse --port6 as an integer: {parse_error}"))? - .unwrap_or(9090); + .map_err(|parse_error| format!("Failed to parse --port6 as an integer: {parse_error}"))?; // parse the possible discovery ports. let maybe_disc_port = cli_args @@ -989,6 +988,10 @@ pub fn parse_listening_addresses( warn!(log, "When listening only over IPv6, use the --port flag. The value of --port6 will be ignored."); } + // If we are only listening on ipv6 and the user has specified --port6, lets just use + // that. + let port = maybe_port6.unwrap_or(port); + // use zero ports if required. If not, use the given port. let tcp_port = use_zero_ports .then(unused_port::unused_tcp6_port) @@ -1055,6 +1058,9 @@ pub fn parse_listening_addresses( }) } (Some(ipv4), Some(ipv6)) => { + // If --port6 is not set, we use --port + let port6 = maybe_port6.unwrap_or(port); + let ipv4_tcp_port = use_zero_ports .then(unused_port::unused_tcp4_port) .transpose()? @@ -1074,7 +1080,7 @@ pub fn parse_listening_addresses( ipv4_tcp_port + 1 }); - // Defaults to 9090 when required + // Defaults to 9000 when required let ipv6_tcp_port = use_zero_ports .then(unused_port::unused_tcp6_port) .transpose()? 
diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index c0f6b5485e..0dc1000aa0 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -162,8 +162,8 @@ To listen over both IPv4 and IPv6: > > **IPv6**: > -> It listens on the default value of --port6 (`9090`) for both UDP and TCP. -> QUIC will use port `9091` for UDP, which is the default `--port6` value (`9090`) + 1. +> It listens on the default value of --port6 (`9000`) for both UDP and TCP. +> QUIC will use port `9001` for UDP, which is the default `--port6` value (`9000`) + 1. > When using `--listen-address :: --listen-address --port 9909 --discovery-port6 9999`, listening will be set up as follows: > @@ -174,8 +174,8 @@ To listen over both IPv4 and IPv6: > > **IPv6**: > -> It listens on the default value of `--port6` (`9090`) for TCP, and port `9999` for UDP. -> QUIC will use port `9091` for UDP, which is the default `--port6` value (`9090`) + 1. +> It listens on the default value of `--port6` (`9000`) for TCP, and port `9999` for UDP. +> QUIC will use port `9001` for UDP, which is the default `--port6` value (`9000`) + 1. ### Configuring Lighthouse to advertise IPv6 reachable addresses diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 2d12010094..3bc8a2c275 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -301,8 +301,8 @@ Options: [default: 9000] --port6 The TCP/UDP ports to listen on over IPv6 when listening over both IPv4 - and IPv6. Defaults to 9090 when required. The Quic UDP port will be - set to this value + 1. [default: 9090] + and IPv6. Defaults to --port. The Quic UDP port will be set to this + value + 1. --prepare-payload-lookahead The time before the start of a proposal slot at which payload attributes should be sent. 
Low values are useful for execution nodes From 56f201a257e86d0aeacc6072da11d08e8c7abc63 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Tue, 4 Feb 2025 05:00:37 +0300 Subject: [PATCH 32/52] Add check to Lockbud CI job (#6898) --- .github/workflows/test-suite.yml | 3 +-- scripts/ci/check-lockbud.sh | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) create mode 100755 scripts/ci/check-lockbud.sh diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 0ee9dbb622..0f91c86617 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -65,8 +65,7 @@ jobs: - name: Install dependencies run: apt update && apt install -y cmake libclang-dev - name: Check for deadlocks - run: | - cargo lockbud -k deadlock -b -l tokio_util + run: ./scripts/ci/check-lockbud.sh target-branch-check: name: target-branch-check diff --git a/scripts/ci/check-lockbud.sh b/scripts/ci/check-lockbud.sh new file mode 100755 index 0000000000..8e1d33b53b --- /dev/null +++ b/scripts/ci/check-lockbud.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +# Run lockbud to check for deadlocks and capture the output +output=$(cargo lockbud -k deadlock -b -l tokio_util 2>&1) + +# Check if lockbud returned any issues +if echo "$output" | grep -q '"bug_kind"'; then + # Print the JSON payload + echo "Lockbud detected issues:" + echo "$output" + + # Exit with a non-zero status to indicate an error + exit 1 +else + echo "No issues detected by Lockbud." 
+ exit 0 +fi \ No newline at end of file From 3d06bc26d159536645d994121dcfffba1801074a Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Tue, 4 Feb 2025 14:43:37 +0800 Subject: [PATCH 33/52] Add test to beacon node fallback feature (#6568) --- Cargo.lock | 67 +++++++ Cargo.toml | 5 + testing/validator_test_rig/Cargo.toml | 14 ++ testing/validator_test_rig/src/lib.rs | 1 + .../src/mock_beacon_node.rs | 132 +++++++++++++ validator_client/Cargo.toml | 3 - .../beacon_node_fallback/Cargo.toml | 4 + .../beacon_node_fallback/src/lib.rs | 176 +++++++++++++++++- validator_client/src/lib.rs | 16 +- 9 files changed, 405 insertions(+), 13 deletions(-) create mode 100644 testing/validator_test_rig/Cargo.toml create mode 100644 testing/validator_test_rig/src/lib.rs create mode 100644 testing/validator_test_rig/src/mock_beacon_node.rs diff --git a/Cargo.lock b/Cargo.lock index 5707d21953..4581fb9ce0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -543,6 +543,16 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "155a5a185e42c6b77ac7b88a15143d930a9e9727a5b7b77eed417404ab15c247" +[[package]] +name = "assert-json-diff" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "async-channel" version = "1.9.0" @@ -889,6 +899,7 @@ dependencies = [ "eth2", "futures", "itertools 0.10.5", + "logging", "serde", "slog", "slot_clock", @@ -896,6 +907,7 @@ dependencies = [ "tokio", "types", "validator_metrics", + "validator_test_rig", ] [[package]] @@ -1485,6 +1497,16 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +[[package]] +name = "colored" +version = "2.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" +dependencies = [ + "lazy_static", + "windows-sys 0.59.0", +] + [[package]] name = "compare_fields" version = "0.2.0" @@ -5794,6 +5816,30 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9366861eb2a2c436c20b12c8dbec5f798cea6b47ad99216be0282942e2c81ea0" +[[package]] +name = "mockito" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "652cd6d169a36eaf9d1e6bce1a221130439a966d7f27858af66a33a66e9c4ee2" +dependencies = [ + "assert-json-diff", + "bytes", + "colored", + "futures-util", + "http 1.2.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.6.0", + "hyper-util", + "log", + "rand 0.8.5", + "regex", + "serde_json", + "serde_urlencoded", + "similar", + "tokio", +] + [[package]] name = "moka" version = "0.12.10" @@ -8175,6 +8221,12 @@ dependencies = [ "validator_metrics", ] +[[package]] +name = "similar" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" + [[package]] name = "simple_asn1" version = "0.6.3" @@ -9090,6 +9142,7 @@ dependencies = [ "bytes", "libc", "mio", + "parking_lot 0.12.3", "pin-project-lite", "signal-hook-registry", "socket2", @@ -9868,6 +9921,20 @@ dependencies = [ "validator_metrics", ] +[[package]] +name = "validator_test_rig" +version = "0.1.0" +dependencies = [ + "eth2", + "logging", + "mockito", + "regex", + "sensitive_url", + "serde_json", + "slog", + "types", +] + [[package]] name = "valuable" version = "0.1.1" diff --git a/Cargo.toml b/Cargo.toml index 86186da17d..73912f6082 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -86,8 +86,10 @@ members = [ "testing/simulator", "testing/state_transition_vectors", "testing/test-test_logger", + "testing/validator_test_rig", "testing/web3signer_tests", + 
"validator_client", "validator_client/beacon_node_fallback", "validator_client/doppelganger_service", @@ -155,6 +157,7 @@ log = "0.4" lru = "0.12" maplit = "1" milhouse = "0.3" +mockito = "1.5.0" num_cpus = "1" parking_lot = "0.12" paste = "1" @@ -261,6 +264,7 @@ malloc_utils = { path = "common/malloc_utils" } merkle_proof = { path = "consensus/merkle_proof" } monitoring_api = { path = "common/monitoring_api" } network = { path = "beacon_node/network" } +node_test_rig = { path = "testing/node_test_rig" } operation_pool = { path = "beacon_node/operation_pool" } pretty_reqwest_error = { path = "common/pretty_reqwest_error" } proto_array = { path = "consensus/proto_array" } @@ -283,6 +287,7 @@ validator_http_api = { path = "validator_client/http_api" } validator_http_metrics = { path = "validator_client/http_metrics" } validator_metrics = { path = "validator_client/validator_metrics" } validator_store = { path = "validator_client/validator_store" } +validator_test_rig = { path = "testing/validator_test_rig" } warp_utils = { path = "common/warp_utils" } xdelta3 = { git = "http://github.com/sigp/xdelta3-rs", rev = "50d63cdf1878e5cf3538e9aae5eed34a22c64e4a" } zstd = "0.13" diff --git a/testing/validator_test_rig/Cargo.toml b/testing/validator_test_rig/Cargo.toml new file mode 100644 index 0000000000..76560b8afc --- /dev/null +++ b/testing/validator_test_rig/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "validator_test_rig" +version = "0.1.0" +edition = { workspace = true } + +[dependencies] +eth2 = { workspace = true } +logging = { workspace = true } +mockito = { workspace = true } +regex = { workspace = true } +sensitive_url = { workspace = true } +serde_json = { workspace = true } +slog = { workspace = true } +types = { workspace = true } diff --git a/testing/validator_test_rig/src/lib.rs b/testing/validator_test_rig/src/lib.rs new file mode 100644 index 0000000000..a0a979dfc8 --- /dev/null +++ b/testing/validator_test_rig/src/lib.rs @@ -0,0 +1 @@ +pub mod 
mock_beacon_node; diff --git a/testing/validator_test_rig/src/mock_beacon_node.rs b/testing/validator_test_rig/src/mock_beacon_node.rs new file mode 100644 index 0000000000..f875116155 --- /dev/null +++ b/testing/validator_test_rig/src/mock_beacon_node.rs @@ -0,0 +1,132 @@ +use eth2::types::{GenericResponse, SyncingData}; +use eth2::{BeaconNodeHttpClient, StatusCode, Timeouts}; +use logging::test_logger; +use mockito::{Matcher, Mock, Server, ServerGuard}; +use regex::Regex; +use sensitive_url::SensitiveUrl; +use slog::{info, Logger}; +use std::marker::PhantomData; +use std::str::FromStr; +use std::sync::{Arc, Mutex}; +use std::time::Duration; +use types::{ChainSpec, ConfigAndPreset, EthSpec, SignedBlindedBeaconBlock}; + +pub struct MockBeaconNode { + server: ServerGuard, + pub beacon_api_client: BeaconNodeHttpClient, + log: Logger, + _phantom: PhantomData, + pub received_blocks: Arc>>>, +} + +impl MockBeaconNode { + pub async fn new() -> Self { + // mock server logging + let server = Server::new_async().await; + let beacon_api_client = BeaconNodeHttpClient::new( + SensitiveUrl::from_str(&server.url()).unwrap(), + Timeouts::set_all(Duration::from_secs(1)), + ); + let log = test_logger(); + Self { + server, + beacon_api_client, + log, + _phantom: PhantomData, + received_blocks: Arc::new(Mutex::new(Vec::new())), + } + } + + /// Resets all mocks + #[allow(dead_code)] + pub fn reset_mocks(&mut self) { + self.server.reset(); + } + + pub fn mock_config_spec(&mut self, spec: &ChainSpec) { + let path_pattern = Regex::new(r"^/eth/v1/config/spec$").unwrap(); + let config_and_preset = ConfigAndPreset::from_chain_spec::(spec, None); + let data = GenericResponse::from(config_and_preset); + self.server + .mock("GET", Matcher::Regex(path_pattern.to_string())) + .with_status(200) + .with_body(serde_json::to_string(&data).unwrap()) + .create(); + } + + pub fn mock_get_node_syncing(&mut self, response: SyncingData) { + let path_pattern = 
Regex::new(r"^/eth/v1/node/syncing$").unwrap(); + + let data = GenericResponse::from(response); + + self.server + .mock("GET", Matcher::Regex(path_pattern.to_string())) + .with_status(200) + .with_body(serde_json::to_string(&data).unwrap()) + .create(); + } + + /// Mocks the `post_beacon_blinded_blocks_v2_ssz` response with an optional `delay`. + pub fn mock_post_beacon_blinded_blocks_v2_ssz(&mut self, delay: Duration) -> Mock { + let path_pattern = Regex::new(r"^/eth/v2/beacon/blinded_blocks$").unwrap(); + let log = self.log.clone(); + let url = self.server.url(); + + let received_blocks = Arc::clone(&self.received_blocks); + + self.server + .mock("POST", Matcher::Regex(path_pattern.to_string())) + .match_header("content-type", "application/octet-stream") + .with_status(200) + .with_body_from_request(move |request| { + info!( + log, + "{}", + format!( + "Received published block request on server {} with delay {} s", + url, + delay.as_secs(), + ) + ); + + let body = request.body().expect("Failed to get request body"); + let block: SignedBlindedBeaconBlock = + SignedBlindedBeaconBlock::any_from_ssz_bytes(body) + .expect("Failed to deserialize body as SignedBlindedBeaconBlock"); + + received_blocks.lock().unwrap().push(block); + + std::thread::sleep(delay); + vec![] + }) + .create() + } + + pub fn mock_offline_node(&mut self) -> Mock { + let path_pattern = Regex::new(r"^/eth/v1/node/version$").unwrap(); + + self.server + .mock("GET", Matcher::Regex(path_pattern.to_string())) + .with_status(StatusCode::INTERNAL_SERVER_ERROR.as_u16() as usize) + .with_header("content-type", "application/json") + .with_body(r#"{"message":"Internal Server Error"}"#) + .create() + } + + pub fn mock_online_node(&mut self) -> Mock { + let path_pattern = Regex::new(r"^/eth/v1/node/version$").unwrap(); + + self.server + .mock("GET", Matcher::Regex(path_pattern.to_string())) + .with_status(200) + .with_header("content-type", "application/json") + .with_body( + r#"{ + "data": { + "version": 
"lighthouse-mock" + } + }"#, + ) + .create() + } +} diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 504d96ae1c..fb6007b00a 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -8,9 +8,6 @@ edition = { workspace = true } name = "validator_client" path = "src/lib.rs" -[dev-dependencies] -tokio = { workspace = true } - [dependencies] account_utils = { workspace = true } beacon_node_fallback = { workspace = true } diff --git a/validator_client/beacon_node_fallback/Cargo.toml b/validator_client/beacon_node_fallback/Cargo.toml index 2c30290110..598020d137 100644 --- a/validator_client/beacon_node_fallback/Cargo.toml +++ b/validator_client/beacon_node_fallback/Cargo.toml @@ -21,3 +21,7 @@ strum = { workspace = true } tokio = { workspace = true } types = { workspace = true } validator_metrics = { workspace = true } + +[dev-dependencies] +logging = { workspace = true } +validator_test_rig = { workspace = true } diff --git a/validator_client/beacon_node_fallback/src/lib.rs b/validator_client/beacon_node_fallback/src/lib.rs index 839e60d011..abcf74a1a6 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -752,8 +752,12 @@ mod tests { use crate::beacon_node_health::BeaconNodeHealthTier; use eth2::SensitiveUrl; use eth2::Timeouts; + use logging::test_logger; + use slot_clock::TestingSlotClock; use strum::VariantNames; - use types::{MainnetEthSpec, Slot}; + use types::{BeaconBlockDeneb, MainnetEthSpec, Slot}; + use types::{EmptyBlock, Signature, SignedBeaconBlockDeneb, SignedBlindedBeaconBlock}; + use validator_test_rig::mock_beacon_node::MockBeaconNode; type E = MainnetEthSpec; @@ -772,7 +776,7 @@ mod tests { #[tokio::test] async fn check_candidate_order() { - // These fields is irrelvant for sorting. They are set to arbitrary values. + // These fields are irrelevant for sorting. They are set to arbitrary values. 
let head = Slot::new(99); let optimistic_status = IsOptimistic::No; let execution_status = ExecutionEngineHealth::Healthy; @@ -880,4 +884,172 @@ mod tests { assert_eq!(candidates, expected_candidates); } + + async fn new_mock_beacon_node( + index: usize, + spec: &ChainSpec, + ) -> (MockBeaconNode, CandidateBeaconNode) { + let mut mock_beacon_node = MockBeaconNode::::new().await; + mock_beacon_node.mock_config_spec(spec); + + let beacon_node = + CandidateBeaconNode::::new(mock_beacon_node.beacon_api_client.clone(), index); + + (mock_beacon_node, beacon_node) + } + + fn create_beacon_node_fallback( + candidates: Vec>, + topics: Vec, + spec: Arc, + log: Logger, + ) -> BeaconNodeFallback { + let mut beacon_node_fallback = + BeaconNodeFallback::new(candidates, Config::default(), topics, spec, log); + + beacon_node_fallback.set_slot_clock(TestingSlotClock::new( + Slot::new(1), + Duration::from_secs(0), + Duration::from_secs(12), + )); + + beacon_node_fallback + } + + #[tokio::test] + async fn update_all_candidates_should_update_sync_status() { + let spec = Arc::new(MainnetEthSpec::default_spec()); + let (mut mock_beacon_node_1, beacon_node_1) = new_mock_beacon_node(0, &spec).await; + let (mut mock_beacon_node_2, beacon_node_2) = new_mock_beacon_node(1, &spec).await; + let (mut mock_beacon_node_3, beacon_node_3) = new_mock_beacon_node(2, &spec).await; + + let beacon_node_fallback = create_beacon_node_fallback( + // Put this out of order to be sorted later + vec![ + beacon_node_2.clone(), + beacon_node_3.clone(), + beacon_node_1.clone(), + ], + vec![], + spec.clone(), + test_logger(), + ); + + // BeaconNodeHealthTier 1 + mock_beacon_node_1.mock_get_node_syncing(eth2::types::SyncingData { + is_syncing: false, + is_optimistic: false, + el_offline: false, + head_slot: Slot::new(1), + sync_distance: Slot::new(0), + }); + // BeaconNodeHealthTier 3 + mock_beacon_node_2.mock_get_node_syncing(eth2::types::SyncingData { + is_syncing: false, + is_optimistic: false, + el_offline: 
true, + head_slot: Slot::new(1), + sync_distance: Slot::new(0), + }); + // BeaconNodeHealthTier 5 + mock_beacon_node_3.mock_get_node_syncing(eth2::types::SyncingData { + is_syncing: false, + is_optimistic: true, + el_offline: false, + head_slot: Slot::new(1), + sync_distance: Slot::new(0), + }); + + beacon_node_fallback.update_all_candidates().await; + + let candidates = beacon_node_fallback.candidates.read().await; + assert_eq!( + vec![beacon_node_1, beacon_node_2, beacon_node_3], + *candidates + ); + } + + #[tokio::test] + async fn broadcast_should_send_to_all_bns() { + let spec = Arc::new(MainnetEthSpec::default_spec()); + let (mut mock_beacon_node_1, beacon_node_1) = new_mock_beacon_node(0, &spec).await; + let (mut mock_beacon_node_2, beacon_node_2) = new_mock_beacon_node(1, &spec).await; + + let beacon_node_fallback = create_beacon_node_fallback( + vec![beacon_node_1, beacon_node_2], + vec![ApiTopic::Blocks], + spec.clone(), + test_logger(), + ); + + mock_beacon_node_1.mock_post_beacon_blinded_blocks_v2_ssz(Duration::from_secs(0)); + mock_beacon_node_2.mock_post_beacon_blinded_blocks_v2_ssz(Duration::from_secs(0)); + + let signed_block = SignedBlindedBeaconBlock::::Deneb(SignedBeaconBlockDeneb { + message: BeaconBlockDeneb::empty(&spec), + signature: Signature::empty(), + }); + + // trigger broadcast to `post_beacon_blinded_blocks_v2` + let result = beacon_node_fallback + .broadcast(|client| { + let signed_block_cloned = signed_block.clone(); + async move { + client + .post_beacon_blinded_blocks_v2_ssz(&signed_block_cloned, None) + .await + } + }) + .await; + + assert!(result.is_ok()); + + let received_blocks_from_bn_1 = mock_beacon_node_1.received_blocks.lock().unwrap(); + let received_blocks_from_bn_2 = mock_beacon_node_2.received_blocks.lock().unwrap(); + assert_eq!(received_blocks_from_bn_1.len(), 1); + assert_eq!(received_blocks_from_bn_2.len(), 1); + } + + #[tokio::test] + async fn first_success_should_try_nodes_in_order() { + let spec = 
Arc::new(MainnetEthSpec::default_spec()); + let (mut mock_beacon_node_1, beacon_node_1) = new_mock_beacon_node(0, &spec).await; + let (mut mock_beacon_node_2, beacon_node_2) = new_mock_beacon_node(1, &spec).await; + let (mut mock_beacon_node_3, beacon_node_3) = new_mock_beacon_node(2, &spec).await; + + let beacon_node_fallback = create_beacon_node_fallback( + vec![beacon_node_1, beacon_node_2, beacon_node_3], + vec![], + spec.clone(), + test_logger(), + ); + + let mock1 = mock_beacon_node_1.mock_offline_node(); + let mock2 = mock_beacon_node_2.mock_offline_node(); + let mock3 = mock_beacon_node_3.mock_online_node(); + + let result_success = beacon_node_fallback + .first_success(|client| async move { client.get_node_version().await }) + .await; + + // mock3 expects to be called once since it is online in the first pass + mock3.expect(1).assert(); + assert!(result_success.is_ok()); + + // make all beacon node offline and the result should error + let _mock3 = mock_beacon_node_3.mock_offline_node(); + + let result_failure = beacon_node_fallback + .first_success(|client| async move { client.get_node_version().await }) + .await; + + assert!(result_failure.is_err()); + + // Both mock1 and mock2 should be called 3 times: + // - the first time is for the result_success case, + // - the second time is when it calls all 3 mock beacon nodes and all fails in the first pass, + // - which gives the third call because the function gives a second pass if no candidates succeeded in the first pass + mock1.expect(3).assert(); + mock2.expect(3).assert(); + } } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 5f69bf125e..70236d6a3c 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -203,15 +203,15 @@ impl ProductionValidatorClient { config.initialized_validators.clone(), log.clone(), ) - .await - .map_err(|e| { - match e { - UnableToOpenVotingKeystore(err) => { - format!("Unable to initialize validators: {:?}. 
If you have recently moved the location of your data directory \ + .await + .map_err(|e| { + match e { + UnableToOpenVotingKeystore(err) => { + format!("Unable to initialize validators: {:?}. If you have recently moved the location of your data directory \ make sure to update the location of voting_keystore_path in your validator_definitions.yml", err) - }, - err => { - format!("Unable to initialize validators: {:?}", err)} + }, + err => { + format!("Unable to initialize validators: {:?}", err)} } })?; From 7bfdb337293553e124249caa0c9ce1d149f0657d Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 4 Feb 2025 17:14:41 -0800 Subject: [PATCH 34/52] Return error if getBlobs not supported (#6911) N/A Previously, we were returning an empty vec of Nones if get_blobs was not supported in the EL. This results in confusing logging where we try to process the empty list of blobs and log a bunch of Unexpected errors. See ``` Feb 03 17:32:12.383 DEBG Fetching blobs from the EL, num_expected_blobs: 6, block_root: 0x7326fe2dc1cb9036c9de7a07a662c86a339085597849016eadf061b70b7815ba, service: fetch_engine_blobs, service: beacon, module: beac on_chain::fetch_blobs:84 Feb 03 17:32:12.384 DEBG Processing engine blobs, num_fetched_blobs: 0, block_root: 0x7326fe2dc1cb9036c9de7a07a662c86a339085597849016eadf061b70b7815ba, service: fetch_engine_blobs, service: beacon, module: beacon_c hain::fetch_blobs:197 Feb 03 17:32:12.384 ERRO Error fetching or processing blobs from EL, block_root: 0x7326fe2dc1cb9036c9de7a07a662c86a339085597849016eadf061b70b7815ba, error: BlobProcessingError(AvailabilityCheck(Unexpected)), module : network::network_beacon_processor:1011 ``` The error we should be getting is that getBlobs is not supported, this PR adds a new error variant and returns that. 
--- beacon_node/execution_layer/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 4e0fe1de16..6e5e4fca01 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -158,6 +158,7 @@ pub enum Error { }, ZeroLengthTransaction, PayloadBodiesByRangeNotSupported, + GetBlobsNotSupported, InvalidJWTSecret(String), InvalidForkForPayload, InvalidPayloadBody(String), @@ -1871,7 +1872,7 @@ impl ExecutionLayer { .map_err(Box::new) .map_err(Error::EngineError) } else { - Ok(vec![None; query.len()]) + Err(Error::GetBlobsNotSupported) } } From 2193f6a4d4634ec1b86991d211bd57a03cceba0b Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 5 Feb 2025 04:08:28 -0300 Subject: [PATCH 35/52] Add individual by_range sync requests (#6497) Part of - https://github.com/sigp/lighthouse/issues/6258 To address PeerDAS sync issues we need to make individual by_range requests within a batch retriable. We should adopt the same pattern for lookup sync where each request (block/blobs/columns) is tracked individually within a "meta" request that groups them all and handles retry logic. - Building on https://github.com/sigp/lighthouse/pull/6398 the second step is to add individual request accumulators for `blocks_by_range`, `blobs_by_range`, and `data_columns_by_range`. This will allow each request to progress independently and be retried separately. Most of the logic is just piping, excuse the large diff. This PR does not change the logic of how requests are handled or retried. This will be done in a future PR changing the logic of `RangeBlockComponentsRequest`. 
### Before - Sync manager receives block with `SyncRequestId::RangeBlockAndBlobs` - Insert block into `SyncNetworkContext::range_block_components_requests` - (If received stream terminators of all requests) - Return `Vec`, and insert into `range_sync` ### Now - Sync manager receives block with `SyncRequestId::RangeBlockAndBlobs` - Insert block into `SyncNetworkContext:: blocks_by_range_requests` - (If received stream terminator of this request) - Return `Vec`, and insert into `SyncNetworkContext::components_by_range_requests ` - (If received a result for all requests) - Return `Vec`, and insert into `range_sync` --- .../lighthouse_network/src/rpc/methods.rs | 21 + .../src/rpc/self_limiter.rs | 22 +- .../src/service/api_types.rs | 58 +- beacon_node/network/src/router.rs | 2 +- .../src/sync/block_sidecar_coupling.rs | 172 +++--- beacon_node/network/src/sync/manager.rs | 203 ++++--- .../network/src/sync/network_context.rs | 563 ++++++++++-------- .../src/sync/network_context/requests.rs | 11 +- .../requests/blobs_by_range.rs | 56 ++ .../network_context/requests/blobs_by_root.rs | 2 +- .../requests/blocks_by_range.rs | 48 ++ .../requests/data_columns_by_range.rs | 54 ++ .../requests/data_columns_by_root.rs | 5 +- .../network/src/sync/range_sync/batch.rs | 32 +- beacon_node/network/src/sync/tests/range.rs | 29 +- 15 files changed, 776 insertions(+), 502 deletions(-) create mode 100644 beacon_node/network/src/sync/network_context/requests/blobs_by_range.rs create mode 100644 beacon_node/network/src/sync/network_context/requests/blocks_by_range.rs create mode 100644 beacon_node/network/src/sync/network_context/requests/data_columns_by_range.rs diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index ad6bea455e..2f6200a836 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -411,6 +411,27 @@ impl OldBlocksByRangeRequest { } } +impl From for 
OldBlocksByRangeRequest { + fn from(req: BlocksByRangeRequest) -> Self { + match req { + BlocksByRangeRequest::V1(ref req) => { + OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1 { + start_slot: req.start_slot, + count: req.count, + step: 1, + }) + } + BlocksByRangeRequest::V2(ref req) => { + OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + start_slot: req.start_slot, + count: req.count, + step: 1, + }) + } + } + } +} + /// Request a number of beacon block bodies from a peer. #[superstruct(variants(V1, V2), variant_attributes(derive(Clone, Debug, PartialEq)))] #[derive(Clone, Debug, PartialEq)] diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index e0c8593f29..ae63e5cdb5 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -217,7 +217,7 @@ mod tests { use crate::rpc::rate_limiter::Quota; use crate::rpc::self_limiter::SelfRateLimiter; use crate::rpc::{Ping, Protocol, RequestType}; - use crate::service::api_types::{AppRequestId, RequestId, SyncRequestId}; + use crate::service::api_types::{AppRequestId, RequestId, SingleLookupReqId, SyncRequestId}; use libp2p::PeerId; use std::time::Duration; use types::{EthSpec, ForkContext, Hash256, MainnetEthSpec, Slot}; @@ -238,12 +238,16 @@ mod tests { let mut limiter: SelfRateLimiter = SelfRateLimiter::new(config, fork_context, log).unwrap(); let peer_id = PeerId::random(); + let lookup_id = 0; for i in 1..=5u32 { let _ = limiter.allows( peer_id, - RequestId::Application(AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { - id: i, + RequestId::Application(AppRequestId::Sync(SyncRequestId::SingleBlock { + id: SingleLookupReqId { + lookup_id, + req_id: i, + }, })), RequestType::Ping(Ping { data: i as u64 }), ); @@ -261,9 +265,9 @@ mod tests { for i in 2..=5u32 { assert!(matches!( iter.next().unwrap().request_id, - 
RequestId::Application(AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { - id, - })) if id == i + RequestId::Application(AppRequestId::Sync(SyncRequestId::SingleBlock { + id: SingleLookupReqId { req_id, .. }, + })) if req_id == i, )); } @@ -286,9 +290,9 @@ mod tests { for i in 3..=5 { assert!(matches!( iter.next().unwrap().request_id, - RequestId::Application(AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { - id - })) if id == i + RequestId::Application(AppRequestId::Sync(SyncRequestId::SingleBlock { + id: SingleLookupReqId { req_id, .. }, + })) if req_id == i, )); } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 85fabbb0c3..800d988d1a 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use libp2p::swarm::ConnectionId; use types::{ - BlobSidecar, DataColumnSidecar, EthSpec, Hash256, LightClientBootstrap, + BlobSidecar, DataColumnSidecar, Epoch, EthSpec, Hash256, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, }; @@ -31,8 +31,12 @@ pub enum SyncRequestId { SingleBlob { id: SingleLookupReqId }, /// Request searching for a set of data columns given a hash and list of column indices. DataColumnsByRoot(DataColumnsByRootRequestId), - /// Range request that is composed by both a block range request and a blob range request. - RangeBlockAndBlobs { id: Id }, + /// Blocks by range request + BlocksByRange(BlocksByRangeRequestId), + /// Blobs by range request + BlobsByRange(BlobsByRangeRequestId), + /// Data columns by range request + DataColumnsByRange(DataColumnsByRangeRequestId), } /// Request ID for data_columns_by_root requests. Block lookups do not issue this request directly. 
@@ -43,12 +47,60 @@ pub struct DataColumnsByRootRequestId { pub requester: DataColumnsByRootRequester, } +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct BlocksByRangeRequestId { + /// Id to identify this attempt at a blocks_by_range request for `parent_request_id` + pub id: Id, + /// The Id of the overall By Range request for block components. + pub parent_request_id: ComponentsByRangeRequestId, +} + +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct BlobsByRangeRequestId { + /// Id to identify this attempt at a blobs_by_range request for `parent_request_id` + pub id: Id, + /// The Id of the overall By Range request for block components. + pub parent_request_id: ComponentsByRangeRequestId, +} + +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct DataColumnsByRangeRequestId { + /// Id to identify this attempt at a data_columns_by_range request for `parent_request_id` + pub id: Id, + /// The Id of the overall By Range request for block components. + pub parent_request_id: ComponentsByRangeRequestId, +} + +/// Block components by range request for range sync. Includes an ID for downstream consumers to +/// handle retries and tie all their sub requests together. +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct ComponentsByRangeRequestId { + /// Each `RangeRequestId` may request the same data in a later retry. This Id identifies the + /// current attempt. 
+ pub id: Id, + /// What sync component is issuing a components by range request and expecting data back + pub requester: RangeRequestId, +} + +/// Range sync chain or backfill batch +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub enum RangeRequestId { + RangeSync { chain_id: Id, batch_id: Epoch }, + BackfillSync { batch_id: Epoch }, +} + #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub enum DataColumnsByRootRequester { Sampling(SamplingId), Custody(CustodyId), } +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub enum RangeRequester { + RangeSync { chain_id: u64, batch_id: Epoch }, + BackfillSync { batch_id: Epoch }, +} + #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub struct SamplingId { pub id: SamplingRequester, diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 41b9f2c91e..36e5c391e9 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -624,7 +624,7 @@ impl Router { ) { let request_id = match request_id { AppRequestId::Sync(sync_id) => match sync_id { - id @ SyncRequestId::RangeBlockAndBlobs { .. } => id, + id @ SyncRequestId::BlocksByRange { .. } => id, other => { crit!(self.log, "BlocksByRange response on incorrect request"; "request" => ?other); return; diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 70a3fe4f5a..6c8a8eab63 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -1,7 +1,6 @@ use beacon_chain::{ block_verification_types::RpcBlock, data_column_verification::CustodyDataColumn, get_block_root, }; -use lighthouse_network::PeerId; use std::{ collections::{HashMap, VecDeque}, sync::Arc, @@ -29,9 +28,6 @@ pub struct RangeBlockComponentsRequest { /// Used to determine if the number of data columns stream termination this accumulator should /// wait for. 
This may be less than the number of `expects_custody_columns` due to request batching. num_custody_column_requests: Option, - /// The peers the request was made to. - pub(crate) peer_ids: Vec, - max_blobs_per_block: usize, } impl RangeBlockComponentsRequest { @@ -39,8 +35,6 @@ impl RangeBlockComponentsRequest { expects_blobs: bool, expects_custody_columns: Option>, num_custody_column_requests: Option, - peer_ids: Vec, - max_blobs_per_block: usize, ) -> Self { Self { blocks: <_>::default(), @@ -52,50 +46,42 @@ impl RangeBlockComponentsRequest { expects_blobs, expects_custody_columns, num_custody_column_requests, - peer_ids, - max_blobs_per_block, } } - // TODO: This function should be deprecated when simplying the retry mechanism of this range - // requests. - pub fn get_requirements(&self) -> (bool, Option>) { - (self.expects_blobs, self.expects_custody_columns.clone()) + pub fn add_blocks(&mut self, blocks: Vec>>) { + for block in blocks { + self.blocks.push_back(block); + } + self.is_blocks_stream_terminated = true; } - pub fn add_block_response(&mut self, block_opt: Option>>) { - match block_opt { - Some(block) => self.blocks.push_back(block), - None => self.is_blocks_stream_terminated = true, + pub fn add_blobs(&mut self, blobs: Vec>>) { + for blob in blobs { + self.blobs.push_back(blob); } + self.is_sidecars_stream_terminated = true; } - pub fn add_sidecar_response(&mut self, sidecar_opt: Option>>) { - match sidecar_opt { - Some(sidecar) => self.blobs.push_back(sidecar), - None => self.is_sidecars_stream_terminated = true, - } - } - - pub fn add_data_column(&mut self, column_opt: Option>>) { - match column_opt { - Some(column) => self.data_columns.push_back(column), - // TODO(das): this mechanism is dangerous, if somehow there are two requests for the - // same column index it can terminate early. This struct should track that all requests - // for all custody columns terminate. 
- None => self.custody_columns_streams_terminated += 1, + pub fn add_custody_columns(&mut self, columns: Vec>>) { + for column in columns { + self.data_columns.push_back(column); } + // TODO(das): this mechanism is dangerous, if somehow there are two requests for the + // same column index it can terminate early. This struct should track that all requests + // for all custody columns terminate. + self.custody_columns_streams_terminated += 1; } pub fn into_responses(self, spec: &ChainSpec) -> Result>, String> { if let Some(expects_custody_columns) = self.expects_custody_columns.clone() { self.into_responses_with_custody_columns(expects_custody_columns, spec) } else { - self.into_responses_with_blobs() + self.into_responses_with_blobs(spec) } } - fn into_responses_with_blobs(self) -> Result>, String> { + fn into_responses_with_blobs(self, spec: &ChainSpec) -> Result>, String> { let RangeBlockComponentsRequest { blocks, blobs, .. } = self; // There can't be more more blobs than blocks. i.e. 
sending any blob (empty @@ -103,7 +89,8 @@ impl RangeBlockComponentsRequest { let mut responses = Vec::with_capacity(blocks.len()); let mut blob_iter = blobs.into_iter().peekable(); for block in blocks.into_iter() { - let mut blob_list = Vec::with_capacity(self.max_blobs_per_block); + let max_blobs_per_block = spec.max_blobs_per_block(block.epoch()) as usize; + let mut blob_list = Vec::with_capacity(max_blobs_per_block); while { let pair_next_blob = blob_iter .peek() @@ -114,7 +101,7 @@ impl RangeBlockComponentsRequest { blob_list.push(blob_iter.next().ok_or("Missing next blob".to_string())?); } - let mut blobs_buffer = vec![None; self.max_blobs_per_block]; + let mut blobs_buffer = vec![None; max_blobs_per_block]; for blob in blob_list { let blob_index = blob.index as usize; let Some(blob_opt) = blobs_buffer.get_mut(blob_index) else { @@ -128,7 +115,7 @@ impl RangeBlockComponentsRequest { } let blobs = RuntimeVariableList::new( blobs_buffer.into_iter().flatten().collect::>(), - self.max_blobs_per_block, + max_blobs_per_block, ) .map_err(|_| "Blobs returned exceeds max length".to_string())?; responses.push(RpcBlock::new(None, block, Some(blobs)).map_err(|e| format!("{e:?}"))?) 
@@ -246,30 +233,25 @@ mod tests { use beacon_chain::test_utils::{ generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, NumBlobs, }; - use lighthouse_network::PeerId; use rand::SeedableRng; - use types::{test_utils::XorShiftRng, ForkName, MinimalEthSpec as E}; + use std::sync::Arc; + use types::{test_utils::XorShiftRng, ForkName, MinimalEthSpec as E, SignedBeaconBlock}; #[test] fn no_blobs_into_responses() { let spec = test_spec::(); - let peer_id = PeerId::random(); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng, &spec) .0 + .into() }) - .collect::>(); - let max_len = spec.max_blobs_per_block(blocks.first().unwrap().epoch()) as usize; - let mut info = - RangeBlockComponentsRequest::::new(false, None, None, vec![peer_id], max_len); + .collect::>>>(); + let mut info = RangeBlockComponentsRequest::::new(false, None, None); // Send blocks and complete terminate response - for block in blocks { - info.add_block_response(Some(block.into())); - } - info.add_block_response(None); + info.add_blocks(blocks); // Assert response is finished and RpcBlocks can be constructed assert!(info.is_finished()); @@ -279,7 +261,6 @@ mod tests { #[test] fn empty_blobs_into_responses() { let spec = test_spec::(); - let peer_id = PeerId::random(); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { @@ -291,19 +272,15 @@ mod tests { &spec, ) .0 + .into() }) - .collect::>(); - let max_len = spec.max_blobs_per_block(blocks.first().unwrap().epoch()) as usize; - let mut info = - RangeBlockComponentsRequest::::new(true, None, None, vec![peer_id], max_len); + .collect::>>>(); + let mut info = RangeBlockComponentsRequest::::new(true, None, None); // Send blocks and complete terminate response - for block in blocks { - info.add_block_response(Some(block.into())); - } - info.add_block_response(None); + info.add_blocks(blocks); // Expect no blobs 
returned - info.add_sidecar_response(None); + info.add_blobs(vec![]); // Assert response is finished and RpcBlocks can be constructed, even if blobs weren't returned. // This makes sure we don't expect blobs here when they have expired. Checking this logic should @@ -316,7 +293,6 @@ mod tests { fn rpc_block_with_custody_columns() { let spec = test_spec::(); let expects_custody_columns = vec![1, 2, 3, 4]; - let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { @@ -328,34 +304,24 @@ mod tests { ) }) .collect::>(); - let max_len = spec.max_blobs_per_block(blocks.first().unwrap().0.epoch()) as usize; let mut info = RangeBlockComponentsRequest::::new( false, Some(expects_custody_columns.clone()), Some(expects_custody_columns.len()), - vec![PeerId::random()], - max_len, ); // Send blocks and complete terminate response - for block in &blocks { - info.add_block_response(Some(block.0.clone().into())); - } - info.add_block_response(None); + info.add_blocks(blocks.iter().map(|b| b.0.clone().into()).collect()); // Assert response is not finished assert!(!info.is_finished()); - // Send data columns interleaved - for block in &blocks { - for column in &block.1 { - if expects_custody_columns.contains(&column.index) { - info.add_data_column(Some(column.clone())); - } - } - } - - // Terminate the requests - for (i, _column_index) in expects_custody_columns.iter().enumerate() { - info.add_data_column(None); + // Send data columns + for (i, &column_index) in expects_custody_columns.iter().enumerate() { + info.add_custody_columns( + blocks + .iter() + .flat_map(|b| b.1.iter().filter(|d| d.index == column_index).cloned()) + .collect(), + ); if i < expects_custody_columns.len() - 1 { assert!( @@ -377,8 +343,21 @@ mod tests { #[test] fn rpc_block_with_custody_columns_batched() { let spec = test_spec::(); - let expects_custody_columns = vec![1, 2, 3, 4]; - let num_of_data_column_requests = 2; + let batched_column_requests = [vec![1_u64, 2], vec![3, 4]]; + let 
expects_custody_columns = batched_column_requests + .iter() + .flatten() + .cloned() + .collect::>(); + let custody_column_request_ids = + (0..batched_column_requests.len() as u32).collect::>(); + let num_of_data_column_requests = custody_column_request_ids.len(); + + let mut info = RangeBlockComponentsRequest::::new( + false, + Some(expects_custody_columns.clone()), + Some(num_of_data_column_requests), + ); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) @@ -391,34 +370,25 @@ mod tests { ) }) .collect::>(); - let max_len = spec.max_blobs_per_block(blocks.first().unwrap().0.epoch()) as usize; - let mut info = RangeBlockComponentsRequest::::new( - false, - Some(expects_custody_columns.clone()), - Some(num_of_data_column_requests), - vec![PeerId::random()], - max_len, - ); + // Send blocks and complete terminate response - for block in &blocks { - info.add_block_response(Some(block.0.clone().into())); - } - info.add_block_response(None); + info.add_blocks(blocks.iter().map(|b| b.0.clone().into()).collect()); // Assert response is not finished assert!(!info.is_finished()); - // Send data columns interleaved - for block in &blocks { - for column in &block.1 { - if expects_custody_columns.contains(&column.index) { - info.add_data_column(Some(column.clone())); - } - } - } + for (i, column_indices) in batched_column_requests.iter().enumerate() { + // Send the set of columns in the same batch request + info.add_custody_columns( + blocks + .iter() + .flat_map(|b| { + b.1.iter() + .filter(|d| column_indices.contains(&d.index)) + .cloned() + }) + .collect::>(), + ); - // Terminate the requests - for i in 0..num_of_data_column_requests { - info.add_data_column(None); if i < num_of_data_column_requests - 1 { assert!( !info.is_finished(), diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index fd91dc78b1..fc31e83727 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs 
@@ -36,7 +36,7 @@ use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart}; use super::block_lookups::BlockLookups; use super::network_context::{ - BlockOrBlob, CustodyByRootResult, RangeRequestId, RpcEvent, SyncNetworkContext, + CustodyByRootResult, RangeBlockComponent, RangeRequestId, RpcEvent, SyncNetworkContext, }; use super::peer_sampling::{Sampling, SamplingConfig, SamplingResult}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; @@ -47,7 +47,6 @@ use crate::status::ToStatusMessage; use crate::sync::block_lookups::{ BlobRequestState, BlockComponent, BlockRequestState, CustodyRequestState, DownloadResult, }; -use crate::sync::block_sidecar_coupling::RangeBlockComponentsRequest; use crate::sync::network_context::PeerGroup; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::validator_monitor::timestamp_now; @@ -57,8 +56,9 @@ use beacon_chain::{ use futures::StreamExt; use lighthouse_network::rpc::RPCError; use lighthouse_network::service::api_types::{ - CustodyRequester, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, SamplingId, - SamplingRequester, SingleLookupReqId, SyncRequestId, + BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, CustodyRequester, + DataColumnsByRangeRequestId, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, + SamplingId, SamplingRequester, SingleLookupReqId, SyncRequestId, }; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; @@ -491,36 +491,14 @@ impl SyncManager { SyncRequestId::DataColumnsByRoot(req_id) => { self.on_data_columns_by_root_response(req_id, peer_id, RpcEvent::RPCError(error)) } - SyncRequestId::RangeBlockAndBlobs { id } => { - if let Some(sender_id) = self.network.range_request_failed(id) { - match sender_id { - RangeRequestId::RangeSync { chain_id, batch_id } => { - self.range_sync.inject_error( - &mut self.network, - peer_id, - batch_id, - chain_id, - id, - ); - self.update_sync_state(); - 
} - RangeRequestId::BackfillSync { batch_id } => match self - .backfill_sync - .inject_error(&mut self.network, batch_id, &peer_id, id) - { - Ok(_) => {} - Err(_) => self.update_sync_state(), - }, - } - } else { - debug!( - self.log, - "RPC error for range request has no associated entry in network context, ungraceful disconnect"; - "peer_id" => %peer_id, - "request_id" => %id, - "error" => ?error, - ); - } + SyncRequestId::BlocksByRange(req_id) => { + self.on_blocks_by_range_response(req_id, peer_id, RpcEvent::RPCError(error)) + } + SyncRequestId::BlobsByRange(req_id) => { + self.on_blobs_by_range_response(req_id, peer_id, RpcEvent::RPCError(error)) + } + SyncRequestId::DataColumnsByRange(req_id) => { + self.on_data_columns_by_range_response(req_id, peer_id, RpcEvent::RPCError(error)) } } } @@ -1051,14 +1029,13 @@ impl SyncManager { SyncRequestId::SingleBlock { id } => self.on_single_block_response( id, peer_id, - match block { - Some(block) => RpcEvent::Response(block, seen_timestamp), - None => RpcEvent::StreamTermination, - }, + RpcEvent::from_chunk(block, seen_timestamp), + ), + SyncRequestId::BlocksByRange(id) => self.on_blocks_by_range_response( + id, + peer_id, + RpcEvent::from_chunk(block, seen_timestamp), ), - SyncRequestId::RangeBlockAndBlobs { id } => { - self.range_block_and_blobs_response(id, peer_id, block.into()) - } _ => { crit!(self.log, "bad request id for block"; "peer_id" => %peer_id ); } @@ -1094,14 +1071,13 @@ impl SyncManager { SyncRequestId::SingleBlob { id } => self.on_single_blob_response( id, peer_id, - match blob { - Some(blob) => RpcEvent::Response(blob, seen_timestamp), - None => RpcEvent::StreamTermination, - }, + RpcEvent::from_chunk(blob, seen_timestamp), + ), + SyncRequestId::BlobsByRange(id) => self.on_blobs_by_range_response( + id, + peer_id, + RpcEvent::from_chunk(blob, seen_timestamp), ), - SyncRequestId::RangeBlockAndBlobs { id } => { - self.range_block_and_blobs_response(id, peer_id, blob.into()) - } _ => { crit!(self.log, 
"bad request id for blob"; "peer_id" => %peer_id); } @@ -1120,19 +1096,14 @@ impl SyncManager { self.on_data_columns_by_root_response( req_id, peer_id, - match data_column { - Some(data_column) => RpcEvent::Response(data_column, seen_timestamp), - None => RpcEvent::StreamTermination, - }, - ); - } - SyncRequestId::RangeBlockAndBlobs { id } => { - self.range_block_and_blobs_response( - id, - peer_id, - BlockOrBlob::CustodyColumns(data_column), + RpcEvent::from_chunk(data_column, seen_timestamp), ); } + SyncRequestId::DataColumnsByRange(id) => self.on_data_columns_by_range_response( + id, + peer_id, + RpcEvent::from_chunk(data_column, seen_timestamp), + ), _ => { crit!(self.log, "bad request id for data_column"; "peer_id" => %peer_id); } @@ -1188,6 +1159,54 @@ impl SyncManager { } } + fn on_blocks_by_range_response( + &mut self, + id: BlocksByRangeRequestId, + peer_id: PeerId, + block: RpcEvent>>, + ) { + if let Some(resp) = self.network.on_blocks_by_range_response(id, peer_id, block) { + self.on_range_components_response( + id.parent_request_id, + peer_id, + RangeBlockComponent::Block(resp), + ); + } + } + + fn on_blobs_by_range_response( + &mut self, + id: BlobsByRangeRequestId, + peer_id: PeerId, + blob: RpcEvent>>, + ) { + if let Some(resp) = self.network.on_blobs_by_range_response(id, peer_id, blob) { + self.on_range_components_response( + id.parent_request_id, + peer_id, + RangeBlockComponent::Blob(resp), + ); + } + } + + fn on_data_columns_by_range_response( + &mut self, + id: DataColumnsByRangeRequestId, + peer_id: PeerId, + data_column: RpcEvent>>, + ) { + if let Some(resp) = self + .network + .on_data_columns_by_range_response(id, peer_id, data_column) + { + self.on_range_components_response( + id.parent_request_id, + peer_id, + RangeBlockComponent::CustodyColumns(resp), + ); + } + } + fn on_custody_by_root_result( &mut self, requester: CustodyRequester, @@ -1230,27 +1249,26 @@ impl SyncManager { /// Handles receiving a response for a range sync request 
that should have both blocks and /// blobs. - fn range_block_and_blobs_response( + fn on_range_components_response( &mut self, - id: Id, + range_request_id: ComponentsByRangeRequestId, peer_id: PeerId, - block_or_blob: BlockOrBlob, + range_block_component: RangeBlockComponent, ) { if let Some(resp) = self .network - .range_block_and_blob_response(id, block_or_blob) + .range_block_component_response(range_request_id, range_block_component) { - let epoch = resp.sender_id.batch_id(); - match resp.responses { + match resp { Ok(blocks) => { - match resp.sender_id { + match range_request_id.requester { RangeRequestId::RangeSync { chain_id, batch_id } => { self.range_sync.blocks_by_range_response( &mut self.network, peer_id, chain_id, batch_id, - id, + range_request_id.id, blocks, ); self.update_sync_state(); @@ -1260,7 +1278,7 @@ impl SyncManager { &mut self.network, batch_id, &peer_id, - id, + range_request_id.id, blocks, ) { Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), @@ -1274,36 +1292,25 @@ impl SyncManager { } } } - Err(e) => { - // Re-insert the request so we can retry - self.network.insert_range_blocks_and_blobs_request( - id, - resp.sender_id, - RangeBlockComponentsRequest::new( - resp.expects_blobs, - resp.expects_custody_columns, - None, - vec![], - self.chain.spec.max_blobs_per_block(epoch) as usize, - ), - ); - // inform range that the request needs to be treated as failed - // With time we will want to downgrade this log - warn!( - self.log, - "Blocks and blobs request for range received invalid data"; - "peer_id" => %peer_id, - "sender_id" => ?resp.sender_id, - "error" => e.clone() - ); - let id = SyncRequestId::RangeBlockAndBlobs { id }; - self.network.report_peer( - peer_id, - PeerAction::MidToleranceError, - "block_blob_faulty_batch", - ); - self.inject_error(peer_id, id, RPCError::InvalidData(e)) - } + Err(_) => match range_request_id.requester { + RangeRequestId::RangeSync { chain_id, batch_id } => { + self.range_sync.inject_error( + 
&mut self.network, + peer_id, + batch_id, + chain_id, + range_request_id.id, + ); + self.update_sync_state(); + } + RangeRequestId::BackfillSync { batch_id } => match self + .backfill_sync + .inject_error(&mut self.network, batch_id, &peer_id, range_request_id.id) + { + Ok(_) => {} + Err(_) => self.update_sync_state(), + }, + }, } } } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 4135f901b1..0cd21de7f4 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -5,7 +5,7 @@ use self::custody::{ActiveCustodyRequest, Error as CustodyRequestError}; pub use self::requests::{BlocksByRootSingleRequest, DataColumnsByRootSingleBlockRequest}; use super::block_sidecar_coupling::RangeBlockComponentsRequest; use super::manager::BlockProcessType; -use super::range_sync::{BatchId, ByRangeRequestType, ChainId}; +use super::range_sync::ByRangeRequestType; use super::SyncMessage; use crate::metrics; use crate::network_beacon_processor::NetworkBeaconProcessor; @@ -17,13 +17,12 @@ use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; use custody::CustodyRequestResult; use fnv::FnvHashMap; -use lighthouse_network::rpc::methods::{ - BlobsByRangeRequest, DataColumnsByRangeRequest, OldBlocksByRangeRequest, - OldBlocksByRangeRequestV1, OldBlocksByRangeRequestV2, -}; +use lighthouse_network::rpc::methods::{BlobsByRangeRequest, DataColumnsByRangeRequest}; use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError, RequestType}; +pub use lighthouse_network::service::api_types::RangeRequestId; use lighthouse_network::service::api_types::{ - AppRequestId, CustodyId, CustodyRequester, DataColumnsByRootRequestId, + AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, + CustodyId, CustodyRequester, DataColumnsByRangeRequestId, 
DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId, }; use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource}; @@ -32,8 +31,8 @@ use rand::prelude::IteratorRandom; use rand::thread_rng; pub use requests::LookupVerifyError; use requests::{ - ActiveRequests, BlobsByRootRequestItems, BlocksByRootRequestItems, - DataColumnsByRootRequestItems, + ActiveRequests, BlobsByRangeRequestItems, BlobsByRootRequestItems, BlocksByRangeRequestItems, + BlocksByRootRequestItems, DataColumnsByRangeRequestItems, DataColumnsByRootRequestItems, }; use slog::{debug, error, warn}; use std::collections::hash_map::Entry; @@ -50,33 +49,6 @@ use types::{ pub mod custody; mod requests; -pub struct BlocksAndBlobsByRangeResponse { - pub sender_id: RangeRequestId, - pub responses: Result>, String>, - pub expects_blobs: bool, - pub expects_custody_columns: Option>, -} - -#[derive(Debug, Clone, Copy)] -pub enum RangeRequestId { - RangeSync { - chain_id: ChainId, - batch_id: BatchId, - }, - BackfillSync { - batch_id: BatchId, - }, -} - -impl RangeRequestId { - pub fn batch_id(&self) -> BatchId { - match self { - RangeRequestId::RangeSync { batch_id, .. } => *batch_id, - RangeRequestId::BackfillSync { batch_id, .. 
} => *batch_id, - } - } -} - #[derive(Debug)] pub enum RpcEvent { StreamTermination, @@ -84,6 +56,15 @@ pub enum RpcEvent { RPCError(RPCError), } +impl RpcEvent { + pub fn from_chunk(chunk: Option, seen_timestamp: Duration) -> Self { + match chunk { + Some(item) => RpcEvent::Response(item, seen_timestamp), + None => RpcEvent::StreamTermination, + } + } +} + pub type RpcResponseResult = Result<(T, Duration), RpcResponseError>; pub type CustodyByRootResult = Result<(DataColumnSidecarList, PeerGroup), RpcResponseError>; @@ -93,6 +74,7 @@ pub enum RpcResponseError { RpcError(RPCError), VerifyError(LookupVerifyError), CustodyRequestError(CustodyRequestError), + BlockComponentCouplingError(String), } #[derive(Debug, PartialEq, Eq)] @@ -110,16 +92,6 @@ pub enum SendErrorProcessor { ProcessorNotAvailable, } -impl std::fmt::Display for RpcResponseError { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - RpcResponseError::RpcError(e) => write!(f, "RPC Error: {:?}", e), - RpcResponseError::VerifyError(e) => write!(f, "Lookup Verify Error: {:?}", e), - RpcResponseError::CustodyRequestError(e) => write!(f, "Custody Request Error: {:?}", e), - } - } -} - impl From for RpcResponseError { fn from(e: RPCError) -> Self { RpcResponseError::RpcError(e) @@ -199,13 +171,22 @@ pub struct SyncNetworkContext { /// A mapping of active DataColumnsByRoot requests data_columns_by_root_requests: ActiveRequests>, + /// A mapping of active BlocksByRange requests + blocks_by_range_requests: + ActiveRequests>, + /// A mapping of active BlobsByRange requests + blobs_by_range_requests: + ActiveRequests>, + /// A mapping of active DataColumnsByRange requests + data_columns_by_range_requests: + ActiveRequests>, /// Mapping of active custody column requests for a block root custody_by_root_requests: FnvHashMap>, - /// BlocksByRange requests paired with BlobsByRange - range_block_components_requests: - FnvHashMap)>, + /// BlocksByRange requests paired with other ByRange 
requests for data components + components_by_range_requests: + FnvHashMap>, /// Whether the ee is online. If it's not, we don't allow access to the /// `beacon_processor_send`. @@ -223,22 +204,10 @@ pub struct SyncNetworkContext { } /// Small enumeration to make dealing with block and blob requests easier. -pub enum BlockOrBlob { - Block(Option>>), - Blob(Option>>), - CustodyColumns(Option>>), -} - -impl From>>> for BlockOrBlob { - fn from(block: Option>>) -> Self { - BlockOrBlob::Block(block) - } -} - -impl From>>> for BlockOrBlob { - fn from(blob: Option>>) -> Self { - BlockOrBlob::Blob(blob) - } +pub enum RangeBlockComponent { + Block(RpcResponseResult>>>), + Blob(RpcResponseResult>>>), + CustodyColumns(RpcResponseResult>>>), } impl SyncNetworkContext { @@ -256,8 +225,11 @@ impl SyncNetworkContext { blocks_by_root_requests: ActiveRequests::new("blocks_by_root"), blobs_by_root_requests: ActiveRequests::new("blobs_by_root"), data_columns_by_root_requests: ActiveRequests::new("data_columns_by_root"), + blocks_by_range_requests: ActiveRequests::new("blocks_by_range"), + blobs_by_range_requests: ActiveRequests::new("blobs_by_range"), + data_columns_by_range_requests: ActiveRequests::new("data_columns_by_range"), custody_by_root_requests: <_>::default(), - range_block_components_requests: FnvHashMap::default(), + components_by_range_requests: FnvHashMap::default(), network_beacon_processor, chain, fork_context, @@ -272,37 +244,60 @@ impl SyncNetworkContext { /// Returns the ids of all the requests made to the given peer_id. pub fn peer_disconnected(&mut self, peer_id: &PeerId) -> Vec { - let failed_range_ids = - self.range_block_components_requests - .iter() - .filter_map(|(id, request)| { - if request.1.peer_ids.contains(peer_id) { - Some(SyncRequestId::RangeBlockAndBlobs { id: *id }) - } else { - None - } - }); + // Note: using destructuring pattern without a default case to make sure we don't forget to + // add new request types to this function. 
Otherwise, lookup sync can break and lookups + // will get stuck if a peer disconnects during an active requests. + let Self { + network_send: _, + request_id: _, + blocks_by_root_requests, + blobs_by_root_requests, + data_columns_by_root_requests, + blocks_by_range_requests, + blobs_by_range_requests, + data_columns_by_range_requests, + // custody_by_root_requests is a meta request of data_columns_by_root_requests + custody_by_root_requests: _, + // components_by_range_requests is a meta request of various _by_range requests + components_by_range_requests: _, + execution_engine_state: _, + network_beacon_processor: _, + chain: _, + fork_context: _, + log: _, + } = self; - let failed_block_ids = self - .blocks_by_root_requests + let blocks_by_root_ids = blocks_by_root_requests .active_requests_of_peer(peer_id) .into_iter() .map(|id| SyncRequestId::SingleBlock { id: *id }); - let failed_blob_ids = self - .blobs_by_root_requests + let blobs_by_root_ids = blobs_by_root_requests .active_requests_of_peer(peer_id) .into_iter() .map(|id| SyncRequestId::SingleBlob { id: *id }); - let failed_data_column_by_root_ids = self - .data_columns_by_root_requests + let data_column_by_root_ids = data_columns_by_root_requests .active_requests_of_peer(peer_id) .into_iter() .map(|req_id| SyncRequestId::DataColumnsByRoot(*req_id)); + let blocks_by_range_ids = blocks_by_range_requests + .active_requests_of_peer(peer_id) + .into_iter() + .map(|req_id| SyncRequestId::BlocksByRange(*req_id)); + let blobs_by_range_ids = blobs_by_range_requests + .active_requests_of_peer(peer_id) + .into_iter() + .map(|req_id| SyncRequestId::BlobsByRange(*req_id)); + let data_column_by_range_ids = data_columns_by_range_requests + .active_requests_of_peer(peer_id) + .into_iter() + .map(|req_id| SyncRequestId::DataColumnsByRange(*req_id)); - failed_range_ids - .chain(failed_block_ids) - .chain(failed_blob_ids) - .chain(failed_data_column_by_root_ids) + blocks_by_root_ids + .chain(blobs_by_root_ids) + 
.chain(data_column_by_root_ids) + .chain(blocks_by_range_ids) + .chain(blobs_by_range_ids) + .chain(data_column_by_range_ids) .collect() } @@ -361,117 +356,62 @@ impl SyncNetworkContext { peer_id: PeerId, batch_type: ByRangeRequestType, request: BlocksByRangeRequest, - sender_id: RangeRequestId, + requester: RangeRequestId, ) -> Result { - let epoch = Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch()); - let id = self.next_id(); - let mut requested_peers = vec![peer_id]; - debug!( - self.log, - "Sending BlocksByRange request"; - "method" => "BlocksByRange", - "count" => request.count(), - "epoch" => epoch, - "peer" => %peer_id, - "id" => id, - ); - let rpc_request = match request { - BlocksByRangeRequest::V1(ref req) => { - RequestType::BlocksByRange(OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1 { - start_slot: req.start_slot, - count: req.count, - step: 1, - })) - } - BlocksByRangeRequest::V2(ref req) => { - RequestType::BlocksByRange(OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { - start_slot: req.start_slot, - count: req.count, - step: 1, - })) - } + // Create the overall components_by_range request ID before its individual components + let id = ComponentsByRangeRequestId { + id: self.next_id(), + requester, }; - self.network_send - .send(NetworkMessage::SendRequest { + + let _blocks_req_id = self.send_blocks_by_range_request(peer_id, request.clone(), id)?; + + let blobs_req_id = if matches!(batch_type, ByRangeRequestType::BlocksAndBlobs) { + Some(self.send_blobs_by_range_request( peer_id, - request: rpc_request, - request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), - }) - .map_err(|_| RpcRequestSendError::NetworkSendError)?; - - let expected_blobs = if matches!(batch_type, ByRangeRequestType::BlocksAndBlobs) { - debug!( - self.log, - "Sending BlobsByRange requests"; - "method" => "BlobsByRange", - "count" => request.count(), - "epoch" => epoch, - "peer" => %peer_id, - ); - - // Create the blob request 
based on the blocks request. - self.network_send - .send(NetworkMessage::SendRequest { - peer_id, - request: RequestType::BlobsByRange(BlobsByRangeRequest { - start_slot: *request.start_slot(), - count: *request.count(), - }), - request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), - }) - .map_err(|_| RpcRequestSendError::NetworkSendError)?; - true + BlobsByRangeRequest { + start_slot: *request.start_slot(), + count: *request.count(), + }, + id, + )?) } else { - false + None }; - let (expects_columns, num_of_column_req) = + let (expects_columns, data_column_requests) = if matches!(batch_type, ByRangeRequestType::BlocksAndColumns) { let column_indexes = self.network_globals().sampling_columns.clone(); - let mut num_of_custody_column_req = 0; - for (peer_id, columns_by_range_request) in - self.make_columns_by_range_requests(request, &column_indexes)? - { - requested_peers.push(peer_id); - - debug!( - self.log, - "Sending DataColumnsByRange requests"; - "method" => "DataColumnsByRange", - "count" => columns_by_range_request.count, - "epoch" => epoch, - "columns" => ?columns_by_range_request.columns, - "peer" => %peer_id, - "id" => id, - ); - - self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request: RequestType::DataColumnsByRange(columns_by_range_request), - request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + let data_column_requests = self + .make_columns_by_range_requests(request, &column_indexes)? 
+ .into_iter() + .map(|(peer_id, columns_by_range_request)| { + self.send_data_columns_by_range_request( + peer_id, + columns_by_range_request, + id, + ) }) - .map_err(|_| RpcRequestSendError::NetworkSendError)?; + .collect::, _>>()?; - num_of_custody_column_req += 1; - } - - (Some(column_indexes), Some(num_of_custody_column_req)) + ( + Some(column_indexes.into_iter().collect::>()), + Some(data_column_requests), + ) } else { (None, None) }; - let max_blobs_len = self.chain.spec.max_blobs_per_block(epoch); + let expected_blobs = blobs_req_id.is_some(); let info = RangeBlockComponentsRequest::new( expected_blobs, - expects_columns.map(|c| c.into_iter().collect()), - num_of_column_req, - requested_peers, - max_blobs_len as usize, + expects_columns, + data_column_requests.map(|items| items.len()), ); - self.range_block_components_requests - .insert(id, (sender_id, info)); - Ok(id) + self.components_by_range_requests.insert(id, info); + + Ok(id.id) } fn make_columns_by_range_requests( @@ -508,54 +448,43 @@ impl SyncNetworkContext { Ok(peer_id_to_request_map) } - pub fn range_request_failed(&mut self, request_id: Id) -> Option { - let sender_id = self - .range_block_components_requests - .remove(&request_id) - .map(|(sender_id, _info)| sender_id); - if let Some(sender_id) = sender_id { - debug!( - self.log, - "Sync range request failed"; - "request_id" => request_id, - "sender_id" => ?sender_id - ); - Some(sender_id) - } else { - debug!(self.log, "Sync range request failed"; "request_id" => request_id); - None - } - } - /// Received a blocks by range or blobs by range response for a request that couples blocks ' /// and blobs. 
- pub fn range_block_and_blob_response( + pub fn range_block_component_response( &mut self, - request_id: Id, - block_or_blob: BlockOrBlob, - ) -> Option> { - let Entry::Occupied(mut entry) = self.range_block_components_requests.entry(request_id) - else { + id: ComponentsByRangeRequestId, + range_block_component: RangeBlockComponent, + ) -> Option>, RpcResponseError>> { + let Entry::Occupied(mut entry) = self.components_by_range_requests.entry(id) else { metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["range_blocks"]); return None; }; - let (_, info) = entry.get_mut(); - match block_or_blob { - BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block), - BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), - BlockOrBlob::CustodyColumns(column) => info.add_data_column(column), + if let Err(e) = { + let request = entry.get_mut(); + match range_block_component { + RangeBlockComponent::Block(resp) => resp.map(|(blocks, _)| { + request.add_blocks(blocks); + }), + RangeBlockComponent::Blob(resp) => resp.map(|(blobs, _)| { + request.add_blobs(blobs); + }), + RangeBlockComponent::CustodyColumns(resp) => resp.map(|(custody_columns, _)| { + request.add_custody_columns(custody_columns); + }), + } + } { + entry.remove(); + return Some(Err(e)); } - if info.is_finished() { + + if entry.get_mut().is_finished() { // If the request is finished, dequeue everything - let (sender_id, info) = entry.remove(); - let (expects_blobs, expects_custody_columns) = info.get_requirements(); - Some(BlocksAndBlobsByRangeResponse { - sender_id, - responses: info.into_responses(&self.chain.spec), - expects_blobs, - expects_custody_columns, - }) + let request = entry.remove(); + let blocks = request + .into_responses(&self.chain.spec) + .map_err(RpcResponseError::BlockComponentCouplingError); + Some(blocks) } else { None } @@ -831,6 +760,125 @@ impl SyncNetworkContext { } } + fn send_blocks_by_range_request( + &mut self, + peer_id: PeerId, + 
request: BlocksByRangeRequest, + parent_request_id: ComponentsByRangeRequestId, + ) -> Result { + let id = BlocksByRangeRequestId { + id: self.next_id(), + parent_request_id, + }; + debug!( + self.log, + "Sending BlocksByRange request"; + "method" => "BlocksByRange", + "count" => request.count(), + "epoch" => Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch()), + "peer" => %peer_id, + "id" => ?id, + ); + self.network_send + .send(NetworkMessage::SendRequest { + peer_id, + request: RequestType::BlocksByRange(request.clone().into()), + request_id: AppRequestId::Sync(SyncRequestId::BlocksByRange(id)), + }) + .map_err(|_| RpcRequestSendError::NetworkSendError)?; + + self.blocks_by_range_requests.insert( + id, + peer_id, + // false = do not enforce max_requests are returned for *_by_range methods. We don't + // know if there are missed blocks. + false, + BlocksByRangeRequestItems::new(request), + ); + Ok(id) + } + + fn send_blobs_by_range_request( + &mut self, + peer_id: PeerId, + request: BlobsByRangeRequest, + parent_request_id: ComponentsByRangeRequestId, + ) -> Result { + let id = BlobsByRangeRequestId { + id: self.next_id(), + parent_request_id, + }; + let request_epoch = Slot::new(request.start_slot).epoch(T::EthSpec::slots_per_epoch()); + debug!( + self.log, + "Sending BlobsByRange requests"; + "method" => "BlobsByRange", + "count" => request.count, + "epoch" => request_epoch, + "peer" => %peer_id, + "id" => ?id, + ); + + // Create the blob request based on the blocks request. + self.network_send + .send(NetworkMessage::SendRequest { + peer_id, + request: RequestType::BlobsByRange(request.clone()), + request_id: AppRequestId::Sync(SyncRequestId::BlobsByRange(id)), + }) + .map_err(|_| RpcRequestSendError::NetworkSendError)?; + + let max_blobs_per_block = self.chain.spec.max_blobs_per_block(request_epoch); + self.blobs_by_range_requests.insert( + id, + peer_id, + // false = do not enforce max_requests are returned for *_by_range methods. 
We don't + // know if there are missed blocks. + false, + BlobsByRangeRequestItems::new(request, max_blobs_per_block), + ); + Ok(id) + } + + fn send_data_columns_by_range_request( + &mut self, + peer_id: PeerId, + request: DataColumnsByRangeRequest, + parent_request_id: ComponentsByRangeRequestId, + ) -> Result { + let id = DataColumnsByRangeRequestId { + id: self.next_id(), + parent_request_id, + }; + debug!( + self.log, + "Sending DataColumnsByRange requests"; + "method" => "DataColumnsByRange", + "count" => request.count, + "epoch" => Slot::new(request.start_slot).epoch(T::EthSpec::slots_per_epoch()), + "columns" => ?request.columns, + "peer" => %peer_id, + "id" => ?id, + ); + + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request: RequestType::DataColumnsByRange(request.clone()), + request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRange(id)), + }) + .map_err(|_| RpcRequestSendError::NetworkSendError)?; + + self.data_columns_by_range_requests.insert( + id, + peer_id, + // false = do not enforce max_requests are returned for *_by_range methods. We don't + // know if there are missed blocks. + false, + DataColumnsByRangeRequestItems::new(request), + ); + Ok(id) + } + pub fn is_execution_engine_online(&self) -> bool { self.execution_engine_state == EngineState::Online } @@ -929,16 +977,6 @@ impl SyncNetworkContext { } } - pub fn insert_range_blocks_and_blobs_request( - &mut self, - id: Id, - sender_id: RangeRequestId, - info: RangeBlockComponentsRequest, - ) { - self.range_block_components_requests - .insert(id, (sender_id, info)); - } - /// Attempt to make progress on all custody_by_root requests. Some request may be stale waiting /// for custody peers. Returns a Vec of results as zero or more requests may fail in this /// attempt. 
@@ -1037,6 +1075,41 @@ impl SyncNetworkContext { self.report_rpc_response_errors(resp, peer_id) } + #[allow(clippy::type_complexity)] + pub(crate) fn on_blocks_by_range_response( + &mut self, + id: BlocksByRangeRequestId, + peer_id: PeerId, + rpc_event: RpcEvent>>, + ) -> Option>>>> { + let resp = self.blocks_by_range_requests.on_response(id, rpc_event); + self.report_rpc_response_errors(resp, peer_id) + } + + #[allow(clippy::type_complexity)] + pub(crate) fn on_blobs_by_range_response( + &mut self, + id: BlobsByRangeRequestId, + peer_id: PeerId, + rpc_event: RpcEvent>>, + ) -> Option>>>> { + let resp = self.blobs_by_range_requests.on_response(id, rpc_event); + self.report_rpc_response_errors(resp, peer_id) + } + + #[allow(clippy::type_complexity)] + pub(crate) fn on_data_columns_by_range_response( + &mut self, + id: DataColumnsByRangeRequestId, + peer_id: PeerId, + rpc_event: RpcEvent>>, + ) -> Option>> { + let resp = self + .data_columns_by_range_requests + .on_response(id, rpc_event); + self.report_rpc_response_errors(resp, peer_id) + } + fn report_rpc_response_errors( &mut self, resp: Option>, @@ -1191,21 +1264,27 @@ impl SyncNetworkContext { } pub(crate) fn register_metrics(&self) { - metrics::set_gauge_vec( - &metrics::SYNC_ACTIVE_NETWORK_REQUESTS, - &["blocks_by_root"], - self.blocks_by_root_requests.len() as i64, - ); - metrics::set_gauge_vec( - &metrics::SYNC_ACTIVE_NETWORK_REQUESTS, - &["blobs_by_root"], - self.blobs_by_root_requests.len() as i64, - ); - metrics::set_gauge_vec( - &metrics::SYNC_ACTIVE_NETWORK_REQUESTS, - &["range_blocks"], - self.range_block_components_requests.len() as i64, - ); + for (id, count) in [ + ("blocks_by_root", self.blocks_by_root_requests.len()), + ("blobs_by_root", self.blobs_by_root_requests.len()), + ( + "data_columns_by_root", + self.data_columns_by_root_requests.len(), + ), + ("blocks_by_range", self.blocks_by_range_requests.len()), + ("blobs_by_range", self.blobs_by_range_requests.len()), + ( + "data_columns_by_range", 
+ self.data_columns_by_range_requests.len(), + ), + ("custody_by_root", self.custody_by_root_requests.len()), + ( + "components_by_range", + self.components_by_range_requests.len(), + ), + ] { + metrics::set_gauge_vec(&metrics::SYNC_ACTIVE_NETWORK_REQUESTS, &[id], count as i64); + } } } diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 4a5a16459d..c9b85e47b6 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -4,10 +4,13 @@ use beacon_chain::validator_monitor::timestamp_now; use fnv::FnvHashMap; use lighthouse_network::PeerId; use strum::IntoStaticStr; -use types::Hash256; +use types::{Hash256, Slot}; +pub use blobs_by_range::BlobsByRangeRequestItems; pub use blobs_by_root::{BlobsByRootRequestItems, BlobsByRootSingleBlockRequest}; +pub use blocks_by_range::BlocksByRangeRequestItems; pub use blocks_by_root::{BlocksByRootRequestItems, BlocksByRootSingleRequest}; +pub use data_columns_by_range::DataColumnsByRangeRequestItems; pub use data_columns_by_root::{ DataColumnsByRootRequestItems, DataColumnsByRootSingleBlockRequest, }; @@ -16,8 +19,11 @@ use crate::metrics; use super::{RpcEvent, RpcResponseResult}; +mod blobs_by_range; mod blobs_by_root; +mod blocks_by_range; mod blocks_by_root; +mod data_columns_by_range; mod data_columns_by_root; #[derive(Debug, PartialEq, Eq, IntoStaticStr)] @@ -26,8 +32,9 @@ pub enum LookupVerifyError { TooManyResponses, UnrequestedBlockRoot(Hash256), UnrequestedIndex(u64), + UnrequestedSlot(Slot), InvalidInclusionProof, - DuplicateData, + DuplicatedData(Slot, u64), InternalError(String), } diff --git a/beacon_node/network/src/sync/network_context/requests/blobs_by_range.rs b/beacon_node/network/src/sync/network_context/requests/blobs_by_range.rs new file mode 100644 index 0000000000..9c6f516199 --- /dev/null +++ 
b/beacon_node/network/src/sync/network_context/requests/blobs_by_range.rs @@ -0,0 +1,56 @@ +use super::{ActiveRequestItems, LookupVerifyError}; +use lighthouse_network::rpc::methods::BlobsByRangeRequest; +use std::sync::Arc; +use types::{BlobSidecar, EthSpec}; + +/// Accumulates results of a blobs_by_range request. Only returns items after receiving the +/// stream termination. +pub struct BlobsByRangeRequestItems { + request: BlobsByRangeRequest, + items: Vec>>, + max_blobs_per_block: u64, +} + +impl BlobsByRangeRequestItems { + pub fn new(request: BlobsByRangeRequest, max_blobs_per_block: u64) -> Self { + Self { + request, + items: vec![], + max_blobs_per_block, + } + } +} + +impl ActiveRequestItems for BlobsByRangeRequestItems { + type Item = Arc>; + + fn add(&mut self, blob: Self::Item) -> Result { + if blob.slot() < self.request.start_slot + || blob.slot() >= self.request.start_slot + self.request.count + { + return Err(LookupVerifyError::UnrequestedSlot(blob.slot())); + } + if blob.index >= self.max_blobs_per_block { + return Err(LookupVerifyError::UnrequestedIndex(blob.index)); + } + if !blob.verify_blob_sidecar_inclusion_proof() { + return Err(LookupVerifyError::InvalidInclusionProof); + } + if self + .items + .iter() + .any(|existing| existing.slot() == blob.slot() && existing.index == blob.index) + { + return Err(LookupVerifyError::DuplicatedData(blob.slot(), blob.index)); + } + + self.items.push(blob); + + // Skip check if blobs are ready as it's rare that all blocks have max blobs + Ok(false) + } + + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) + } +} diff --git a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs index a670229884..547c51198e 100644 --- a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs @@ -57,7 +57,7 @@ impl ActiveRequestItems 
for BlobsByRootRequestItems { return Err(LookupVerifyError::UnrequestedIndex(blob.index)); } if self.items.iter().any(|b| b.index == blob.index) { - return Err(LookupVerifyError::DuplicateData); + return Err(LookupVerifyError::DuplicatedData(blob.slot(), blob.index)); } self.items.push(blob); diff --git a/beacon_node/network/src/sync/network_context/requests/blocks_by_range.rs b/beacon_node/network/src/sync/network_context/requests/blocks_by_range.rs new file mode 100644 index 0000000000..c7d2dda01e --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/blocks_by_range.rs @@ -0,0 +1,48 @@ +use super::{ActiveRequestItems, LookupVerifyError}; +use lighthouse_network::rpc::BlocksByRangeRequest; +use std::sync::Arc; +use types::{EthSpec, SignedBeaconBlock}; + +/// Accumulates results of a blocks_by_range request. Only returns items after receiving the +/// stream termination. +pub struct BlocksByRangeRequestItems { + request: BlocksByRangeRequest, + items: Vec>>, +} + +impl BlocksByRangeRequestItems { + pub fn new(request: BlocksByRangeRequest) -> Self { + Self { + request, + items: vec![], + } + } +} + +impl ActiveRequestItems for BlocksByRangeRequestItems { + type Item = Arc>; + + fn add(&mut self, block: Self::Item) -> Result { + if block.slot().as_u64() < *self.request.start_slot() + || block.slot().as_u64() >= self.request.start_slot() + self.request.count() + { + return Err(LookupVerifyError::UnrequestedSlot(block.slot())); + } + if self + .items + .iter() + .any(|existing| existing.slot() == block.slot()) + { + // DuplicatedData is a common error for all components, default index to 0 + return Err(LookupVerifyError::DuplicatedData(block.slot(), 0)); + } + + self.items.push(block); + + Ok(self.items.len() >= *self.request.count() as usize) + } + + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) + } +} diff --git a/beacon_node/network/src/sync/network_context/requests/data_columns_by_range.rs 
b/beacon_node/network/src/sync/network_context/requests/data_columns_by_range.rs new file mode 100644 index 0000000000..9dabb2defa --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/data_columns_by_range.rs @@ -0,0 +1,54 @@ +use super::{ActiveRequestItems, LookupVerifyError}; +use lighthouse_network::rpc::methods::DataColumnsByRangeRequest; +use std::sync::Arc; +use types::{DataColumnSidecar, EthSpec}; + +/// Accumulates results of a data_columns_by_range request. Only returns items after receiving the +/// stream termination. +pub struct DataColumnsByRangeRequestItems { + request: DataColumnsByRangeRequest, + items: Vec>>, +} + +impl DataColumnsByRangeRequestItems { + pub fn new(request: DataColumnsByRangeRequest) -> Self { + Self { + request, + items: vec![], + } + } +} + +impl ActiveRequestItems for DataColumnsByRangeRequestItems { + type Item = Arc>; + + fn add(&mut self, data_column: Self::Item) -> Result { + if data_column.slot() < self.request.start_slot + || data_column.slot() >= self.request.start_slot + self.request.count + { + return Err(LookupVerifyError::UnrequestedSlot(data_column.slot())); + } + if !self.request.columns.contains(&data_column.index) { + return Err(LookupVerifyError::UnrequestedIndex(data_column.index)); + } + if !data_column.verify_inclusion_proof() { + return Err(LookupVerifyError::InvalidInclusionProof); + } + if self.items.iter().any(|existing| { + existing.slot() == data_column.slot() && existing.index == data_column.index + }) { + return Err(LookupVerifyError::DuplicatedData( + data_column.slot(), + data_column.index, + )); + } + + self.items.push(data_column); + + Ok(self.items.len() >= self.request.count as usize * self.request.columns.len()) + } + + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) + } +} diff --git a/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs b/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs index 
1b8d46ff07..4e02737f08 100644 --- a/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs @@ -57,7 +57,10 @@ impl ActiveRequestItems for DataColumnsByRootRequestItems { return Err(LookupVerifyError::UnrequestedIndex(data_column.index)); } if self.items.iter().any(|d| d.index == data_column.index) { - return Err(LookupVerifyError::DuplicateData); + return Err(LookupVerifyError::DuplicatedData( + data_column.slot(), + data_column.index, + )); } self.items.push(data_column); diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 53fb55b14d..818fde07b8 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -1,4 +1,4 @@ -use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; +use beacon_chain::block_verification_types::RpcBlock; use lighthouse_network::rpc::methods::BlocksByRangeRequest; use lighthouse_network::service::api_types::Id; use lighthouse_network::PeerId; @@ -277,36 +277,6 @@ impl BatchInfo { > { match self.state.poison() { BatchState::Downloading(peer, _request_id) => { - // verify that blocks are in range - if let Some(last_slot) = blocks.last().map(|b| b.slot()) { - // the batch is non-empty - let first_slot = blocks[0].slot(); - - let failed_range = if first_slot < self.start_slot { - Some((self.start_slot, first_slot)) - } else if self.end_slot < last_slot { - Some((self.end_slot, last_slot)) - } else { - None - }; - - if let Some((expected, received)) = failed_range { - // this is a failed download, register the attempt and check if the batch - // can be tried again - self.failed_download_attempts.push(peer); - self.state = if self.failed_download_attempts.len() - >= B::max_batch_download_attempts() as usize - { - BatchState::Failed - } else { - // drop the blocks - BatchState::AwaitingDownload - }; - - return 
Err(Ok((expected, received, self.outcome()))); - } - } - let received = blocks.len(); self.state = BatchState::AwaitingProcessing(peer, blocks, Instant::now()); Ok(received) diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index cfd89f7b44..f78b44308d 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -11,7 +11,10 @@ use lighthouse_network::rpc::methods::{ OldBlocksByRangeRequestV2, }; use lighthouse_network::rpc::{RequestType, StatusMessage}; -use lighthouse_network::service::api_types::{AppRequestId, Id, SyncRequestId}; +use lighthouse_network::service::api_types::{ + AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, + SyncRequestId, +}; use lighthouse_network::{PeerId, SyncInfo}; use std::time::Duration; use types::{ @@ -28,8 +31,8 @@ pub(crate) enum DataSidecars { enum ByRangeDataRequestIds { PreDeneb, - PrePeerDAS(Id, PeerId), - PostPeerDAS(Vec<(Id, PeerId)>), + PrePeerDAS(BlobsByRangeRequestId, PeerId), + PostPeerDAS(Vec<(DataColumnsByRangeRequestId, PeerId)>), } /// Sync tests are usually written in the form: @@ -151,7 +154,7 @@ impl TestRig { fn find_blocks_by_range_request( &mut self, request_filter: RequestFilter, - ) -> ((Id, PeerId), ByRangeDataRequestIds) { + ) -> ((BlocksByRangeRequestId, PeerId), ByRangeDataRequestIds) { let filter_f = |peer: PeerId, start_slot: u64| { if let Some(expected_epoch) = request_filter.epoch { let epoch = Slot::new(start_slot).epoch(E::slots_per_epoch()).as_u64(); @@ -175,7 +178,7 @@ impl TestRig { RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( OldBlocksByRangeRequestV2 { start_slot, .. 
}, )), - request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + request_id: AppRequestId::Sync(SyncRequestId::BlocksByRange(id)), } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), _ => None, }) @@ -190,7 +193,7 @@ impl TestRig { RequestType::DataColumnsByRange(DataColumnsByRangeRequest { start_slot, .. }), - request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRange(id)), } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), _ => None, }) { @@ -206,7 +209,7 @@ impl TestRig { NetworkMessage::SendRequest { peer_id, request: RequestType::BlobsByRange(BlobsByRangeRequest { start_slot, .. }), - request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + request_id: AppRequestId::Sync(SyncRequestId::BlobsByRange(id)), } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), _ => None, }) @@ -225,10 +228,10 @@ impl TestRig { // Complete the request with a single stream termination self.log(&format!( - "Completing BlocksByRange request {blocks_req_id} with empty stream" + "Completing BlocksByRange request {blocks_req_id:?} with empty stream" )); self.send_sync_message(SyncMessage::RpcBlock { - request_id: SyncRequestId::RangeBlockAndBlobs { id: blocks_req_id }, + request_id: SyncRequestId::BlocksByRange(blocks_req_id), peer_id: block_peer, beacon_block: None, seen_timestamp: D, @@ -239,10 +242,10 @@ impl TestRig { ByRangeDataRequestIds::PrePeerDAS(id, peer_id) => { // Complete the request with a single stream termination self.log(&format!( - "Completing BlobsByRange request {id} with empty stream" + "Completing BlobsByRange request {id:?} with empty stream" )); self.send_sync_message(SyncMessage::RpcBlob { - request_id: SyncRequestId::RangeBlockAndBlobs { id }, + request_id: SyncRequestId::BlobsByRange(id), peer_id, blob_sidecar: None, seen_timestamp: D, @@ -252,10 +255,10 @@ impl TestRig { // Complete the request with a single 
stream termination for (id, peer_id) in data_column_req_ids { self.log(&format!( - "Completing DataColumnsByRange request {id} with empty stream" + "Completing DataColumnsByRange request {id:?} with empty stream" )); self.send_sync_message(SyncMessage::RpcDataColumn { - request_id: SyncRequestId::RangeBlockAndBlobs { id }, + request_id: SyncRequestId::DataColumnsByRange(id), peer_id, data_column: None, seen_timestamp: D, From a4e3f361bfae83d779a1e09dd4581d01686ff33a Mon Sep 17 00:00:00 2001 From: Krishang Shah <93703995+kamuik16@users.noreply.github.com> Date: Thu, 6 Feb 2025 10:49:51 +0530 Subject: [PATCH 36/52] Update metrics.rs (#6863) Fixes #5206, a low-hanging fruit. --- beacon_node/beacon_chain/src/metrics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index ae3add7f03..295adf11fe 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -108,7 +108,7 @@ pub static BLOCK_PROCESSING_POST_EXEC_PROCESSING: LazyLock> = try_create_histogram_with_buckets( "beacon_block_processing_post_exec_pre_attestable_seconds", "Time between finishing execution processing and the block becoming attestable", - linear_buckets(5e-3, 5e-3, 10), + linear_buckets(0.01, 0.01, 15), ) }); pub static BLOCK_PROCESSING_DATA_COLUMNS_WAIT: LazyLock> = LazyLock::new(|| { From 364a978f12c85ed4e263311e34f814b0ae506932 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 6 Feb 2025 18:08:20 +1100 Subject: [PATCH 37/52] Fix attestation queue length metric (#6924) We were using the wrong queue length for attestation work event metrics. 
--- beacon_node/beacon_processor/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 07d2a90df9..92f4636c95 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -1430,7 +1430,7 @@ impl BeaconProcessor { if let Some(modified_queue_id) = modified_queue_id { let queue_len = match modified_queue_id { - WorkType::GossipAttestation => aggregate_queue.len(), + WorkType::GossipAttestation => attestation_queue.len(), WorkType::UnknownBlockAttestation => unknown_block_attestation_queue.len(), WorkType::GossipAttestationBatch => 0, // No queue WorkType::GossipAggregate => aggregate_queue.len(), From 9c45a0e8c1e236e34ba8dd4aa6aae190fc99ea27 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 7 Feb 2025 15:53:14 +1100 Subject: [PATCH 38/52] Use old geth version due to breaking changes. (#6936) A temporary workaround for the failing execution tests for geth. https://github.com/sigp/lighthouse/actions/runs/13192297954 The test is broken due to the following breaking changes in geth that requires updating our tests: 1. removal of `personal` namespace in v1.14.12: See #30704 2. removal of `totalDifficulty` field from RPC in v1.14.11. See #30386. Using an older version for now (` 1.14.10`) as we need to get things merged for the upcoming release. Will create a separate issue to fix this. 
--- testing/execution_engine_integration/src/geth.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 0bd96a5c93..ea143ed433 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -7,7 +7,10 @@ use std::{env, fs}; use tempfile::TempDir; use unused_port::unused_tcp4_port; -const GETH_BRANCH: &str = "master"; +// This is not currently used due to the following breaking changes in geth that requires updating our tests: +// 1. removal of `personal` namespace in v1.14.12: See #30704 +// 2. removal of `totalDifficulty` field from RPC in v1.14.11. See #30386. +// const GETH_BRANCH: &str = "master"; const GETH_REPO_URL: &str = "https://github.com/ethereum/go-ethereum"; pub fn build_result(repo_dir: &Path) -> Output { @@ -27,12 +30,14 @@ pub fn build(execution_clients_dir: &Path) { } // Get the latest tag on the branch - let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); - build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap(); + // let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); + // Using an older release due to breaking changes in recent releases. See comment on `GETH_BRANCH` const. + let release_tag = "v1.14.10"; + build_utils::checkout(&repo_dir, dbg!(release_tag)).unwrap(); // Build geth build_utils::check_command_output(build_result(&repo_dir), || { - format!("geth make failed using release {last_release}") + format!("geth make failed using release {release_tag}") }); } From 59afe41d61cb8fd5eb06c229c292097bae0b614d Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Fri, 7 Feb 2025 02:38:36 -0300 Subject: [PATCH 39/52] Reduce ForkName boilerplate in fork-context (#6933) Noted that there's a bit of fork boiler plate in fork context. 
If we list a mapping of ForkName -> fork_version in the ForkName enum we can get rid of it :) Not much, but should make the next fork a tiny bit less annoying --- consensus/types/src/fork_context.rs | 71 +++++++---------------- consensus/types/src/fork_name.rs | 39 ++++++++++++---- 2 files changed, 47 insertions(+), 63 deletions(-) diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 33f1c51d44..8f3632d895 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -22,61 +22,22 @@ impl ForkContext { genesis_validators_root: Hash256, spec: &ChainSpec, ) -> Self { - let mut fork_to_digest = vec![( - ForkName::Base, - ChainSpec::compute_fork_digest(spec.genesis_fork_version, genesis_validators_root), - )]; - - // Only add Altair to list of forks if it's enabled - // Note: `altair_fork_epoch == None` implies altair hasn't been activated yet on the config. - if spec.altair_fork_epoch.is_some() { - fork_to_digest.push(( - ForkName::Altair, - ChainSpec::compute_fork_digest(spec.altair_fork_version, genesis_validators_root), - )); - } - - - Only add Bellatrix to list of forks if it's enabled - // Note: `bellatrix_fork_epoch == None` implies bellatrix hasn't been activated yet on the config.
- if spec.bellatrix_fork_epoch.is_some() { - fork_to_digest.push(( - ForkName::Bellatrix, - ChainSpec::compute_fork_digest( - spec.bellatrix_fork_version, - genesis_validators_root, - ), - )); - } - - if spec.capella_fork_epoch.is_some() { - fork_to_digest.push(( - ForkName::Capella, - ChainSpec::compute_fork_digest(spec.capella_fork_version, genesis_validators_root), - )); - } - - if spec.deneb_fork_epoch.is_some() { - fork_to_digest.push(( - ForkName::Deneb, - ChainSpec::compute_fork_digest(spec.deneb_fork_version, genesis_validators_root), - )); - } - - if spec.electra_fork_epoch.is_some() { - fork_to_digest.push(( - ForkName::Electra, - ChainSpec::compute_fork_digest(spec.electra_fork_version, genesis_validators_root), - )); - } - - if spec.fulu_fork_epoch.is_some() { - fork_to_digest.push(( - ForkName::Fulu, - ChainSpec::compute_fork_digest(spec.fulu_fork_version, genesis_validators_root), - )); - } - - let fork_to_digest: HashMap = fork_to_digest.into_iter().collect(); + let fork_to_digest: HashMap = ForkName::list_all() + .into_iter() + .filter_map(|fork| { + if fork.fork_epoch(spec).is_some() { + Some(( + fork, + ChainSpec::compute_fork_digest( + ForkName::fork_version(fork, spec), + genesis_validators_root, + ), + )) + } else { + None + } + }) + .collect(); let digest_to_fork = fork_to_digest .clone() diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 40557e0cb9..4a46baf57f 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -34,14 +34,37 @@ impl ForkName { } pub fn list_all_fork_epochs(spec: &ChainSpec) -> Vec<(ForkName, Option)> { - vec![ - (ForkName::Altair, spec.altair_fork_epoch), - (ForkName::Bellatrix, spec.bellatrix_fork_epoch), - (ForkName::Capella, spec.capella_fork_epoch), - (ForkName::Deneb, spec.deneb_fork_epoch), - (ForkName::Electra, spec.electra_fork_epoch), - (ForkName::Fulu, spec.fulu_fork_epoch), - ] + ForkName::list_all() + .into_iter() + // Skip Base + .skip(1) + 
.map(|fork| (fork, fork.fork_epoch(spec))) + .collect() + } + + pub fn fork_epoch(self, spec: &ChainSpec) -> Option { + match self { + Self::Base => Some(Epoch::new(0)), + Self::Altair => spec.altair_fork_epoch, + Self::Bellatrix => spec.bellatrix_fork_epoch, + Self::Capella => spec.capella_fork_epoch, + Self::Deneb => spec.deneb_fork_epoch, + Self::Electra => spec.electra_fork_epoch, + Self::Fulu => spec.fulu_fork_epoch, + } + } + + /// Returns the fork version of a fork + pub fn fork_version(self, spec: &ChainSpec) -> [u8; 4] { + match self { + Self::Base => spec.genesis_fork_version, + Self::Altair => spec.altair_fork_version, + Self::Bellatrix => spec.bellatrix_fork_version, + Self::Capella => spec.capella_fork_version, + Self::Deneb => spec.deneb_fork_version, + Self::Electra => spec.electra_fork_version, + Self::Fulu => spec.fulu_fork_version, + } } pub fn latest() -> ForkName { From 7408719de8449a6441bbab5fcd27431f55df7d3d Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Fri, 7 Feb 2025 16:48:52 +0900 Subject: [PATCH 40/52] Remove unused metrics (#6817) N/A Removed metrics that were defined but not used anywhere. 
--- beacon_node/beacon_chain/src/metrics.rs | 55 ------------------------- 1 file changed, 55 deletions(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 295adf11fe..d1c7a2a5df 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -85,12 +85,6 @@ pub static BLOCK_PROCESSING_COMMITTEE: LazyLock> = LazyLock::n "Time spent building/obtaining committees for block processing.", ) }); -pub static BLOCK_PROCESSING_SIGNATURE: LazyLock> = LazyLock::new(|| { - try_create_histogram( - "beacon_block_processing_signature_seconds", - "Time spent doing signature verification for a block.", - ) -}); pub static BLOCK_PROCESSING_CORE: LazyLock> = LazyLock::new(|| { try_create_histogram( "beacon_block_processing_core_seconds", @@ -591,12 +585,6 @@ pub static FORK_CHOICE_WRITE_LOCK_AQUIRE_TIMES: LazyLock> = La exponential_buckets(1e-3, 4.0, 7), ) }); -pub static FORK_CHOICE_SET_HEAD_LAG_TIMES: LazyLock> = LazyLock::new(|| { - try_create_histogram( - "beacon_fork_choice_set_head_lag_times", - "Time taken between finding the head and setting the canonical head value", - ) -}); pub static BALANCES_CACHE_HITS: LazyLock> = LazyLock::new(|| { try_create_int_counter( "beacon_balances_cache_hits_total", @@ -651,12 +639,6 @@ pub static DEFAULT_ETH1_VOTES: LazyLock> = LazyLock::new(|| { /* * Chain Head */ -pub static UPDATE_HEAD_TIMES: LazyLock> = LazyLock::new(|| { - try_create_histogram( - "beacon_update_head_seconds", - "Time taken to update the canonical head", - ) -}); pub static HEAD_STATE_SLOT: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "beacon_head_state_slot", @@ -1547,20 +1529,6 @@ pub static SYNC_CONTRIBUTION_PROCESSING_APPLY_TO_OP_POOL: LazyLock> = - LazyLock::new(|| { - try_create_histogram( - "beacon_sync_contribution_processing_signature_setup_seconds", - "Time spent on setting up for the signature verification of sync contribution processing" - ) - }); -pub static 
SYNC_CONTRIBUTION_PROCESSING_SIGNATURE_TIMES: LazyLock> = - LazyLock::new(|| { - try_create_histogram( - "beacon_sync_contribution_processing_signature_seconds", - "Time spent on the signature verification of sync contribution processing", - ) - }); /* * General Sync Committee Contribution Processing @@ -1690,13 +1658,6 @@ pub static DATA_COLUMN_SIDECAR_GOSSIP_VERIFICATION_TIMES: LazyLock> = - LazyLock::new(|| { - try_create_int_counter( - "beacon_data_column_sidecar_processing_successes_total", - "Number of data column sidecars verified for gossip", - ) - }); pub static BLOBS_FROM_EL_HIT_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter( @@ -1873,15 +1834,6 @@ pub static BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES: LazyLock ) }, ); -/* - * Availability related metrics - */ -pub static BLOCK_AVAILABILITY_DELAY: LazyLock> = LazyLock::new(|| { - try_create_int_gauge( - "block_availability_delay", - "Duration between start of the slot and the time at which all components of the block are available.", - ) -}); /* * Data Availability cache metrics @@ -1900,13 +1852,6 @@ pub static DATA_AVAILABILITY_OVERFLOW_MEMORY_STATE_CACHE_SIZE: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "data_availability_overflow_store_cache_size", - "Number of entries in the data availability overflow store cache.", - ) - }); pub static DATA_AVAILABILITY_RECONSTRUCTION_TIME: LazyLock> = LazyLock::new(|| { try_create_histogram( From 921d95217d97af123b6a1e45916ed94e763d25a4 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Fri, 7 Feb 2025 04:48:58 -0300 Subject: [PATCH 41/52] Remove un-used batch sync error condition (#6917) - PR https://github.com/sigp/lighthouse/pull/6497 made obsolete some consistency checks inside the batch I forgot to remove the consumers of those errors Remove un-used batch sync error condition, which was a nested `Result<_, Result<_, E>>` --- .../network/src/sync/backfill_sync/mod.rs | 20 +-------- 
.../network/src/sync/range_sync/batch.rs | 9 ++-- .../network/src/sync/range_sync/chain.rs | 45 ++++++------------- 3 files changed, 18 insertions(+), 56 deletions(-) diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index a3d2c82642..4220f85fc3 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -422,24 +422,8 @@ impl BackFillSync { self.request_batches(network)?; self.process_completed_batches(network) } - Err(result) => { - let (expected_boundary, received_boundary, outcome) = match result { - Err(e) => { - self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; - return Ok(ProcessResult::Successful); - } - Ok(v) => v, - }; - warn!(self.log, "Batch received out of range blocks"; "expected_boundary" => expected_boundary, "received_boundary" => received_boundary, - "peer_id" => %peer_id, batch); - - if let BatchOperationOutcome::Failed { blacklist: _ } = outcome { - error!(self.log, "Backfill failed"; "epoch" => batch_id, "received_boundary" => received_boundary, "expected_boundary" => expected_boundary); - self.fail_sync(BackFillError::BatchDownloadFailed(batch_id))?; - return Ok(ProcessResult::Successful); - } - // this batch can't be used, so we need to request it again. 
- self.retry_batch_download(network, batch_id)?; + Err(e) => { + self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; Ok(ProcessResult::Successful) } } diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 818fde07b8..912287a8a4 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -271,10 +271,7 @@ impl BatchInfo { pub fn download_completed( &mut self, blocks: Vec>, - ) -> Result< - usize, /* Received blocks */ - Result<(Slot, Slot, BatchOperationOutcome), WrongState>, - > { + ) -> Result { match self.state.poison() { BatchState::Downloading(peer, _request_id) => { let received = blocks.len(); @@ -284,10 +281,10 @@ impl BatchInfo { BatchState::Poisoned => unreachable!("Poisoned batch"), other => { self.state = other; - Err(Err(WrongState(format!( + Err(WrongState(format!( "Download completed for batch in wrong state {:?}", self.state - )))) + ))) } } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 4eb73f5483..f02262e4b5 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -265,40 +265,21 @@ impl SyncingChain { } }; - { - // A stream termination has been sent. This batch has ended. Process a completed batch. - // Remove the request from the peer's active batches - self.peers - .get_mut(peer_id) - .map(|active_requests| active_requests.remove(&batch_id)); + // A stream termination has been sent. This batch has ended. Process a completed batch. 
+ // Remove the request from the peer's active batches + self.peers + .get_mut(peer_id) + .map(|active_requests| active_requests.remove(&batch_id)); - match batch.download_completed(blocks) { - Ok(received) => { - let awaiting_batches = batch_id - .saturating_sub(self.optimistic_start.unwrap_or(self.processing_target)) - / EPOCHS_PER_BATCH; - debug!(self.log, "Batch downloaded"; "epoch" => batch_id, "blocks" => received, "batch_state" => self.visualize_batch_state(), "awaiting_batches" => awaiting_batches); + let received = batch.download_completed(blocks)?; + let awaiting_batches = batch_id + .saturating_sub(self.optimistic_start.unwrap_or(self.processing_target)) + / EPOCHS_PER_BATCH; + debug!(self.log, "Batch downloaded"; "epoch" => batch_id, "blocks" => received, "batch_state" => self.visualize_batch_state(), "awaiting_batches" => awaiting_batches); - // pre-emptively request more blocks from peers whilst we process current blocks, - self.request_batches(network)?; - self.process_completed_batches(network) - } - Err(result) => { - let (expected_boundary, received_boundary, outcome) = result?; - warn!(self.log, "Batch received out of range blocks"; "expected_boundary" => expected_boundary, "received_boundary" => received_boundary, - "peer_id" => %peer_id, batch); - - if let BatchOperationOutcome::Failed { blacklist } = outcome { - return Err(RemoveChain::ChainFailed { - blacklist, - failing_batch: batch_id, - }); - } - // this batch can't be used, so we need to request it again. - self.retry_batch_download(network, batch_id) - } - } - } + // pre-emptively request more blocks from peers whilst we process current blocks, + self.request_batches(network)?; + self.process_completed_batches(network) } /// Processes the batch with the given id. 
From d6596dbe210b4e85bcbc97062e8d257b1a952921 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Fri, 7 Feb 2025 17:19:29 +0800 Subject: [PATCH 42/52] Keep execution payload during historical backfill when prune-payloads set to false (#6766) - #6510 - Keep execution payload during historical backfill when `--prune-payloads false` is set - Add a field in the historical backfill debug log to indicate if execution payload is kept - Add a test to check historical blocks has execution payload when `--prune-payloads false is set - Very minor typo correction that I notice when working on this --- .../beacon_chain/src/historical_blocks.rs | 18 ++++++++++++++---- beacon_node/beacon_chain/tests/store_tests.rs | 19 +++++++++++++++---- .../network_beacon_processor/sync_methods.rs | 1 + beacon_node/store/src/hot_cold_store.rs | 2 +- 4 files changed, 31 insertions(+), 9 deletions(-) diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index e22ec95a79..a48f32e7b4 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -130,10 +130,20 @@ impl BeaconChain { }); } - let blinded_block = block.clone_as_blinded(); - // Store block in the hot database without payload. - self.store - .blinded_block_as_kv_store_ops(&block_root, &blinded_block, &mut hot_batch); + if !self.store.get_config().prune_payloads { + // If prune-payloads is set to false, store the block which includes the execution payload + self.store + .block_as_kv_store_ops(&block_root, (*block).clone(), &mut hot_batch)?; + } else { + let blinded_block = block.clone_as_blinded(); + // Store block in the hot database without payload. 
+ self.store.blinded_block_as_kv_store_ops( + &block_root, + &blinded_block, + &mut hot_batch, + ); + } + // Store the blobs too if let Some(blobs) = maybe_blobs { new_oldest_blob_slot = Some(block.slot()); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 8654b33646..7a2df76970 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -47,7 +47,11 @@ type E = MinimalEthSpec; type TestHarness = BeaconChainHarness>; fn get_store(db_path: &TempDir) -> Arc, BeaconNodeBackend>> { - get_store_generic(db_path, StoreConfig::default(), test_spec::()) + let store_config = StoreConfig { + prune_payloads: false, + ..StoreConfig::default() + }; + get_store_generic(db_path, store_config, test_spec::()) } fn get_store_generic( @@ -2571,6 +2575,15 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { if block_root != prev_block_root { assert_eq!(block.slot(), slot); } + + // Prune_payloads is set to false in the default config, so the payload should exist + if block.message().execution_payload().is_ok() { + assert!(beacon_chain + .store + .execution_payload_exists(&block_root) + .unwrap(),); + } + prev_block_root = block_root; } @@ -3558,7 +3571,6 @@ fn check_split_slot( /// Check that all the states in a chain dump have the correct tree hash. fn check_chain_dump(harness: &TestHarness, expected_len: u64) { let mut chain_dump = harness.chain.chain_dump().unwrap(); - let split_slot = harness.chain.store.get_split_slot(); assert_eq!(chain_dump.len() as u64, expected_len); @@ -3585,13 +3597,12 @@ fn check_chain_dump(harness: &TestHarness, expected_len: u64) { // Check presence of execution payload on disk. 
if harness.chain.spec.bellatrix_fork_epoch.is_some() { - assert_eq!( + assert!( harness .chain .store .execution_payload_exists(&checkpoint.beacon_block_root) .unwrap(), - checkpoint.beacon_block.slot() >= split_slot, "incorrect payload storage for block at slot {}: {:?}", checkpoint.beacon_block.slot(), checkpoint.beacon_block_root, diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 817e6b6440..338f2bc4c8 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -483,6 +483,7 @@ impl NetworkBeaconProcessor { debug!(self.log, "Backfill batch processed"; "batch_epoch" => epoch, "first_block_slot" => start_slot, + "keep_execution_payload" => !self.chain.store.get_config().prune_payloads, "last_block_slot" => end_slot, "processed_blocks" => sent_blocks, "processed_blobs" => n_blobs, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 128c03f771..e4a857b799 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -516,7 +516,7 @@ impl, Cold: ItemStore> HotColdDB .ok_or(Error::AddPayloadLogicError) } - /// Prepare a signed beacon block for storage in the datbase *without* its payload. + /// Prepare a signed beacon block for storage in the database *without* its payload. pub fn blinded_block_as_kv_store_ops( &self, key: &Hash256, From cb117f859d5be56299bf116cd5c8b947ffe855f0 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 7 Feb 2025 20:19:32 +1100 Subject: [PATCH 43/52] Fix fetch blobs in all-null case (#6940) Fix another issue with fetch-blobs, similar to: - https://github.com/sigp/lighthouse/pull/6911 Check if the list of blobs returned is all `None`, and if so, do not proceed any further. 
This prevents an ugly error like: > Feb 03 17:32:12.384 ERRO Error fetching or processing blobs from EL, block_root: 0x7326fe2dc1cb9036c9de7a07a662c86a339085597849016eadf061b70b7815ba, error: BlobProcessingError(AvailabilityCheck(Unexpected)), module : network::network_beacon_processor:1011 --- beacon_node/beacon_chain/src/fetch_blobs.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/fetch_blobs.rs b/beacon_node/beacon_chain/src/fetch_blobs.rs index 5bc2b92ec3..6e365f936d 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs.rs @@ -91,7 +91,7 @@ pub async fn fetch_and_process_engine_blobs( .await .map_err(FetchEngineBlobError::RequestFailed)?; - if response.is_empty() { + if response.is_empty() || response.iter().all(|opt| opt.is_none()) { debug!( log, "No blobs fetched from the EL"; From 2bd5bbdffb87fde48f45b9e7e681780324e03b2b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sat, 8 Feb 2025 10:18:57 +1100 Subject: [PATCH 44/52] Optimise and refine `SingleAttestation` conversion (#6934) Closes - https://github.com/sigp/lighthouse/issues/6805 - Use a new `WorkEvent::GossipAttestationToConvert` to handle the conversion from `SingleAttestation` to `Attestation` _on_ the beacon processor (prevents a Tokio thread being blocked). - Improve the error handling for single attestations. I think previously we had no ability to reprocess single attestations for unknown blocks -- we would just error. This seemed to be the case in both gossip processing and processing of `SingleAttestation`s from the HTTP API. - Move the `SingleAttestation -> Attestation` conversion function into `beacon_chain` so that it can return the `attestation_verification::Error` type, which has well-defined error handling and peer penalties. The now-unused variants of `types::Attestation::Error` have been removed. 
--- .../src/attestation_verification.rs | 33 ++- beacon_node/beacon_chain/src/lib.rs | 1 + .../beacon_chain/src/single_attestation.rs | 46 ++++ beacon_node/beacon_chain/src/test_utils.rs | 4 +- beacon_node/beacon_processor/src/lib.rs | 45 +++- .../http_api/src/publish_attestations.rs | 51 ++-- .../gossip_methods.rs | 219 ++++++++++++++++-- .../src/network_beacon_processor/mod.rs | 81 +++---- consensus/types/src/attestation.rs | 42 +--- .../types/src/sync_committee_contribution.rs | 2 - 10 files changed, 379 insertions(+), 145 deletions(-) create mode 100644 beacon_node/beacon_chain/src/single_attestation.rs diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index a69eb99a51..a70a2caa4f 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -60,9 +60,9 @@ use std::borrow::Cow; use strum::AsRefStr; use tree_hash::TreeHash; use types::{ - Attestation, AttestationRef, BeaconCommittee, BeaconStateError::NoCommitteeFound, ChainSpec, - CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation, SelectionProof, - SignedAggregateAndProof, SingleAttestation, Slot, SubnetId, + Attestation, AttestationData, AttestationRef, BeaconCommittee, + BeaconStateError::NoCommitteeFound, ChainSpec, CommitteeIndex, Epoch, EthSpec, Hash256, + IndexedAttestation, SelectionProof, SignedAggregateAndProof, SingleAttestation, Slot, SubnetId, }; pub use batch::{batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations}; @@ -115,6 +115,17 @@ pub enum Error { /// /// The peer has sent an invalid message. AggregatorNotInCommittee { aggregator_index: u64 }, + /// The `attester_index` for a `SingleAttestation` is not a member of the committee defined + /// by its `beacon_block_root`, `committee_index` and `slot`. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. 
+ AttesterNotInCommittee { + attester_index: u64, + committee_index: u64, + slot: Slot, + }, /// The aggregator index refers to a validator index that we have not seen. /// /// ## Peer scoring @@ -485,7 +496,11 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future attestations for later processing. - verify_propagation_slot_range(&chain.slot_clock, attestation, &chain.spec)?; + verify_propagation_slot_range::<_, T::EthSpec>( + &chain.slot_clock, + attestation.data(), + &chain.spec, + )?; // Check the attestation's epoch matches its target. if attestation.data().slot.epoch(T::EthSpec::slots_per_epoch()) @@ -817,7 +832,11 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future attestations for later processing. - verify_propagation_slot_range(&chain.slot_clock, attestation, &chain.spec)?; + verify_propagation_slot_range::<_, T::EthSpec>( + &chain.slot_clock, + attestation.data(), + &chain.spec, + )?; // Check to ensure that the attestation is "unaggregated". I.e., it has exactly one // aggregation bit set. @@ -1133,10 +1152,10 @@ fn verify_head_block_is_known( /// Accounts for `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. 
pub fn verify_propagation_slot_range( slot_clock: &S, - attestation: AttestationRef, + attestation: &AttestationData, spec: &ChainSpec, ) -> Result<(), Error> { - let attestation_slot = attestation.data().slot; + let attestation_slot = attestation.slot; let latest_permissible_slot = slot_clock .now_with_future_tolerance(spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)?; diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 456b3c0dd8..48168aeb02 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -54,6 +54,7 @@ mod pre_finalization_cache; pub mod proposer_prep_service; pub mod schema_change; pub mod shuffling_cache; +pub mod single_attestation; pub mod state_advance_timer; pub mod sync_committee_rewards; pub mod sync_committee_verification; diff --git a/beacon_node/beacon_chain/src/single_attestation.rs b/beacon_node/beacon_chain/src/single_attestation.rs new file mode 100644 index 0000000000..fa4f98bb07 --- /dev/null +++ b/beacon_node/beacon_chain/src/single_attestation.rs @@ -0,0 +1,46 @@ +use crate::attestation_verification::Error; +use types::{Attestation, AttestationElectra, BitList, BitVector, EthSpec, SingleAttestation}; + +pub fn single_attestation_to_attestation( + single_attestation: &SingleAttestation, + committee: &[usize], +) -> Result, Error> { + let attester_index = single_attestation.attester_index; + let committee_index = single_attestation.committee_index; + let slot = single_attestation.data.slot; + + let aggregation_bit = committee + .iter() + .enumerate() + .find_map(|(i, &validator_index)| { + if attester_index as usize == validator_index { + return Some(i); + } + None + }) + .ok_or(Error::AttesterNotInCommittee { + attester_index, + committee_index, + slot, + })?; + + let mut committee_bits: BitVector = BitVector::default(); + committee_bits + .set(committee_index as usize, true) + .map_err(|e| Error::Invalid(e.into()))?; + + let 
mut aggregation_bits = + BitList::with_capacity(committee.len()).map_err(|e| Error::Invalid(e.into()))?; + aggregation_bits + .set(aggregation_bit, true) + .map_err(|e| Error::Invalid(e.into()))?; + + // TODO(electra): consider eventually allowing conversion to non-Electra attestations as well + // to maintain invertability (`Attestation` -> `SingleAttestation` -> `Attestation`). + Ok(Attestation::Electra(AttestationElectra { + aggregation_bits, + committee_bits, + data: single_attestation.data.clone(), + signature: single_attestation.signature.clone(), + })) +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 4526b2b360..e61146bfc8 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -7,6 +7,7 @@ pub use crate::persisted_beacon_chain::PersistedBeaconChain; pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, + single_attestation::single_attestation_to_attestation, sync_committee_verification::Error as SyncCommitteeError, validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}, BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification, @@ -1133,7 +1134,8 @@ where let single_attestation = attestation.to_single_attestation_with_attester_index(attester_index as u64)?; - let attestation: Attestation = single_attestation.to_attestation(committee.committee)?; + let attestation: Attestation = + single_attestation_to_attestation(&single_attestation, committee.committee).unwrap(); assert_eq!( single_attestation.committee_index, diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 92f4636c95..2743f93bb3 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -62,9 +62,9 @@ use task_executor::TaskExecutor; use tokio::sync::mpsc; use tokio::sync::mpsc::error::TrySendError; use 
types::{ - Attestation, BeaconState, ChainSpec, Hash256, RelativeEpoch, SignedAggregateAndProof, SubnetId, + Attestation, BeaconState, ChainSpec, EthSpec, Hash256, RelativeEpoch, SignedAggregateAndProof, + SingleAttestation, Slot, SubnetId, }; -use types::{EthSpec, Slot}; use work_reprocessing_queue::{ spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, @@ -504,10 +504,10 @@ impl From for WorkEvent { /// Items required to verify a batch of unaggregated gossip attestations. #[derive(Debug)] -pub struct GossipAttestationPackage { +pub struct GossipAttestationPackage { pub message_id: MessageId, pub peer_id: PeerId, - pub attestation: Box>, + pub attestation: Box, pub subnet_id: SubnetId, pub should_import: bool, pub seen_timestamp: Duration, @@ -549,21 +549,32 @@ pub enum BlockingOrAsync { Blocking(BlockingFn), Async(AsyncFn), } +pub type GossipAttestationBatch = Vec>>; /// Indicates the type of work to be performed and therefore its priority and /// queuing specifics. pub enum Work { GossipAttestation { - attestation: Box>, - process_individual: Box) + Send + Sync>, - process_batch: Box>) + Send + Sync>, + attestation: Box>>, + process_individual: Box>) + Send + Sync>, + process_batch: Box) + Send + Sync>, + }, + // Attestation requiring conversion before processing. + // + // For now this is a `SingleAttestation`, but eventually we will switch this around so that + // legacy `Attestation`s are converted and the main processing pipeline operates on + // `SingleAttestation`s. 
+ GossipAttestationToConvert { + attestation: Box>, + process_individual: + Box) + Send + Sync>, }, UnknownBlockAttestation { process_fn: BlockingFn, }, GossipAttestationBatch { - attestations: Vec>, - process_batch: Box>) + Send + Sync>, + attestations: GossipAttestationBatch, + process_batch: Box) + Send + Sync>, }, GossipAggregate { aggregate: Box>, @@ -639,6 +650,7 @@ impl fmt::Debug for Work { #[strum(serialize_all = "snake_case")] pub enum WorkType { GossipAttestation, + GossipAttestationToConvert, UnknownBlockAttestation, GossipAttestationBatch, GossipAggregate, @@ -690,6 +702,7 @@ impl Work { fn to_type(&self) -> WorkType { match self { Work::GossipAttestation { .. } => WorkType::GossipAttestation, + Work::GossipAttestationToConvert { .. } => WorkType::GossipAttestationToConvert, Work::GossipAttestationBatch { .. } => WorkType::GossipAttestationBatch, Work::GossipAggregate { .. } => WorkType::GossipAggregate, Work::GossipAggregateBatch { .. } => WorkType::GossipAggregateBatch, @@ -849,6 +862,7 @@ impl BeaconProcessor { let mut aggregate_queue = LifoQueue::new(queue_lengths.aggregate_queue); let mut aggregate_debounce = TimeLatch::default(); let mut attestation_queue = LifoQueue::new(queue_lengths.attestation_queue); + let mut attestation_to_convert_queue = LifoQueue::new(queue_lengths.attestation_queue); let mut attestation_debounce = TimeLatch::default(); let mut unknown_block_aggregate_queue = LifoQueue::new(queue_lengths.unknown_block_aggregate_queue); @@ -1180,6 +1194,9 @@ impl BeaconProcessor { None } } + // Convert any gossip attestations that need to be converted. + } else if let Some(item) = attestation_to_convert_queue.pop() { + Some(item) // Check sync committee messages after attestations as their rewards are lesser // and they don't influence fork choice. 
} else if let Some(item) = sync_contribution_queue.pop() { @@ -1301,6 +1318,9 @@ impl BeaconProcessor { match work { _ if can_spawn => self.spawn_worker(work, idle_tx), Work::GossipAttestation { .. } => attestation_queue.push(work), + Work::GossipAttestationToConvert { .. } => { + attestation_to_convert_queue.push(work) + } // Attestation batches are formed internally within the // `BeaconProcessor`, they are not sent from external services. Work::GossipAttestationBatch { .. } => crit!( @@ -1431,6 +1451,7 @@ impl BeaconProcessor { if let Some(modified_queue_id) = modified_queue_id { let queue_len = match modified_queue_id { WorkType::GossipAttestation => attestation_queue.len(), + WorkType::GossipAttestationToConvert => attestation_to_convert_queue.len(), WorkType::UnknownBlockAttestation => unknown_block_attestation_queue.len(), WorkType::GossipAttestationBatch => 0, // No queue WorkType::GossipAggregate => aggregate_queue.len(), @@ -1563,6 +1584,12 @@ impl BeaconProcessor { } => task_spawner.spawn_blocking(move || { process_individual(*attestation); }), + Work::GossipAttestationToConvert { + attestation, + process_individual, + } => task_spawner.spawn_blocking(move || { + process_individual(*attestation); + }), Work::GossipAttestationBatch { attestations, process_batch, diff --git a/beacon_node/http_api/src/publish_attestations.rs b/beacon_node/http_api/src/publish_attestations.rs index 1b9949d4d5..10d13e09a5 100644 --- a/beacon_node/http_api/src/publish_attestations.rs +++ b/beacon_node/http_api/src/publish_attestations.rs @@ -36,8 +36,8 @@ //! attestations and there's no immediate cause for concern. 
use crate::task_spawner::{Priority, TaskSpawner}; use beacon_chain::{ - validator_monitor::timestamp_now, AttestationError, BeaconChain, BeaconChainError, - BeaconChainTypes, + single_attestation::single_attestation_to_attestation, validator_monitor::timestamp_now, + AttestationError, BeaconChain, BeaconChainError, BeaconChainTypes, }; use beacon_processor::work_reprocessing_queue::{QueuedUnaggregate, ReprocessQueueMessage}; use either::Either; @@ -183,10 +183,10 @@ fn convert_to_attestation<'a, T: BeaconChainTypes>( chain: &Arc>, attestation: &'a Either, SingleAttestation>, ) -> Result>, Error> { - let a = match attestation { - Either::Left(a) => Cow::Borrowed(a), - Either::Right(single_attestation) => chain - .with_committee_cache( + match attestation { + Either::Left(a) => Ok(Cow::Borrowed(a)), + Either::Right(single_attestation) => { + let conversion_result = chain.with_committee_cache( single_attestation.data.target.root, single_attestation .data @@ -197,24 +197,33 @@ fn convert_to_attestation<'a, T: BeaconChainTypes>( single_attestation.data.slot, single_attestation.committee_index, ) else { - return Err(BeaconChainError::AttestationError( - types::AttestationError::NoCommitteeForSlotAndIndex { - slot: single_attestation.data.slot, - index: single_attestation.committee_index, - }, - )); + return Ok(Err(AttestationError::NoCommitteeForSlotAndIndex { + slot: single_attestation.data.slot, + index: single_attestation.committee_index, + })); }; - let attestation = - single_attestation.to_attestation::(committee.committee)?; - - Ok(Cow::Owned(attestation)) + Ok(single_attestation_to_attestation::( + single_attestation, + committee.committee, + ) + .map(Cow::Owned)) }, - ) - .map_err(Error::FailedConversion)?, - }; - - Ok(a) + ); + match conversion_result { + Ok(Ok(attestation)) => Ok(attestation), + Ok(Err(e)) => Err(Error::Validation(e)), + // Map the error returned by `with_committee_cache` for unknown blocks into the + // `UnknownHeadBlock` error that is 
gracefully handled. + Err(BeaconChainError::MissingBeaconBlock(beacon_block_root)) => { + Err(Error::Validation(AttestationError::UnknownHeadBlock { + beacon_block_root, + })) + } + Err(e) => Err(Error::FailedConversion(e)), + } + } + } } pub async fn publish_attestations( diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index dc8d32800e..090b963cbc 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -14,6 +14,7 @@ use beacon_chain::{ light_client_finality_update_verification::Error as LightClientFinalityUpdateError, light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError, observed_operations::ObservationOutcome, + single_attestation::single_attestation_to_attestation, sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::{get_block_delay_ms, get_slot_delay_ms}, AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, @@ -32,12 +33,12 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ - beacon_block::BlockImportSource, Attestation, AttestationRef, AttesterSlashing, BlobSidecar, - DataColumnSidecar, DataColumnSubnetId, EthSpec, Hash256, IndexedAttestation, - LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, + beacon_block::BlockImportSource, Attestation, AttestationData, AttestationRef, + AttesterSlashing, BlobSidecar, DataColumnSidecar, DataColumnSubnetId, EthSpec, Hash256, + IndexedAttestation, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, - SyncSubnetId, + 
SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation, Slot, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; use beacon_processor::{ @@ -45,7 +46,7 @@ use beacon_processor::{ QueuedAggregate, QueuedGossipBlock, QueuedLightClientUpdate, QueuedUnaggregate, ReprocessQueueMessage, }, - DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, + DuplicateCache, GossipAggregatePackage, GossipAttestationBatch, }; /// Set to `true` to introduce stricter penalties for peers who send some types of late consensus @@ -127,6 +128,11 @@ enum FailedAtt { should_import: bool, seen_timestamp: Duration, }, + // This variant is just a dummy variant for now, as SingleAttestation reprocessing is handled + // separately. + SingleUnaggregate { + attestation: Box, + }, Aggregate { attestation: Box>, seen_timestamp: Duration, @@ -135,20 +141,22 @@ enum FailedAtt { impl FailedAtt { pub fn beacon_block_root(&self) -> &Hash256 { - &self.attestation().data().beacon_block_root + &self.attestation_data().beacon_block_root } pub fn kind(&self) -> &'static str { match self { FailedAtt::Unaggregate { .. } => "unaggregated", + FailedAtt::SingleUnaggregate { .. } => "unaggregated", FailedAtt::Aggregate { .. } => "aggregated", } } - pub fn attestation(&self) -> AttestationRef { + pub fn attestation_data(&self) -> &AttestationData { match self { - FailedAtt::Unaggregate { attestation, .. } => attestation.to_ref(), - FailedAtt::Aggregate { attestation, .. } => attestation.message().aggregate(), + FailedAtt::Unaggregate { attestation, .. } => attestation.data(), + FailedAtt::SingleUnaggregate { attestation, .. } => &attestation.data, + FailedAtt::Aggregate { attestation, .. 
} => attestation.message().aggregate().data(), } } } @@ -229,7 +237,7 @@ impl NetworkBeaconProcessor { pub fn process_gossip_attestation_batch( self: Arc, - packages: Vec>, + packages: GossipAttestationBatch, reprocess_tx: Option>, ) { let attestations_and_subnets = packages @@ -399,6 +407,155 @@ impl NetworkBeaconProcessor { } } + /// Process an unaggregated attestation requiring conversion. + /// + /// This function performs the conversion, and if successfull queues a new message to be + /// processed by `process_gossip_attestation`. If unsuccessful due to block unavailability, + /// a retry message will be pushed to the `reprocess_tx` if it is `Some`. + #[allow(clippy::too_many_arguments)] + pub fn process_gossip_attestation_to_convert( + self: Arc, + message_id: MessageId, + peer_id: PeerId, + single_attestation: Box, + subnet_id: SubnetId, + should_import: bool, + reprocess_tx: Option>, + seen_timestamp: Duration, + ) { + let conversion_result = self.chain.with_committee_cache( + single_attestation.data.target.root, + single_attestation + .data + .slot + .epoch(T::EthSpec::slots_per_epoch()), + |committee_cache, _| { + let slot = single_attestation.data.slot; + let committee_index = single_attestation.committee_index; + let Some(committee) = committee_cache.get_beacon_committee(slot, committee_index) + else { + return Ok(Err(AttnError::NoCommitteeForSlotAndIndex { + slot, + index: committee_index, + })); + }; + + Ok(single_attestation_to_attestation( + &single_attestation, + committee.committee, + )) + }, + ); + + match conversion_result { + Ok(Ok(attestation)) => { + let slot = attestation.data().slot; + if let Err(e) = self.send_unaggregated_attestation( + message_id.clone(), + peer_id, + attestation, + subnet_id, + should_import, + seen_timestamp, + ) { + error!( + &self.log, + "Unable to queue converted SingleAttestation"; + "error" => %e, + "slot" => slot, + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); 
+ } + } + // Outermost error (from `with_committee_cache`) indicating that the block is not known + // and that this conversion should be retried. + Err(BeaconChainError::MissingBeaconBlock(beacon_block_root)) => { + if let Some(sender) = reprocess_tx { + metrics::inc_counter( + &metrics::BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_REQUEUED_TOTAL, + ); + // We don't know the block, get the sync manager to handle the block lookup, and + // send the attestation to be scheduled for re-processing. + self.sync_tx + .send(SyncMessage::UnknownBlockHashFromAttestation( + peer_id, + beacon_block_root, + )) + .unwrap_or_else(|_| { + warn!( + self.log, + "Failed to send to sync service"; + "msg" => "UnknownBlockHash" + ) + }); + let processor = self.clone(); + // Do not allow this attestation to be re-processed beyond this point. + let reprocess_msg = + ReprocessQueueMessage::UnknownBlockUnaggregate(QueuedUnaggregate { + beacon_block_root, + process_fn: Box::new(move || { + processor.process_gossip_attestation_to_convert( + message_id, + peer_id, + single_attestation, + subnet_id, + should_import, + None, + seen_timestamp, + ) + }), + }); + if sender.try_send(reprocess_msg).is_err() { + error!( + self.log, + "Failed to send attestation for re-processing"; + ) + } + } else { + // We shouldn't make any further attempts to process this attestation. + // + // Don't downscore the peer since it's not clear if we requested this head + // block from them or not. + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + } + Ok(Err(error)) => { + // We already handled reprocessing above so do not attempt it in the error handler. + self.handle_attestation_verification_failure( + peer_id, + message_id, + FailedAtt::SingleUnaggregate { + attestation: single_attestation, + }, + None, + error, + seen_timestamp, + ); + } + Err(error) => { + // We already handled reprocessing above so do not attempt it in the error handler. 
+ self.handle_attestation_verification_failure( + peer_id, + message_id, + FailedAtt::SingleUnaggregate { + attestation: single_attestation, + }, + None, + AttnError::BeaconChainError(error), + seen_timestamp, + ); + } + } + } + /// Process the aggregated attestation received from the gossip network and: /// /// - If it passes gossip propagation criteria, tell the network thread to forward it. @@ -2207,9 +2364,9 @@ impl NetworkBeaconProcessor { // network. let seen_clock = &self.chain.slot_clock.freeze_at(seen_timestamp); let hindsight_verification = - attestation_verification::verify_propagation_slot_range( + attestation_verification::verify_propagation_slot_range::<_, T::EthSpec>( seen_clock, - failed_att.attestation(), + failed_att.attestation_data(), &self.chain.spec, ); @@ -2294,6 +2451,19 @@ impl NetworkBeaconProcessor { "attn_agg_not_in_committee", ); } + AttnError::AttesterNotInCommittee { .. } => { + /* + * `SingleAttestation` from a validator is invalid because the `attester_index` is + * not in the claimed committee. There is no reason a non-faulty validator would + * send this message. + */ + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_single_not_in_committee", + ); + } AttnError::AttestationSupersetKnown { .. } => { /* * The aggregate attestation has already been observed on the network or in @@ -2439,6 +2609,17 @@ impl NetworkBeaconProcessor { }), }) } + FailedAtt::SingleUnaggregate { .. } => { + // This should never happen, as we handle the unknown head block case + // for `SingleAttestation`s separately and should not be able to hit + // an `UnknownHeadBlock` error. 
+ error!( + self.log, + "Dropping SingleAttestation instead of requeueing"; + "block_root" => ?beacon_block_root, + ); + return; + } FailedAtt::Unaggregate { attestation, subnet_id, @@ -2661,7 +2842,7 @@ impl NetworkBeaconProcessor { self.log, "Ignored attestation to finalized block"; "block_root" => ?beacon_block_root, - "attestation_slot" => failed_att.attestation().data().slot, + "attestation_slot" => failed_att.attestation_data().slot, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -2684,9 +2865,9 @@ impl NetworkBeaconProcessor { debug!( self.log, "Dropping attestation"; - "target_root" => ?failed_att.attestation().data().target.root, + "target_root" => ?failed_att.attestation_data().target.root, "beacon_block_root" => ?beacon_block_root, - "slot" => ?failed_att.attestation().data().slot, + "slot" => ?failed_att.attestation_data().slot, "type" => ?attestation_type, "error" => ?e, "peer_id" => % peer_id @@ -2705,7 +2886,7 @@ impl NetworkBeaconProcessor { self.log, "Unable to validate attestation"; "beacon_block_root" => ?beacon_block_root, - "slot" => ?failed_att.attestation().data().slot, + "slot" => ?failed_att.attestation_data().slot, "type" => ?attestation_type, "peer_id" => %peer_id, "error" => ?e, @@ -3106,9 +3287,9 @@ impl NetworkBeaconProcessor { message_id: MessageId, peer_id: PeerId, ) { - let is_timely = attestation_verification::verify_propagation_slot_range( + let is_timely = attestation_verification::verify_propagation_slot_range::<_, T::EthSpec>( &self.chain.slot_clock, - attestation, + attestation.data(), &self.chain.spec, ) .is_ok(); diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 5c1d4f24e5..c06a1f6ee3 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -94,46 +94,34 @@ impl NetworkBeaconProcessor { should_import: bool, seen_timestamp: 
Duration, ) -> Result<(), Error> { - let result = self.chain.with_committee_cache( - single_attestation.data.target.root, - single_attestation - .data - .slot - .epoch(T::EthSpec::slots_per_epoch()), - |committee_cache, _| { - let Some(committee) = committee_cache.get_beacon_committee( - single_attestation.data.slot, - single_attestation.committee_index, - ) else { - warn!( - self.log, - "No beacon committee for slot and index"; - "slot" => single_attestation.data.slot, - "index" => single_attestation.committee_index - ); - return Ok(Ok(())); - }; + let processor = self.clone(); + let process_individual = move |package: GossipAttestationPackage| { + let reprocess_tx = processor.reprocess_tx.clone(); + processor.process_gossip_attestation_to_convert( + package.message_id, + package.peer_id, + package.attestation, + package.subnet_id, + package.should_import, + Some(reprocess_tx), + package.seen_timestamp, + ) + }; - let attestation = single_attestation.to_attestation(committee.committee)?; - - Ok(self.send_unaggregated_attestation( - message_id.clone(), + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::GossipAttestationToConvert { + attestation: Box::new(GossipAttestationPackage { + message_id, peer_id, - attestation, + attestation: Box::new(single_attestation), subnet_id, should_import, seen_timestamp, - )) + }), + process_individual: Box::new(process_individual), }, - ); - - match result { - Ok(result) => result, - Err(e) => { - warn!(self.log, "Failed to send SingleAttestation"; "error" => ?e); - Ok(()) - } - } + }) } /// Create a new `Work` event for some unaggregated attestation. @@ -148,18 +136,19 @@ impl NetworkBeaconProcessor { ) -> Result<(), Error> { // Define a closure for processing individual attestations. 
let processor = self.clone(); - let process_individual = move |package: GossipAttestationPackage| { - let reprocess_tx = processor.reprocess_tx.clone(); - processor.process_gossip_attestation( - package.message_id, - package.peer_id, - package.attestation, - package.subnet_id, - package.should_import, - Some(reprocess_tx), - package.seen_timestamp, - ) - }; + let process_individual = + move |package: GossipAttestationPackage>| { + let reprocess_tx = processor.reprocess_tx.clone(); + processor.process_gossip_attestation( + package.message_id, + package.peer_id, + package.attestation, + package.subnet_id, + package.should_import, + Some(reprocess_tx), + package.seen_timestamp, + ) + }; // Define a closure for processing batches of attestations. let processor = self.clone(); diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 276b27b0f8..1485842edb 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -2,7 +2,6 @@ use crate::slot_data::SlotData; use crate::{test_utils::TestRandom, Hash256, Slot}; use crate::{Checkpoint, ForkVersionDeserialize}; use derivative::Derivative; -use safe_arith::ArithError; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::BitVector; @@ -12,22 +11,17 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; use super::{ - AggregateSignature, AttestationData, BitList, ChainSpec, CommitteeIndex, Domain, EthSpec, Fork, - SecretKey, Signature, SignedRoot, + AggregateSignature, AttestationData, BitList, ChainSpec, Domain, EthSpec, Fork, SecretKey, + Signature, SignedRoot, }; #[derive(Debug, PartialEq)] pub enum Error { SszTypesError(ssz_types::Error), AlreadySigned(usize), - SubnetCountIsZero(ArithError), IncorrectStateVariant, InvalidCommitteeLength, InvalidCommitteeIndex, - AttesterNotInCommittee(u64), - InvalidCommittee, - MissingCommittee, - NoCommitteeForSlotAndIndex { slot: Slot, index: CommitteeIndex }, } impl From 
for Error { @@ -587,38 +581,6 @@ pub struct SingleAttestation { pub signature: AggregateSignature, } -impl SingleAttestation { - pub fn to_attestation(&self, committee: &[usize]) -> Result, Error> { - let aggregation_bit = committee - .iter() - .enumerate() - .find_map(|(i, &validator_index)| { - if self.attester_index as usize == validator_index { - return Some(i); - } - None - }) - .ok_or(Error::AttesterNotInCommittee(self.attester_index))?; - - let mut committee_bits: BitVector = BitVector::default(); - committee_bits - .set(self.committee_index as usize, true) - .map_err(|_| Error::InvalidCommitteeIndex)?; - - let mut aggregation_bits = - BitList::with_capacity(committee.len()).map_err(|_| Error::InvalidCommitteeLength)?; - - aggregation_bits.set(aggregation_bit, true)?; - - Ok(Attestation::Electra(AttestationElectra { - aggregation_bits, - committee_bits, - data: self.data.clone(), - signature: self.signature.clone(), - })) - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index c348c3e8be..9bae770fe5 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -1,7 +1,6 @@ use super::{AggregateSignature, EthSpec, SignedRoot}; use crate::slot_data::SlotData; use crate::{test_utils::TestRandom, BitVector, Hash256, Slot, SyncCommitteeMessage}; -use safe_arith::ArithError; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -11,7 +10,6 @@ use tree_hash_derive::TreeHash; pub enum Error { SszTypesError(ssz_types::Error), AlreadySigned(usize), - SubnetCountIsZero(ArithError), } /// An aggregation of `SyncCommitteeMessage`s, used in creating a `SignedContributionAndProof`. 
From e3c721817e820514e327b03da2b68c0b809d88cd Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Fri, 7 Feb 2025 21:38:24 -0300 Subject: [PATCH 45/52] Remove duplicated fork_epoch and fork_version implementation (#6953) This PR adds an implementation to get fork_version and fork_epoch given a `ForkName`. I didn't realize that this is already implemented in the `ChainSpec` sorry - https://github.com/sigp/lighthouse/pull/6933 Remove duplicated fork_epoch and fork_version implementation --- consensus/types/src/fork_context.rs | 4 ++-- consensus/types/src/fork_name.rs | 27 +-------------------------- 2 files changed, 3 insertions(+), 28 deletions(-) diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 8f3632d895..a6360705ba 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -25,11 +25,11 @@ impl ForkContext { let fork_to_digest: HashMap = ForkName::list_all() .into_iter() .filter_map(|fork| { - if fork.fork_epoch(spec).is_some() { + if spec.fork_epoch(fork).is_some() { Some(( fork, ChainSpec::compute_fork_digest( - ForkName::fork_version(fork, spec), + spec.fork_version_for_name(fork), genesis_validators_root, ), )) diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 4a46baf57f..e92db49485 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -38,35 +38,10 @@ impl ForkName { .into_iter() // Skip Base .skip(1) - .map(|fork| (fork, fork.fork_epoch(spec))) + .map(|fork| (fork, spec.fork_epoch(fork))) .collect() } - pub fn fork_epoch(self, spec: &ChainSpec) -> Option { - match self { - Self::Base => Some(Epoch::new(0)), - Self::Altair => spec.altair_fork_epoch, - Self::Bellatrix => spec.bellatrix_fork_epoch, - Self::Capella => spec.capella_fork_epoch, - Self::Deneb => spec.deneb_fork_epoch, - Self::Electra => spec.electra_fork_epoch, - Self::Fulu => spec.fulu_fork_epoch, - } - 
} - - /// Returns the fork version of a fork - pub fn fork_version(self, spec: &ChainSpec) -> [u8; 4] { - match self { - Self::Base => spec.genesis_fork_version, - Self::Altair => spec.altair_fork_version, - Self::Bellatrix => spec.bellatrix_fork_version, - Self::Capella => spec.capella_fork_version, - Self::Deneb => spec.deneb_fork_version, - Self::Electra => spec.electra_fork_version, - Self::Fulu => spec.fulu_fork_version, - } - } - pub fn latest() -> ForkName { // This unwrap is safe as long as we have 1+ forks. It is tested below. *ForkName::list_all().last().unwrap() From 6032f158909920ffffaca851b96a3e847ce1277c Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Sun, 9 Feb 2025 12:15:30 +0200 Subject: [PATCH 46/52] Fix aggregate attestation v2 response (#6926) --- .../http_api/src/aggregate_attestation.rs | 65 +++++++++++++++++++ beacon_node/http_api/src/lib.rs | 42 +++--------- 2 files changed, 74 insertions(+), 33 deletions(-) create mode 100644 beacon_node/http_api/src/aggregate_attestation.rs diff --git a/beacon_node/http_api/src/aggregate_attestation.rs b/beacon_node/http_api/src/aggregate_attestation.rs new file mode 100644 index 0000000000..94b6acd2e6 --- /dev/null +++ b/beacon_node/http_api/src/aggregate_attestation.rs @@ -0,0 +1,65 @@ +use crate::api_types::GenericResponse; +use crate::unsupported_version_rejection; +use crate::version::{add_consensus_version_header, V1, V2}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::{self, EndpointVersion, Hash256, Slot}; +use std::sync::Arc; +use types::fork_versioned_response::EmptyMetadata; +use types::{CommitteeIndex, ForkVersionedResponse}; +use warp::{ + hyper::{Body, Response}, + reply::Reply, +}; + +pub fn get_aggregate_attestation( + slot: Slot, + attestation_data_root: &Hash256, + committee_index: Option, + endpoint_version: EndpointVersion, + chain: Arc>, +) -> Result, warp::reject::Rejection> { + if endpoint_version == V2 { + let Some(committee_index) = committee_index else 
{ + return Err(warp_utils::reject::custom_bad_request( + "missing committee index".to_string(), + )); + }; + let aggregate_attestation = chain + .get_aggregated_attestation_electra(slot, attestation_data_root, committee_index) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "unable to fetch aggregate: {:?}", + e + )) + })? + .ok_or_else(|| { + warp_utils::reject::custom_not_found("no matching aggregate found".to_string()) + })?; + let fork_name = chain.spec.fork_name_at_slot::(slot); + let fork_versioned_response = ForkVersionedResponse { + version: Some(fork_name), + metadata: EmptyMetadata {}, + data: aggregate_attestation, + }; + Ok(add_consensus_version_header( + warp::reply::json(&fork_versioned_response).into_response(), + fork_name, + )) + } else if endpoint_version == V1 { + let aggregate_attestation = chain + .get_pre_electra_aggregated_attestation_by_slot_and_root(slot, attestation_data_root) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "unable to fetch aggregate: {:?}", + e + )) + })? + .map(GenericResponse::from) + .ok_or_else(|| { + warp_utils::reject::custom_not_found("no matching aggregate found".to_string()) + })?; + Ok(warp::reply::json(&aggregate_attestation).into_response()) + } else { + return Err(unsupported_version_rejection(endpoint_version)); + } +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 77c9bcc34f..f932317339 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -5,6 +5,7 @@ //! There are also some additional, non-standard endpoints behind the `/lighthouse/` path which are //! used for development. 
+mod aggregate_attestation; mod attestation_performance; mod attester_duties; mod block_id; @@ -3384,40 +3385,15 @@ pub fn serve( not_synced_filter: Result<(), Rejection>, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { + task_spawner.blocking_response_task(Priority::P0, move || { not_synced_filter?; - let res = if endpoint_version == V2 { - let Some(committee_index) = query.committee_index else { - return Err(warp_utils::reject::custom_bad_request( - "missing committee index".to_string(), - )); - }; - chain.get_aggregated_attestation_electra( - query.slot, - &query.attestation_data_root, - committee_index, - ) - } else if endpoint_version == V1 { - // Do nothing - chain.get_pre_electra_aggregated_attestation_by_slot_and_root( - query.slot, - &query.attestation_data_root, - ) - } else { - return Err(unsupported_version_rejection(endpoint_version)); - }; - res.map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "unable to fetch aggregate: {:?}", - e - )) - })? - .map(api_types::GenericResponse::from) - .ok_or_else(|| { - warp_utils::reject::custom_not_found( - "no matching aggregate found".to_string(), - ) - }) + crate::aggregate_attestation::get_aggregate_attestation( + query.slot, + &query.attestation_data_root, + query.committee_index, + endpoint_version, + chain, + ) }) }, ); From 0344f68cfdea246d49ca9f4cfc641c3d20433270 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sun, 9 Feb 2025 21:15:33 +1100 Subject: [PATCH 47/52] Update attestation rewards API for Electra (#6819) Closes: - https://github.com/sigp/lighthouse/issues/6818 Use `MAX_EFFECTIVE_BALANCE_ELECTRA` (2048) for attestation reward calculations involving Electra. Add a new `InteropGenesisBuilder` that tries to provide a more flexible way to build genesis states. Unfortunately due to lifetime jank, it is quite unergonomic at present. We may want to refactor this builder in future to make it easier to use. 
--- .../beacon_chain/src/attestation_rewards.rs | 19 +- beacon_node/beacon_chain/src/test_utils.rs | 57 +++-- beacon_node/beacon_chain/tests/rewards.rs | 109 +++++++++ beacon_node/genesis/src/interop.rs | 229 +++++++++++------- beacon_node/genesis/src/lib.rs | 2 +- beacon_node/http_api/tests/fork_tests.rs | 59 +++-- 6 files changed, 341 insertions(+), 134 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index 3b37b09e40..4f7c480c8c 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -175,7 +175,9 @@ impl BeaconChain { let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?; - for effective_balance_eth in 1..=self.max_effective_balance_increment_steps()? { + for effective_balance_eth in + 1..=self.max_effective_balance_increment_steps(previous_epoch)? + { let effective_balance = effective_balance_eth.safe_mul(spec.effective_balance_increment)?; let base_reward = @@ -321,11 +323,14 @@ impl BeaconChain { }) } - fn max_effective_balance_increment_steps(&self) -> Result { + fn max_effective_balance_increment_steps( + &self, + rewards_epoch: Epoch, + ) -> Result { let spec = &self.spec; - let max_steps = spec - .max_effective_balance - .safe_div(spec.effective_balance_increment)?; + let fork_name = spec.fork_name_at_epoch(rewards_epoch); + let max_effective_balance = spec.max_effective_balance_for_fork(fork_name); + let max_steps = max_effective_balance.safe_div(spec.effective_balance_increment)?; Ok(max_steps) } @@ -386,7 +391,9 @@ impl BeaconChain { let mut ideal_attestation_rewards_list = Vec::new(); let sqrt_total_active_balance = SqrtTotalActiveBalance::new(total_balances.current_epoch()); - for effective_balance_step in 1..=self.max_effective_balance_increment_steps()? { + for effective_balance_step in + 1..=self.max_effective_balance_increment_steps(previous_epoch)? 
+ { let effective_balance = effective_balance_step.safe_mul(spec.effective_balance_increment)?; let base_reward = diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index e61146bfc8..8c9e3929f6 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -31,7 +31,7 @@ use execution_layer::{ ExecutionLayer, }; use futures::channel::mpsc::Receiver; -pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; +pub use genesis::{InteropGenesisBuilder, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; use kzg::trusted_setup::get_trusted_setup; use kzg::{Kzg, TrustedSetup}; @@ -232,6 +232,7 @@ pub struct Builder { mock_execution_layer: Option>, testing_slot_clock: Option, validator_monitor_config: Option, + genesis_state_builder: Option>, import_all_data_columns: bool, runtime: TestRuntime, log: Logger, @@ -253,16 +254,22 @@ impl Builder> { ) .unwrap(), ); + let genesis_state_builder = self.genesis_state_builder.take().unwrap_or_else(|| { + // Set alternating withdrawal credentials if no builder is specified. 
+ InteropGenesisBuilder::default().set_alternating_eth1_withdrawal_credentials() + }); + let mutator = move |builder: BeaconChainBuilder<_>| { let header = generate_genesis_header::(builder.get_spec(), false); - let genesis_state = interop_genesis_state_with_eth1::( - &validator_keypairs, - HARNESS_GENESIS_TIME, - Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - header, - builder.get_spec(), - ) - .expect("should generate interop state"); + let genesis_state = genesis_state_builder + .set_opt_execution_payload_header(header) + .build_genesis_state( + &validator_keypairs, + HARNESS_GENESIS_TIME, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + builder.get_spec(), + ) + .expect("should generate interop state"); builder .genesis_state(genesis_state) .expect("should build state using recent genesis") @@ -318,16 +325,22 @@ impl Builder> { .clone() .expect("cannot build without validator keypairs"); + let genesis_state_builder = self.genesis_state_builder.take().unwrap_or_else(|| { + // Set alternating withdrawal credentials if no builder is specified. 
+ InteropGenesisBuilder::default().set_alternating_eth1_withdrawal_credentials() + }); + let mutator = move |builder: BeaconChainBuilder<_>| { let header = generate_genesis_header::(builder.get_spec(), false); - let genesis_state = interop_genesis_state_with_eth1::( - &validator_keypairs, - HARNESS_GENESIS_TIME, - Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - header, - builder.get_spec(), - ) - .expect("should generate interop state"); + let genesis_state = genesis_state_builder + .set_opt_execution_payload_header(header) + .build_genesis_state( + &validator_keypairs, + HARNESS_GENESIS_TIME, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + builder.get_spec(), + ) + .expect("should generate interop state"); builder .genesis_state(genesis_state) .expect("should build state using recent genesis") @@ -375,6 +388,7 @@ where mock_execution_layer: None, testing_slot_clock: None, validator_monitor_config: None, + genesis_state_builder: None, import_all_data_columns: false, runtime, log, @@ -560,6 +574,15 @@ where self } + pub fn with_genesis_state_builder( + mut self, + f: impl FnOnce(InteropGenesisBuilder) -> InteropGenesisBuilder, + ) -> Self { + let builder = self.genesis_state_builder.take().unwrap_or_default(); + self.genesis_state_builder = Some(f(builder)); + self + } + pub fn build(self) -> BeaconChainHarness> { let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1); diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index be7045c54a..41e6467b0f 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -36,6 +36,38 @@ fn get_harness(spec: ChainSpec) -> BeaconChainHarness> { .keypairs(KEYPAIRS.to_vec()) .fresh_ephemeral_store() .chain_config(chain_config) + .mock_execution_layer() + .build(); + + harness.advance_slot(); + + harness +} + +fn get_electra_harness(spec: ChainSpec) -> BeaconChainHarness> { + let chain_config = ChainConfig { + 
reconstruct_historic_states: true, + ..Default::default() + }; + + let spec = Arc::new(spec); + + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.clone()) + .keypairs(KEYPAIRS.to_vec()) + .with_genesis_state_builder(|builder| { + builder.set_initial_balance_fn(Box::new(move |i| { + // Use a variety of balances between min activation balance and max effective balance. + let balance = spec.max_effective_balance_electra + / (i as u64 + 1) + / spec.effective_balance_increment + * spec.effective_balance_increment; + balance.max(spec.min_activation_balance) + })) + }) + .fresh_ephemeral_store() + .chain_config(chain_config) + .mock_execution_layer() .build(); harness.advance_slot(); @@ -560,6 +592,83 @@ async fn test_rewards_altair_inactivity_leak_justification_epoch() { assert_eq!(expected_balances, balances); } +#[tokio::test] +async fn test_rewards_electra() { + let spec = ForkName::Electra.make_genesis_spec(E::default_spec()); + let harness = get_electra_harness(spec.clone()); + let target_epoch = 0; + + // advance until epoch N + 1 and get initial balances + harness + .extend_slots((E::slots_per_epoch() * (target_epoch + 1)) as usize) + .await; + let mut expected_balances = harness.get_current_state().balances().to_vec(); + + // advance until epoch N + 2 and build proposal rewards map + let mut proposal_rewards_map = HashMap::new(); + let mut sync_committee_rewards_map = HashMap::new(); + for _ in 0..E::slots_per_epoch() { + let state = harness.get_current_state(); + let slot = state.slot() + Slot::new(1); + + // calculate beacon block rewards / penalties + let ((signed_block, _maybe_blob_sidecars), mut state) = + harness.make_block_return_pre_state(state, slot).await; + let beacon_block_reward = harness + .chain + .compute_beacon_block_reward(signed_block.message(), &mut state) + .unwrap(); + + let total_proposer_reward = proposal_rewards_map + .entry(beacon_block_reward.proposer_index) + .or_insert(0); + *total_proposer_reward += 
beacon_block_reward.total as i64; + + // calculate sync committee rewards / penalties + let reward_payload = harness + .chain + .compute_sync_committee_rewards(signed_block.message(), &mut state) + .unwrap(); + + for reward in reward_payload { + let total_sync_reward = sync_committee_rewards_map + .entry(reward.validator_index) + .or_insert(0); + *total_sync_reward += reward.reward; + } + + harness.extend_slots(1).await; + } + + // compute reward deltas for all validators in epoch N + let StandardAttestationRewards { + ideal_rewards, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) + .unwrap(); + + // assert ideal rewards are greater than 0 + assert_eq!( + ideal_rewards.len() as u64, + spec.max_effective_balance_electra / spec.effective_balance_increment + ); + assert!(ideal_rewards + .iter() + .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); + + // apply attestation, proposal, and sync committee rewards and penalties to initial balances + apply_attestation_rewards(&mut expected_balances, total_rewards); + apply_other_rewards(&mut expected_balances, &proposal_rewards_map); + apply_other_rewards(&mut expected_balances, &sync_committee_rewards_map); + + // verify expected balances against actual balances + let balances: Vec = harness.get_current_state().balances().to_vec(); + + assert_eq!(expected_balances, balances); +} + #[tokio::test] async fn test_rewards_base_subset_only() { let spec = ForkName::Base.make_genesis_spec(E::default_spec()); diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index 90c4ad6e66..4fccc0393b 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -24,10 +24,134 @@ fn eth1_withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 Hash256::from_slice(&credentials) } +pub type WithdrawalCredentialsFn = + Box Fn(usize, &'a PublicKey, &'a ChainSpec) -> Hash256>; + /// Builds a 
genesis state as defined by the Eth2 interop procedure (see below). /// /// Reference: /// https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start +#[derive(Default)] +pub struct InteropGenesisBuilder { + /// Mapping from validator index to initial balance for each validator. + /// + /// If `None`, then the default balance of 32 ETH will be used. + initial_balance_fn: Option u64>>, + + /// Mapping from validator index and pubkey to withdrawal credentials for each validator. + /// + /// If `None`, then default BLS withdrawal credentials will be used. + withdrawal_credentials_fn: Option, + + /// The execution payload header to embed in the genesis state. + execution_payload_header: Option>, +} + +impl InteropGenesisBuilder { + pub fn new() -> Self { + Self::default() + } + + pub fn set_initial_balance_fn(mut self, initial_balance_fn: Box u64>) -> Self { + self.initial_balance_fn = Some(initial_balance_fn); + self + } + + pub fn set_withdrawal_credentials_fn( + mut self, + withdrawal_credentials_fn: WithdrawalCredentialsFn, + ) -> Self { + self.withdrawal_credentials_fn = Some(withdrawal_credentials_fn); + self + } + + pub fn set_alternating_eth1_withdrawal_credentials(self) -> Self { + self.set_withdrawal_credentials_fn(Box::new(alternating_eth1_withdrawal_credentials_fn)) + } + + pub fn set_execution_payload_header( + self, + execution_payload_header: ExecutionPayloadHeader, + ) -> Self { + self.set_opt_execution_payload_header(Some(execution_payload_header)) + } + + pub fn set_opt_execution_payload_header( + mut self, + execution_payload_header: Option>, + ) -> Self { + self.execution_payload_header = execution_payload_header; + self + } + + pub fn build_genesis_state( + self, + keypairs: &[Keypair], + genesis_time: u64, + eth1_block_hash: Hash256, + spec: &ChainSpec, + ) -> Result, String> { + // Generate withdrawal credentials using provided function, or default BLS. 
+ let withdrawal_credentials_fn = self.withdrawal_credentials_fn.unwrap_or_else(|| { + Box::new(|_, pubkey, spec| bls_withdrawal_credentials(pubkey, spec)) + }); + + let withdrawal_credentials = keypairs + .iter() + .map(|key| &key.pk) + .enumerate() + .map(|(i, pubkey)| withdrawal_credentials_fn(i, pubkey, spec)) + .collect::>(); + + // Generate initial balances. + let initial_balance_fn = self + .initial_balance_fn + .unwrap_or_else(|| Box::new(|_| spec.max_effective_balance)); + + let eth1_timestamp = 2_u64.pow(40); + + let initial_balances = (0..keypairs.len()) + .map(initial_balance_fn) + .collect::>(); + + let datas = keypairs + .into_par_iter() + .zip(withdrawal_credentials.into_par_iter()) + .zip(initial_balances.into_par_iter()) + .map(|((keypair, withdrawal_credentials), amount)| { + let mut data = DepositData { + withdrawal_credentials, + pubkey: keypair.pk.clone().into(), + amount, + signature: Signature::empty().into(), + }; + + data.signature = data.create_signature(&keypair.sk, spec); + + data + }) + .collect::>(); + + let mut state = initialize_beacon_state_from_eth1( + eth1_block_hash, + eth1_timestamp, + genesis_deposits(datas, spec)?, + self.execution_payload_header, + spec, + ) + .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; + + *state.genesis_time_mut() = genesis_time; + + // Invalidate all the caches after all the manual state surgery. 
+ state + .drop_all_caches() + .map_err(|e| format!("Unable to drop caches: {:?}", e))?; + + Ok(state) + } +} + pub fn interop_genesis_state( keypairs: &[Keypair], genesis_time: u64, @@ -35,18 +159,21 @@ pub fn interop_genesis_state( execution_payload_header: Option>, spec: &ChainSpec, ) -> Result, String> { - let withdrawal_credentials = keypairs - .iter() - .map(|keypair| bls_withdrawal_credentials(&keypair.pk, spec)) - .collect::>(); - interop_genesis_state_with_withdrawal_credentials::( - keypairs, - &withdrawal_credentials, - genesis_time, - eth1_block_hash, - execution_payload_header, - spec, - ) + InteropGenesisBuilder::new() + .set_opt_execution_payload_header(execution_payload_header) + .build_genesis_state(keypairs, genesis_time, eth1_block_hash, spec) +} + +fn alternating_eth1_withdrawal_credentials_fn<'a>( + index: usize, + pubkey: &'a PublicKey, + spec: &'a ChainSpec, +) -> Hash256 { + if index % 2usize == 0usize { + bls_withdrawal_credentials(pubkey, spec) + } else { + eth1_withdrawal_credentials(pubkey, spec) + } } // returns an interop genesis state except every other @@ -58,80 +185,10 @@ pub fn interop_genesis_state_with_eth1( execution_payload_header: Option>, spec: &ChainSpec, ) -> Result, String> { - let withdrawal_credentials = keypairs - .iter() - .enumerate() - .map(|(index, keypair)| { - if index % 2 == 0 { - bls_withdrawal_credentials(&keypair.pk, spec) - } else { - eth1_withdrawal_credentials(&keypair.pk, spec) - } - }) - .collect::>(); - interop_genesis_state_with_withdrawal_credentials::( - keypairs, - &withdrawal_credentials, - genesis_time, - eth1_block_hash, - execution_payload_header, - spec, - ) -} - -pub fn interop_genesis_state_with_withdrawal_credentials( - keypairs: &[Keypair], - withdrawal_credentials: &[Hash256], - genesis_time: u64, - eth1_block_hash: Hash256, - execution_payload_header: Option>, - spec: &ChainSpec, -) -> Result, String> { - if keypairs.len() != withdrawal_credentials.len() { - return Err(format!( - "wrong 
number of withdrawal credentials, expected: {}, got: {}", - keypairs.len(), - withdrawal_credentials.len() - )); - } - - let eth1_timestamp = 2_u64.pow(40); - let amount = spec.max_effective_balance; - - let datas = keypairs - .into_par_iter() - .zip(withdrawal_credentials.into_par_iter()) - .map(|(keypair, &withdrawal_credentials)| { - let mut data = DepositData { - withdrawal_credentials, - pubkey: keypair.pk.clone().into(), - amount, - signature: Signature::empty().into(), - }; - - data.signature = data.create_signature(&keypair.sk, spec); - - data - }) - .collect::>(); - - let mut state = initialize_beacon_state_from_eth1( - eth1_block_hash, - eth1_timestamp, - genesis_deposits(datas, spec)?, - execution_payload_header, - spec, - ) - .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; - - *state.genesis_time_mut() = genesis_time; - - // Invalidate all the caches after all the manual state surgery. - state - .drop_all_caches() - .map_err(|e| format!("Unable to drop caches: {:?}", e))?; - - Ok(state) + InteropGenesisBuilder::new() + .set_alternating_eth1_withdrawal_credentials() + .set_opt_execution_payload_header(execution_payload_header) + .build_genesis_state(keypairs, genesis_time, eth1_block_hash, spec) } #[cfg(test)] diff --git a/beacon_node/genesis/src/lib.rs b/beacon_node/genesis/src/lib.rs index 3fb053bf88..1fba64aafb 100644 --- a/beacon_node/genesis/src/lib.rs +++ b/beacon_node/genesis/src/lib.rs @@ -7,6 +7,6 @@ pub use eth1::Eth1Endpoint; pub use eth1_genesis_service::{Eth1GenesisService, Statistics}; pub use interop::{ bls_withdrawal_credentials, interop_genesis_state, interop_genesis_state_with_eth1, - interop_genesis_state_with_withdrawal_credentials, DEFAULT_ETH1_BLOCK_HASH, + InteropGenesisBuilder, DEFAULT_ETH1_BLOCK_HASH, }; pub use types::test_utils::generate_deterministic_keypairs; diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index d6b8df33b3..10e1d01536 100644 --- 
a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -5,7 +5,7 @@ use beacon_chain::{ }; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; use execution_layer::test_utils::generate_genesis_header; -use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials}; +use genesis::{bls_withdrawal_credentials, InteropGenesisBuilder}; use http_api::test_utils::*; use std::collections::HashSet; use types::{ @@ -346,35 +346,46 @@ fn assert_server_indexed_error(error: eth2::Error, status_code: u16, indices: Ve #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn bls_to_execution_changes_update_all_around_capella_fork() { - let validator_count = 128; + const VALIDATOR_COUNT: usize = 128; let fork_epoch = Epoch::new(2); let spec = capella_spec(fork_epoch); let max_bls_to_execution_changes = E::max_bls_to_execution_changes(); // Use a genesis state with entirely BLS withdrawal credentials. - // Offset keypairs by `validator_count` to create keys distinct from the signing keys. - let validator_keypairs = generate_deterministic_keypairs(validator_count); - let withdrawal_keypairs = (0..validator_count) - .map(|i| Some(generate_deterministic_keypair(i + validator_count))) - .collect::>(); - let withdrawal_credentials = withdrawal_keypairs - .iter() - .map(|keypair| bls_withdrawal_credentials(&keypair.as_ref().unwrap().pk, &spec)) + // Offset keypairs by `VALIDATOR_COUNT` to create keys distinct from the signing keys. + let validator_keypairs = generate_deterministic_keypairs(VALIDATOR_COUNT); + let withdrawal_keypairs = (0..VALIDATOR_COUNT) + .map(|i| Some(generate_deterministic_keypair(i + VALIDATOR_COUNT))) .collect::>(); + + fn withdrawal_credentials_fn<'a>( + index: usize, + _: &'a types::PublicKey, + spec: &'a ChainSpec, + ) -> Hash256 { + // It is a bit inefficient to regenerate the whole keypair here, but this is a workaround. 
+ // `InteropGenesisBuilder` requires the `withdrawal_credentials_fn` to have + // a `'static` lifetime. + let keypair = generate_deterministic_keypair(index + VALIDATOR_COUNT); + bls_withdrawal_credentials(&keypair.pk, spec) + } + let header = generate_genesis_header(&spec, true); - let genesis_state = interop_genesis_state_with_withdrawal_credentials( - &validator_keypairs, - &withdrawal_credentials, - HARNESS_GENESIS_TIME, - Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - header, - &spec, - ) - .unwrap(); + + let genesis_state = InteropGenesisBuilder::new() + .set_opt_execution_payload_header(header) + .set_withdrawal_credentials_fn(Box::new(withdrawal_credentials_fn)) + .build_genesis_state( + &validator_keypairs, + HARNESS_GENESIS_TIME, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + &spec, + ) + .unwrap(); let tester = InteractiveTester::::new_with_initializer_and_mutator( Some(spec.clone()), - validator_count, + VALIDATOR_COUNT, Some(Box::new(|harness_builder| { harness_builder .keypairs(validator_keypairs) @@ -421,7 +432,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { let pubkey = &harness.get_withdrawal_keypair(validator_index).pk; // And the wrong secret key. let secret_key = &harness - .get_withdrawal_keypair((validator_index + 1) % validator_count as u64) + .get_withdrawal_keypair((validator_index + 1) % VALIDATOR_COUNT as u64) .sk; harness.make_bls_to_execution_change_with_keys( validator_index, @@ -433,7 +444,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { .collect::>(); // Submit some changes before Capella. Just enough to fill two blocks. - let num_pre_capella = validator_count / 4; + let num_pre_capella = VALIDATOR_COUNT / 4; let blocks_filled_pre_capella = 2; assert_eq!( num_pre_capella, @@ -488,7 +499,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { ); // Add Capella blocks which should be full of BLS to execution changes. 
- for i in 0..validator_count / max_bls_to_execution_changes { + for i in 0..VALIDATOR_COUNT / max_bls_to_execution_changes { let head_block_root = harness.extend_slots(1).await; let head_block = harness .chain @@ -534,7 +545,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { assert_server_indexed_error( error, 400, - (validator_count..3 * validator_count).collect(), + (VALIDATOR_COUNT..3 * VALIDATOR_COUNT).collect(), ); } } From e3e21f7516bbea6afa151867b475234822dee36e Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 10 Feb 2025 03:27:00 +0200 Subject: [PATCH 48/52] Schedule Sepolia and Holesky Electra forks (#6949) --- .../holesky/config.yaml | 14 +++++++++++++- .../sepolia/config.yaml | 18 ++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index d0b61422e0..e5f38b8c9b 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -35,7 +35,7 @@ DENEB_FORK_VERSION: 0x05017000 DENEB_FORK_EPOCH: 29696 # Electra ELECTRA_FORK_VERSION: 0x06017000 -ELECTRA_FORK_EPOCH: 18446744073709551615 +ELECTRA_FORK_EPOCH: 115968 # Fulu FULU_FORK_VERSION: 0x07017000 FULU_FORK_EPOCH: 18446744073709551615 @@ -127,6 +127,18 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # `uint64(6)` MAX_BLOBS_PER_BLOCK: 6 +# Electra +# 2**7 * 10**9 (= 128,000,000,000) +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 +# `9` +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# `uint64(9)` +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 + # DAS NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 diff --git 
a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 7564d8f0f6..af78332205 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -36,6 +36,10 @@ CAPELLA_FORK_EPOCH: 56832 DENEB_FORK_VERSION: 0x90000073 DENEB_FORK_EPOCH: 132608 +# Electra +ELECTRA_FORK_VERSION: 0x90000074 +ELECTRA_FORK_EPOCH: 222464 + # Time parameters # --------------------------------------------------------------- # 12 seconds @@ -73,6 +77,8 @@ PROPOSER_SCORE_BOOST: 40 REORG_HEAD_WEIGHT_THRESHOLD: 20 # 160% REORG_PARENT_WEIGHT_THRESHOLD: 160 +# `2` epochs +REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 # Deposit contract # --------------------------------------------------------------- @@ -122,6 +128,18 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # `uint64(6)` MAX_BLOBS_PER_BLOCK: 6 +# Electra +# 2**7 * 10**9 (= 128,000,000,000) +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 +# `9` +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# `uint64(9)` +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 + # DAS NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 From afdda83798b701489f10aa001496a6737c76d1c1 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 10 Feb 2025 03:27:03 +0200 Subject: [PATCH 49/52] Enable Light Client server by default (#6950) --- beacon_node/beacon_chain/src/chain_config.rs | 2 +- beacon_node/http_api/src/lib.rs | 2 +- beacon_node/lighthouse_network/src/config.rs | 2 +- beacon_node/src/cli.rs | 13 ++++++++-- beacon_node/src/config.rs | 14 +++++++--- book/src/help_bn.md | 5 ++-- lighthouse/tests/beacon_node.rs | 27 +++++++++++++++----- 7 files changed, 48 insertions(+), 17 deletions(-) diff --git 
a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index b8a607c886..fcdd57abbc 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -124,7 +124,7 @@ impl Default for ChainConfig { genesis_backfill: false, always_prepare_payload: false, epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, - enable_light_client_server: false, + enable_light_client_server: true, malicious_withhold_count: 0, enable_sampling: false, blob_publication_batches: 4, diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index f932317339..d6431fe729 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -171,7 +171,7 @@ impl Default for Config { sse_capacity_multiplier: 1, enable_beacon_processor: true, duplicate_block_status_code: StatusCode::ACCEPTED, - enable_light_client_server: false, + enable_light_client_server: true, target_peers: 100, } } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 55c1dbf491..843187d9a7 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -354,7 +354,7 @@ impl Default for Config { topics: Vec::new(), proposer_only: false, metrics_enabled: false, - enable_light_client_server: false, + enable_light_client_server: true, outbound_rate_limiter_config: None, invalid_block_storage: None, inbound_rate_limiter_config: None, diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 29faa7f220..942e2bc33e 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1494,9 +1494,18 @@ pub fn cli_app() -> Command { .arg( Arg::new("light-client-server") .long("light-client-server") - .help("Act as a full node supporting light clients on the p2p network \ - [experimental]") + .help("DEPRECATED") .action(ArgAction::SetTrue) + + .help_heading(FLAG_HEADER) + 
.display_order(0) + ) + .arg( + Arg::new("disable-light-client-server") + .long("disable-light-client-server") + .help("Disables light client support on the p2p network") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .display_order(0) ) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 0f8f3a8012..b6e7dd55f1 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -176,11 +176,19 @@ pub fn get_config( parse_required(cli_args, "http-duplicate-block-status")?; client_config.http_api.enable_light_client_server = - cli_args.get_flag("light-client-server"); + !cli_args.get_flag("disable-light-client-server"); } if cli_args.get_flag("light-client-server") { - client_config.chain.enable_light_client_server = true; + warn!( + log, + "The --light-client-server flag is deprecated. The light client server is enabled \ + by default" + ); + } + + if cli_args.get_flag("disable-light-client-server") { + client_config.chain.enable_light_client_server = false; } if let Some(cache_size) = clap_utils::parse_optional(cli_args, "shuffling-cache-size")? { @@ -1419,7 +1427,7 @@ pub fn set_network_config( } // Light client server config. - config.enable_light_client_server = parse_flag(cli_args, "light-client-server"); + config.enable_light_client_server = !parse_flag(cli_args, "disable-light-client-server"); // The self limiter is enabled by default. If the `self-limiter-protocols` flag is not provided, // the default params will be used. diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 3bc8a2c275..2feb2e306b 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -458,6 +458,8 @@ Flags: boot. --disable-inbound-rate-limiter Disables the inbound rate limiter (requests received by this node). + --disable-light-client-server + Disables light client support on the p2p network --disable-log-timestamp If present, do not include timestamps in logging output. 
--disable-malloc-tuning @@ -511,8 +513,7 @@ Flags: already-subscribed subnets, use with --subscribe-all-subnets to ensure all attestations are received for import. --light-client-server - Act as a full node supporting light clients on the p2p network - [experimental] + DEPRECATED --log-color Force outputting colors when emitting logs to the terminal. --logfile-compress diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 1063a80ff4..03314930b9 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -2504,9 +2504,9 @@ fn light_client_server_default() { CommandLineTest::new() .run_with_zero_port() .with_config(|config| { - assert!(!config.network.enable_light_client_server); - assert!(!config.chain.enable_light_client_server); - assert!(!config.http_api.enable_light_client_server); + assert!(config.network.enable_light_client_server); + assert!(config.chain.enable_light_client_server); + assert!(config.http_api.enable_light_client_server); }); } @@ -2522,13 +2522,26 @@ fn light_client_server_enabled() { } #[test] -fn light_client_http_server_enabled() { +fn light_client_server_disabled() { CommandLineTest::new() - .flag("http", None) - .flag("light-client-server", None) + .flag("disable-light-client-server", None) .run_with_zero_port() .with_config(|config| { - assert!(config.http_api.enable_light_client_server); + assert!(!config.network.enable_light_client_server); + assert!(!config.chain.enable_light_client_server); + }); +} + +#[test] +fn light_client_http_server_disabled() { + CommandLineTest::new() + .flag("http", None) + .flag("disable-light-client-server", None) + .run_with_zero_port() + .with_config(|config| { + assert!(!config.http_api.enable_light_client_server); + assert!(!config.network.enable_light_client_server); + assert!(!config.chain.enable_light_client_server); }); } From f35213ebe7e4327d0291ecf4a1966b66b7fdb145 Mon Sep 17 00:00:00 2001 From: Lion - dapplion 
<35266934+dapplion@users.noreply.github.com> Date: Sun, 9 Feb 2025 22:27:05 -0300 Subject: [PATCH 50/52] Sync active request byrange ids logs (#6914) - Re-opened PR from https://github.com/sigp/lighthouse/pull/6869 Writing and running tests I noted that the sync RPC requests are very verbose now. `DataColumnsByRootRequestId { id: 123, requester: Custody(CustodyId { requester: CustodyRequester(SingleLookupReqId { req_id: 121, lookup_id: 101 }) }) }` Since this Id is logged rather often I believe there's value in 1. Making them more succinct for log verbosity 2. Make them a string that's easy to copy and work with elastic Write custom `Display` implementations to render Ids in a more DX format _ DataColumnsByRootRequestId with a block lookup_ ``` 123/Custody/121/Lookup/101 ``` _DataColumnsByRangeRequestId_ ``` 123/122/RangeSync/0/5492900659401505034 ``` - This one will be shorter after https://github.com/sigp/lighthouse/pull/6868 Also made the logs format and text consistent across all methods --- .../src/service/api_types.rs | 122 ++++++++++- .../network/src/sync/network_context.rs | 206 ++++++++++-------- 2 files changed, 227 insertions(+), 101 deletions(-) diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 800d988d1a..e69c7aa5f7 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -1,15 +1,14 @@ -use std::sync::Arc; - -use libp2p::swarm::ConnectionId; -use types::{ - BlobSidecar, DataColumnSidecar, Epoch, EthSpec, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, -}; - use crate::rpc::{ methods::{ResponseTermination, RpcResponse, RpcSuccessResponse, StatusMessage}, SubstreamId, }; +use libp2p::swarm::ConnectionId; +use std::fmt::{Display, Formatter}; +use std::sync::Arc; +use types::{ + BlobSidecar, DataColumnSidecar, Epoch, 
EthSpec, Hash256, LightClientBootstrap, + LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, +}; /// Identifier of requests sent by a peer. pub type PeerRequestId = (ConnectionId, SubstreamId); @@ -235,9 +234,108 @@ impl slog::Value for RequestId { } } -// This custom impl reduces log boilerplate not printing `DataColumnsByRootRequestId` on each id log -impl std::fmt::Display for DataColumnsByRootRequestId { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{} {:?}", self.id, self.requester) +macro_rules! impl_display { + ($structname: ty, $format: literal, $($field:ident),*) => { + impl Display for $structname { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, $format, $(self.$field,)*) + } + } + }; +} + +// Since each request Id is deeply nested with various types, if rendered with Debug on logs they +// take too much visual space. This custom Display implementations make the overall Id short while +// not losing information +impl_display!(BlocksByRangeRequestId, "{}/{}", id, parent_request_id); +impl_display!(BlobsByRangeRequestId, "{}/{}", id, parent_request_id); +impl_display!(DataColumnsByRangeRequestId, "{}/{}", id, parent_request_id); +impl_display!(ComponentsByRangeRequestId, "{}/{}", id, requester); +impl_display!(DataColumnsByRootRequestId, "{}/{}", id, requester); +impl_display!(SingleLookupReqId, "{}/Lookup/{}", req_id, lookup_id); +impl_display!(CustodyId, "{}", requester); +impl_display!(SamplingId, "{}/{}", sampling_request_id, id); + +impl Display for DataColumnsByRootRequester { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Self::Custody(id) => write!(f, "Custody/{id}"), + Self::Sampling(id) => write!(f, "Sampling/{id}"), + } + } +} + +impl Display for CustodyRequester { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl Display for RangeRequestId { + fn fmt(&self, 
f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Self::RangeSync { chain_id, batch_id } => write!(f, "RangeSync/{batch_id}/{chain_id}"), + Self::BackfillSync { batch_id } => write!(f, "BackfillSync/{batch_id}"), + } + } +} + +impl Display for SamplingRequestId { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl Display for SamplingRequester { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Self::ImportedBlock(block) => write!(f, "ImportedBlock/{block}"), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn display_id_data_columns_by_root_custody() { + let id = DataColumnsByRootRequestId { + id: 123, + requester: DataColumnsByRootRequester::Custody(CustodyId { + requester: CustodyRequester(SingleLookupReqId { + req_id: 121, + lookup_id: 101, + }), + }), + }; + assert_eq!(format!("{id}"), "123/Custody/121/Lookup/101"); + } + + #[test] + fn display_id_data_columns_by_root_sampling() { + let id = DataColumnsByRootRequestId { + id: 123, + requester: DataColumnsByRootRequester::Sampling(SamplingId { + id: SamplingRequester::ImportedBlock(Hash256::ZERO), + sampling_request_id: SamplingRequestId(101), + }), + }; + assert_eq!(format!("{id}"), "123/Sampling/101/ImportedBlock/0x0000000000000000000000000000000000000000000000000000000000000000"); + } + + #[test] + fn display_id_data_columns_by_range() { + let id = DataColumnsByRangeRequestId { + id: 123, + parent_request_id: ComponentsByRangeRequestId { + id: 122, + requester: RangeRequestId::RangeSync { + chain_id: 54, + batch_id: Epoch::new(0), + }, + }, + }; + assert_eq!(format!("{id}"), "123/122/RangeSync/0/54"); } } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 0cd21de7f4..b03a446add 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -37,6 +37,7 @@ use requests::{ use 
slog::{debug, error, warn}; use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; +use std::fmt::Debug; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; @@ -535,17 +536,10 @@ impl SyncNetworkContext { } } - let req_id = self.next_id(); - let id = SingleLookupReqId { lookup_id, req_id }; - - debug!( - self.log, - "Sending BlocksByRoot Request"; - "method" => "BlocksByRoot", - "block_root" => ?block_root, - "peer" => %peer_id, - "id" => ?id - ); + let id = SingleLookupReqId { + lookup_id, + req_id: self.next_id(), + }; let request = BlocksByRootSingleRequest(block_root); @@ -563,6 +557,15 @@ impl SyncNetworkContext { }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; + debug!( + self.log, + "Sync RPC request sent"; + "method" => "BlocksByRoot", + "block_root" => ?block_root, + "peer" => %peer_id, + "id" => %id + ); + self.blocks_by_root_requests.insert( id, peer_id, @@ -572,7 +575,7 @@ impl SyncNetworkContext { BlocksByRootRequestItems::new(request), ); - Ok(LookupRequestResult::RequestSent(req_id)) + Ok(LookupRequestResult::RequestSent(id.req_id)) } /// Request necessary blobs for `block_root`. 
Requests only the necessary blobs by checking: @@ -618,22 +621,14 @@ impl SyncNetworkContext { return Ok(LookupRequestResult::NoRequestNeeded("no indices to fetch")); } - let req_id = self.next_id(); - let id = SingleLookupReqId { lookup_id, req_id }; - - debug!( - self.log, - "Sending BlobsByRoot Request"; - "method" => "BlobsByRoot", - "block_root" => ?block_root, - "blob_indices" => ?indices, - "peer" => %peer_id, - "id" => ?id - ); + let id = SingleLookupReqId { + lookup_id, + req_id: self.next_id(), + }; let request = BlobsByRootSingleBlockRequest { block_root, - indices, + indices: indices.clone(), }; // Lookup sync event safety: Refer to `Self::block_lookup_request` `network_send.send` call @@ -645,6 +640,16 @@ impl SyncNetworkContext { }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; + debug!( + self.log, + "Sync RPC request sent"; + "method" => "BlobsByRoot", + "block_root" => ?block_root, + "blob_indices" => ?indices, + "peer" => %peer_id, + "id" => %id + ); + self.blobs_by_root_requests.insert( id, peer_id, @@ -655,7 +660,7 @@ impl SyncNetworkContext { BlobsByRootRequestItems::new(request), ); - Ok(LookupRequestResult::RequestSent(req_id)) + Ok(LookupRequestResult::RequestSent(id.req_id)) } /// Request to send a single `data_columns_by_root` request to the network. 
@@ -666,35 +671,35 @@ impl SyncNetworkContext { request: DataColumnsByRootSingleBlockRequest, expect_max_responses: bool, ) -> Result, &'static str> { - let req_id = DataColumnsByRootRequestId { + let id = DataColumnsByRootRequestId { id: self.next_id(), requester, }; - debug!( - self.log, - "Sending DataColumnsByRoot Request"; - "method" => "DataColumnsByRoot", - "block_root" => ?request.block_root, - "indices" => ?request.indices, - "peer" => %peer_id, - "requester" => ?requester, - "req_id" => %req_id, - ); self.send_network_msg(NetworkMessage::SendRequest { peer_id, request: RequestType::DataColumnsByRoot(request.clone().into_request(&self.chain.spec)), - request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(req_id)), + request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(id)), })?; + debug!( + self.log, + "Sync RPC request sent"; + "method" => "DataColumnsByRoot", + "block_root" => ?request.block_root, + "indices" => ?request.indices, + "peer" => %peer_id, + "id" => %id, + ); + self.data_columns_by_root_requests.insert( - req_id, + id, peer_id, expect_max_responses, DataColumnsByRootRequestItems::new(request), ); - Ok(LookupRequestResult::RequestSent(req_id)) + Ok(LookupRequestResult::RequestSent(id)) } /// Request to fetch all needed custody columns of a specific block. This function may not send @@ -727,15 +732,17 @@ impl SyncNetworkContext { return Ok(LookupRequestResult::NoRequestNeeded("no indices to fetch")); } - let req_id = self.next_id(); - let id = SingleLookupReqId { lookup_id, req_id }; + let id = SingleLookupReqId { + lookup_id, + req_id: self.next_id(), + }; debug!( self.log, "Starting custody columns request"; "block_root" => ?block_root, "indices" => ?custody_indexes_to_fetch, - "id" => ?id + "id" => %id ); let requester = CustodyRequester(id); @@ -754,7 +761,7 @@ impl SyncNetworkContext { // created cannot return data immediately, it must send some request to the network // first. 
And there must exist some request, `custody_indexes_to_fetch` is not empty. self.custody_by_root_requests.insert(requester, request); - Ok(LookupRequestResult::RequestSent(req_id)) + Ok(LookupRequestResult::RequestSent(id.req_id)) } Err(e) => Err(RpcRequestSendError::CustodyRequestError(e)), } @@ -770,15 +777,6 @@ impl SyncNetworkContext { id: self.next_id(), parent_request_id, }; - debug!( - self.log, - "Sending BlocksByRange request"; - "method" => "BlocksByRange", - "count" => request.count(), - "epoch" => Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch()), - "peer" => %peer_id, - "id" => ?id, - ); self.network_send .send(NetworkMessage::SendRequest { peer_id, @@ -787,6 +785,16 @@ impl SyncNetworkContext { }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; + debug!( + self.log, + "Sync RPC request sent"; + "method" => "BlocksByRange", + "slots" => request.count(), + "epoch" => Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch()), + "peer" => %peer_id, + "id" => %id, + ); + self.blocks_by_range_requests.insert( id, peer_id, @@ -809,15 +817,6 @@ impl SyncNetworkContext { parent_request_id, }; let request_epoch = Slot::new(request.start_slot).epoch(T::EthSpec::slots_per_epoch()); - debug!( - self.log, - "Sending BlobsByRange requests"; - "method" => "BlobsByRange", - "count" => request.count, - "epoch" => request_epoch, - "peer" => %peer_id, - "id" => ?id, - ); // Create the blob request based on the blocks request. 
self.network_send @@ -828,6 +827,16 @@ impl SyncNetworkContext { }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; + debug!( + self.log, + "Sync RPC request sent"; + "method" => "BlobsByRange", + "slots" => request.count, + "epoch" => request_epoch, + "peer" => %peer_id, + "id" => %id, + ); + let max_blobs_per_block = self.chain.spec.max_blobs_per_block(request_epoch); self.blobs_by_range_requests.insert( id, @@ -850,16 +859,6 @@ impl SyncNetworkContext { id: self.next_id(), parent_request_id, }; - debug!( - self.log, - "Sending DataColumnsByRange requests"; - "method" => "DataColumnsByRange", - "count" => request.count, - "epoch" => Slot::new(request.start_slot).epoch(T::EthSpec::slots_per_epoch()), - "columns" => ?request.columns, - "peer" => %peer_id, - "id" => ?id, - ); self.send_network_msg(NetworkMessage::SendRequest { peer_id, @@ -868,6 +867,17 @@ impl SyncNetworkContext { }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; + debug!( + self.log, + "Sync RPC request sent"; + "method" => "DataColumnsByRange", + "slots" => request.count, + "epoch" => Slot::new(request.start_slot).epoch(T::EthSpec::slots_per_epoch()), + "columns" => ?request.columns, + "peer" => %peer_id, + "id" => %id, + ); + self.data_columns_by_range_requests.insert( id, peer_id, @@ -1011,8 +1021,8 @@ impl SyncNetworkContext { peer_id: PeerId, rpc_event: RpcEvent>>, ) -> Option>>> { - let response = self.blocks_by_root_requests.on_response(id, rpc_event); - let response = response.map(|res| { + let resp = self.blocks_by_root_requests.on_response(id, rpc_event); + let resp = resp.map(|res| { res.and_then(|(mut blocks, seen_timestamp)| { // Enforce that exactly one chunk = one block is returned. ReqResp behavior limits the // response count to at most 1. 
@@ -1024,10 +1034,7 @@ impl SyncNetworkContext { } }) }); - if let Some(Err(RpcResponseError::VerifyError(e))) = &response { - self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); - } - response + self.on_rpc_response_result(id, "BlocksByRoot", resp, peer_id, |_| 1) } pub(crate) fn on_single_blob_response( @@ -1036,8 +1043,8 @@ impl SyncNetworkContext { peer_id: PeerId, rpc_event: RpcEvent>>, ) -> Option>> { - let response = self.blobs_by_root_requests.on_response(id, rpc_event); - let response = response.map(|res| { + let resp = self.blobs_by_root_requests.on_response(id, rpc_event); + let resp = resp.map(|res| { res.and_then(|(blobs, seen_timestamp)| { if let Some(max_len) = blobs .first() @@ -1056,10 +1063,7 @@ impl SyncNetworkContext { } }) }); - if let Some(Err(RpcResponseError::VerifyError(e))) = &response { - self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); - } - response + self.on_rpc_response_result(id, "BlobsByRoot", resp, peer_id, |_| 1) } #[allow(clippy::type_complexity)] @@ -1072,7 +1076,7 @@ impl SyncNetworkContext { let resp = self .data_columns_by_root_requests .on_response(id, rpc_event); - self.report_rpc_response_errors(resp, peer_id) + self.on_rpc_response_result(id, "DataColumnsByRoot", resp, peer_id, |_| 1) } #[allow(clippy::type_complexity)] @@ -1083,7 +1087,7 @@ impl SyncNetworkContext { rpc_event: RpcEvent>>, ) -> Option>>>> { let resp = self.blocks_by_range_requests.on_response(id, rpc_event); - self.report_rpc_response_errors(resp, peer_id) + self.on_rpc_response_result(id, "BlocksByRange", resp, peer_id, |b| b.len()) } #[allow(clippy::type_complexity)] @@ -1094,7 +1098,7 @@ impl SyncNetworkContext { rpc_event: RpcEvent>>, ) -> Option>>>> { let resp = self.blobs_by_range_requests.on_response(id, rpc_event); - self.report_rpc_response_errors(resp, peer_id) + self.on_rpc_response_result(id, "BlobsByRangeRequest", resp, peer_id, |b| b.len()) } #[allow(clippy::type_complexity)] @@ -1107,14 +1111,38 @@ impl 
SyncNetworkContext { let resp = self .data_columns_by_range_requests .on_response(id, rpc_event); - self.report_rpc_response_errors(resp, peer_id) + self.on_rpc_response_result(id, "DataColumnsByRange", resp, peer_id, |d| d.len()) } - fn report_rpc_response_errors( + fn on_rpc_response_result usize>( &mut self, + id: I, + method: &'static str, resp: Option>, peer_id: PeerId, + get_count: F, ) -> Option> { + match &resp { + None => {} + Some(Ok((v, _))) => { + debug!( + self.log, + "Sync RPC request completed"; + "id" => %id, + "method" => method, + "count" => get_count(v) + ); + } + Some(Err(e)) => { + debug!( + self.log, + "Sync RPC request error"; + "id" => %id, + "method" => method, + "error" => ?e + ); + } + } if let Some(Err(RpcResponseError::VerifyError(e))) = &resp { self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); } From ceb5ecf3494ea61ab3f9370ac3b3c1c92aa6d280 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 10 Feb 2025 12:58:08 +1100 Subject: [PATCH 51/52] Update EF tests to spec v1.5.0-beta.2 (#6958) Update spec tests for recent v1.5.0-beta.2 release. 
There are no substantial changes for Electra and earlier, and the Fulu test updates to be made are tracked here: - https://github.com/sigp/lighthouse/issues/6957 - Add `SingleAttestation` SSZ tests - Add new `deposit_with_reorg` fork choice tests - Update tag to v1.5.0-beta.2 - Ignore Fulu tests --- testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 5 ++--- testing/ef_tests/src/handler.rs | 5 +++++ testing/ef_tests/src/type_name.rs | 1 + testing/ef_tests/tests/tests.rs | 12 ++++++++++++ 5 files changed, 21 insertions(+), 4 deletions(-) diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 7b507f8c50..c32a670e9a 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.5.0-beta.1 +TESTS_TAG := v1.5.0-beta.2 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 02a01555b4..8a662b72e3 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -49,9 +49,8 @@ excluded_paths = [ "bls12-381-tests/hash_to_G2", "tests/.*/eip6110", "tests/.*/whisk", - # TODO(electra): SingleAttestation tests are waiting on Eitan's PR - "tests/.*/electra/ssz_static/SingleAttestation", - "tests/.*/fulu/ssz_static/SingleAttestation", + # TODO(das): Fulu tests are ignored for now + "tests/.*/fulu", "tests/.*/fulu/ssz_static/MatrixEntry", ] diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index d1ddd6a48f..481c9b2169 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -680,6 +680,11 @@ impl Handler for ForkChoiceHandler { return false; } + // Deposit tests exist only after Electra. 
+ if self.handler_name == "deposit_with_reorg" && !fork_name.electra_enabled() { + return false; + } + // These tests check block validity (which may include signatures) and there is no need to // run them with fake crypto. cfg!(not(feature = "fake_crypto")) diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index 285ac951a6..dfee385958 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -155,6 +155,7 @@ type_name!(SignedBeaconBlockHeader); type_name_generic!(SignedContributionAndProof); type_name!(SignedVoluntaryExit); type_name!(SigningData); +type_name!(SingleAttestation); type_name_generic!(SyncCommitteeContribution); type_name!(SyncCommitteeMessage); type_name!(SyncAggregatorSelectionData); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index bba7efde49..1f5a7dd997 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -282,6 +282,12 @@ mod ssz_static { .run(); } + #[test] + fn single_attestation() { + SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); + } + #[test] fn attester_slashing() { SszStaticHandler::, MinimalEthSpec>::pre_electra() @@ -880,6 +886,12 @@ fn fork_choice_get_proposer_head() { ForkChoiceHandler::::new("get_proposer_head").run(); } +#[test] +fn fork_choice_deposit_with_reorg() { + ForkChoiceHandler::::new("deposit_with_reorg").run(); + // There is no mainnet variant for this test. 
+} + #[test] fn optimistic_sync() { OptimisticSyncHandler::::default().run(); From 62a0f25f97249029c9b7efeb443dd8d085ecc5ed Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 10 Feb 2025 12:58:11 +1100 Subject: [PATCH 52/52] IPv6 By Default (#6808) --- Cargo.lock | 38 ++++++++++++++++++++ beacon_node/lighthouse_network/Cargo.toml | 1 + beacon_node/lighthouse_network/src/config.rs | 13 +++++++ beacon_node/src/cli.rs | 6 ++-- beacon_node/src/config.rs | 26 ++++++++++---- book/src/help_bn.md | 5 +-- 6 files changed, 77 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4581fb9ce0..219b6df0d9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5432,6 +5432,7 @@ dependencies = [ "libp2p", "libp2p-mplex", "lighthouse_version", + "local-ip-address", "logging", "lru", "lru_cache", @@ -5512,6 +5513,18 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "local-ip-address" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782" +dependencies = [ + "libc", + "neli", + "thiserror 1.0.69", + "windows-sys 0.59.0", +] + [[package]] name = "lock_api" version = "0.4.12" @@ -5955,6 +5968,31 @@ dependencies = [ "tempfile", ] +[[package]] +name = "neli" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93062a0dce6da2517ea35f301dfc88184ce18d3601ec786a727a87bf535deca9" +dependencies = [ + "byteorder", + "libc", + "log", + "neli-proc-macros", +] + +[[package]] +name = "neli-proc-macros" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c8034b7fbb6f9455b2a96c19e6edf8dc9fc34c70449938d8ee3b4df363f61fe" +dependencies = [ + "either", + "proc-macro2", + "quote", + "serde", + "syn 1.0.109", +] + [[package]] name = "netlink-packet-core" version = "0.7.0" diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 
3c89ece442..b16ccc2a8c 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -22,6 +22,7 @@ hex = { workspace = true } itertools = { workspace = true } libp2p-mplex = "0.43" lighthouse_version = { workspace = true } +local-ip-address = "0.6" lru = { workspace = true } lru_cache = { workspace = true } metrics = { workspace = true } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 843187d9a7..5a6628439e 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -6,6 +6,7 @@ use directory::{ DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR, }; use libp2p::Multiaddr; +use local_ip_address::local_ipv6; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use std::net::{Ipv4Addr, Ipv6Addr}; @@ -266,6 +267,18 @@ impl Config { } } + /// A helper function to check if the local host has a globally routeable IPv6 address. If so, + /// returns true. + pub fn is_ipv6_supported() -> bool { + // If IPv6 is supported + let Ok(std::net::IpAddr::V6(local_ip)) = local_ipv6() else { + return false; + }; + + // If its globally routable, return true + is_global_ipv6(&local_ip) + } + pub fn listen_addrs(&self) -> &ListenAddress { &self.listen_addresses } diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 942e2bc33e..4c2daecdd3 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -147,16 +147,16 @@ pub fn cli_app() -> Command { .long("listen-address") .value_name("ADDRESS") .help("The address lighthouse will listen for UDP and TCP connections. 
To listen \ - over IpV4 and IpV6 set this flag twice with the different values.\n\ + over IPv4 and IPv6 set this flag twice with the different values.\n\ Examples:\n\ - --listen-address '0.0.0.0' will listen over IPv4.\n\ - --listen-address '::' will listen over IPv6.\n\ - --listen-address '0.0.0.0' --listen-address '::' will listen over both \ IPv4 and IPv6. The order of the given addresses is not relevant. However, \ - multiple IPv4, or multiple IPv6 addresses will not be accepted.") + multiple IPv4, or multiple IPv6 addresses will not be accepted. \ + If omitted, Lighthouse will listen on all interfaces, for both IPv4 and IPv6.") .action(ArgAction::Append) .num_args(0..=2) - .default_value("0.0.0.0") .display_order(0) ) .arg( diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index b6e7dd55f1..24d569bea2 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -905,12 +905,13 @@ pub fn parse_listening_addresses( ) -> Result { let listen_addresses_str = cli_args .get_many::("listen-address") - .expect("--listen_addresses has a default value"); + .unwrap_or_default(); let use_zero_ports = parse_flag(cli_args, "zero-ports"); // parse the possible ips let mut maybe_ipv4 = None; let mut maybe_ipv6 = None; + for addr_str in listen_addresses_str { let addr = addr_str.parse::().map_err(|parse_error| { format!("Failed to parse listen-address ({addr_str}) as an Ip address: {parse_error}") @@ -920,8 +921,8 @@ pub fn parse_listening_addresses( IpAddr::V4(v4_addr) => match &maybe_ipv4 { Some(first_ipv4_addr) => { return Err(format!( - "When setting the --listen-address option twice, use an IpV4 address and an Ipv6 address. \ - Got two IpV4 addresses {first_ipv4_addr} and {v4_addr}" + "When setting the --listen-address option twice, use an IPv4 address and an IPv6 address. 
\ + Got two IPv4 addresses {first_ipv4_addr} and {v4_addr}" )); } None => maybe_ipv4 = Some(v4_addr), @@ -929,8 +930,8 @@ pub fn parse_listening_addresses( IpAddr::V6(v6_addr) => match &maybe_ipv6 { Some(first_ipv6_addr) => { return Err(format!( - "When setting the --listen-address option twice, use an IpV4 address and an Ipv6 address. \ - Got two IpV6 addresses {first_ipv6_addr} and {v6_addr}" + "When setting the --listen-address option twice, use an IPv4 address and an IPv6 address. \ + Got two IPv6 addresses {first_ipv6_addr} and {v6_addr}" )); } None => maybe_ipv6 = Some(v6_addr), @@ -984,11 +985,22 @@ pub fn parse_listening_addresses( format!("Failed to parse --quic6-port as an integer: {parse_error}") })?; + // Here we specify the default listening addresses for Lighthouse. + // By default, we listen on 0.0.0.0. + // + // IF the host supports a globally routable IPv6 address, we also listen on ::. + if matches!((maybe_ipv4, maybe_ipv6), (None, None)) { + maybe_ipv4 = Some(Ipv4Addr::UNSPECIFIED); + + if NetworkConfig::is_ipv6_supported() { + maybe_ipv6 = Some(Ipv6Addr::UNSPECIFIED); + } + } + // Now put everything together let listening_addresses = match (maybe_ipv4, maybe_ipv6) { (None, None) => { - // This should never happen unless clap is broken - return Err("No listening addresses provided".into()); + unreachable!("This path is handled above this match statement"); } (None, Some(ipv6)) => { // A single ipv6 address was provided. Set the ports diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 2feb2e306b..cbcb1ec5a3 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -230,7 +230,7 @@ Options: peer without an ENR. --listen-address [
    ...] The address lighthouse will listen for UDP and TCP connections. To - listen over IpV4 and IpV6 set this flag twice with the different + listen over IPv4 and IPv6 set this flag twice with the different values. Examples: - --listen-address '0.0.0.0' will listen over IPv4. @@ -238,7 +238,8 @@ Options: - --listen-address '0.0.0.0' --listen-address '::' will listen over both IPv4 and IPv6. The order of the given addresses is not relevant. However, multiple IPv4, or multiple IPv6 addresses will not be - accepted. [default: 0.0.0.0] + accepted. If omitted, Lighthouse will listen on all interfaces, for + both IPv4 and IPv6. --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON]