From 229f883968b8f6a9ead66a6aa6ffcff36a496455 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 May 2022 03:27:30 +0000 Subject: [PATCH 001/184] Avoid parallel fork choice runs during sync (#3217) ## Issue Addressed Fixes an issue that @paulhauner found with the v2.3.0 release candidate whereby the fork choice runs introduced by #3168 tripped over each other during sync: ``` May 24 23:06:40.542 WARN Error signalling fork choice waiter slot: 3884129, error: ForkChoiceSignalOutOfOrder { current: Slot(3884131), latest: Slot(3884129) }, service: beacon ``` This can occur because fork choice is called from the state advance _and_ the per-slot task. When one of these runs takes a long time it can end up finishing after a run from a later slot, tripping the error above. The problem is resolved by not running either of these fork choice calls during sync. Additionally, these parallel fork choice runs were causing issues in the database: ``` May 24 07:49:05.098 WARN Found a chain that should already have been pruned, head_slot: 92925, head_block_root: 0xa76c7bf1b98e54ed4b0d8686efcfdf853484e6c2a4c67e91cbf19e5ad1f96b17, service: beacon May 24 07:49:05.101 WARN Database migration failed error: HotColdDBError(FreezeSlotError { current_split_slot: Slot(92608), proposed_split_slot: Slot(92576) }), service: beacon ``` In this case, two fork choice calls triggering the finalization processing were being processed out of order due to differences in their processing time, causing the background migrator to try to advance finalization _backwards_ :flushed:. Removing the parallel fork choice runs from sync effectively addresses the issue, because these runs are most likely to have different finalized checkpoints (because of the speed at which fork choice advances during sync). In theory it's still possible to process updates out of order if any other fork choice runs end up completing out of order, but this should be much less common. 
Fixing out of order fork choice runs in general is difficult as it requires architectural changes like serialising fork choice updates through a single thread, or locking fork choice along with the head when it is mutated (https://github.com/sigp/lighthouse/pull/3175). ## Proposed Changes * Don't run per-slot fork choice during sync (if head is older than 4 slots) * Don't run state-advance fork choice during sync (if head is older than 4 slots) * Check for monotonic finalization updates in the background migrator. This is a good defensive check to have, and I'm not sure why we didn't have it before (we may have had it and wrongly removed it). --- beacon_node/beacon_chain/src/beacon_chain.rs | 21 ++++++++-- beacon_node/beacon_chain/src/migrate.rs | 41 +++++++++++++++++-- .../beacon_chain/src/state_advance_timer.rs | 10 +++++ 3 files changed, 65 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index a164460192..5d2b35727f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -123,6 +123,12 @@ const EARLY_ATTESTER_CACHE_HISTORIC_SLOTS: u64 = 4; /// If the head block is older than this value, don't bother preparing beacon proposers. const PREPARE_PROPOSER_HISTORIC_EPOCHS: u64 = 4; +/// If the head is more than `MAX_PER_SLOT_FORK_CHOICE_DISTANCE` slots behind the wall-clock slot, DO NOT +/// run the per-slot tasks (primarily fork choice). +/// +/// This prevents unnecessary work during sync. +const MAX_PER_SLOT_FORK_CHOICE_DISTANCE: u64 = 4; + /// Reported to the user when the justified block has an invalid execution payload. 
pub const INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON: &str = "Justified block has an invalid execution payload."; @@ -4412,6 +4418,18 @@ impl BeaconChain { pub fn per_slot_task(self: &Arc) { trace!(self.log, "Running beacon chain per slot tasks"); if let Some(slot) = self.slot_clock.now() { + // Always run the light-weight pruning tasks (these structures should be empty during + // sync anyway). + self.naive_aggregation_pool.write().prune(slot); + self.block_times_cache.write().prune(slot); + + // Don't run heavy-weight tasks during sync. + if self.best_slot().map_or(true, |head_slot| { + head_slot + MAX_PER_SLOT_FORK_CHOICE_DISTANCE < slot + }) { + return; + } + // Run fork choice and signal to any waiting task that it has completed. if let Err(e) = self.fork_choice() { error!( @@ -4434,9 +4452,6 @@ impl BeaconChain { ); } } - - self.naive_aggregation_pool.write().prune(slot); - self.block_times_cache.write().prune(slot); } } diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 2c2ce0aa1a..1c0d9c4ed3 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -55,7 +55,13 @@ pub enum PruningOutcome { Successful { old_finalized_checkpoint: Checkpoint, }, - DeferredConcurrentMutation, + /// The run was aborted because the new finalized checkpoint is older than the previous one. + OutOfOrderFinalization { + old_finalized_checkpoint: Checkpoint, + new_finalized_checkpoint: Checkpoint, + }, + /// The run was aborted due to a concurrent mutation of the head tracker. + DeferredConcurrentHeadTrackerMutation, } /// Logic errors that can occur during pruning, none of these should ever happen. 
@@ -68,6 +74,10 @@ pub enum PruningError { MissingInfoForCanonicalChain { slot: Slot, }, + FinalizedStateOutOfOrder { + old_finalized_checkpoint: Checkpoint, + new_finalized_checkpoint: Checkpoint, + }, UnexpectedEqualStateRoots, UnexpectedUnequalStateRoots, } @@ -223,7 +233,7 @@ impl, Cold: ItemStore> BackgroundMigrator old_finalized_checkpoint, - Ok(PruningOutcome::DeferredConcurrentMutation) => { + Ok(PruningOutcome::DeferredConcurrentHeadTrackerMutation) => { warn!( log, "Pruning deferred because of a concurrent mutation"; @@ -231,8 +241,21 @@ impl, Cold: ItemStore> BackgroundMigrator { + warn!( + log, + "Ignoring out of order finalization request"; + "old_finalized_epoch" => old_finalized_checkpoint.epoch, + "new_finalized_epoch" => new_finalized_checkpoint.epoch, + "message" => "this is expected occasionally due to a (harmless) race condition" + ); + return; + } Err(e) => { - warn!(log, "Block pruning failed"; "error" => format!("{:?}", e)); + warn!(log, "Block pruning failed"; "error" => ?e); return; } }; @@ -347,6 +370,16 @@ impl, Cold: ItemStore> BackgroundMigrator new_finalized_slot { + return Ok(PruningOutcome::OutOfOrderFinalization { + old_finalized_checkpoint, + new_finalized_checkpoint, + }); + } + debug!( log, "Starting database pruning"; @@ -523,7 +556,7 @@ impl, Cold: ItemStore> BackgroundMigrator( let next_slot = current_slot + 1; executor.spawn_blocking( move || { + // Don't run fork choice during sync. 
+ if beacon_chain.best_slot().map_or(true, |head_slot| { + head_slot + MAX_FORK_CHOICE_DISTANCE < current_slot + }) { + return; + } + if let Err(e) = beacon_chain.fork_choice_at_slot(next_slot) { warn!( log, From f4aa17ef852d24973cda2c00245c56aee0e0dbe5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 25 May 2022 05:29:26 +0000 Subject: [PATCH 002/184] v2.3.0-rc.0 (#3218) ## Issue Addressed NA ## Proposed Changes Bump versions ## Additional Info NA --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index efb2c9fa3f..ad105ad48b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -337,7 +337,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.2.1" +version = "2.3.0-rc.0" dependencies = [ "beacon_chain", "clap", @@ -493,7 +493,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.2.1" +version = "2.3.0-rc.0" dependencies = [ "beacon_node", "clap", @@ -2889,7 +2889,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.2.1" +version = "2.3.0-rc.0" dependencies = [ "account_utils", "bls", @@ -3382,7 +3382,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.2.1" +version = "2.3.0-rc.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 986ff7a615..b25a971cfd 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.2.1" +version = "2.3.0-rc.0" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index b50079f195..de99d6fd02 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs 
@@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.2.1-", - fallback = "Lighthouse/v2.2.1" + prefix = "Lighthouse/v2.3.0-rc.0-", + fallback = "Lighthouse/v2.3.0-rc.0" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 230eec8da1..0bf5ee2ceb 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "2.2.1" +version = "2.3.0-rc.0" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 83fd19c2bc..f558134a26 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "2.2.1" +version = "2.3.0-rc.0" authors = ["Sigma Prime "] edition = "2021" autotests = false From fd55373b88486c7a5601bc53da5f852e75f83191 Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 26 May 2022 02:05:16 +0000 Subject: [PATCH 003/184] Add new VC metrics for beacon node availability (#3193) ## Issue Addressed #3154 ## Proposed Changes Add three new metrics for the VC: 1. `vc_beacon_nodes_synced_count` 2. `vc_beacon_nodes_available_count` 3. 
`vc_beacon_nodes_total_count` Their values mirror the values present in the following log line: ``` Apr 08 17:25:17.000 INFO Connected to beacon node(s) synced: 4, available: 4, total: 4, service: notifier ``` --- validator_client/src/http_metrics/metrics.rs | 16 ++++++++++++++++ validator_client/src/lib.rs | 12 +++++++++++- validator_client/src/notifier.rs | 12 ++++++++++++ 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 29e52c3870..56c1299b3f 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -131,6 +131,22 @@ lazy_static::lazy_static! { &["endpoint"] ); + /* + * Beacon node availability metrics + */ + pub static ref AVAILABLE_BEACON_NODES_COUNT: Result = try_create_int_gauge( + "vc_beacon_nodes_available_count", + "Number of available beacon nodes", + ); + pub static ref SYNCED_BEACON_NODES_COUNT: Result = try_create_int_gauge( + "vc_beacon_nodes_synced_count", + "Number of synced beacon nodes", + ); + pub static ref TOTAL_BEACON_NODES_COUNT: Result = try_create_int_gauge( + "vc_beacon_nodes_total_count", + "Total number of beacon nodes", + ); + pub static ref ETH2_FALLBACK_CONFIGURED: Result = try_create_int_gauge( "sync_eth2_fallback_configured", "The number of configured eth2 fallbacks", diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 43f88b54f0..ce35a00351 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -306,8 +306,18 @@ impl ProductionValidatorClient { &http_metrics::metrics::ETH2_FALLBACK_CONFIGURED, num_nodes.saturating_sub(1) as i64, ); - // Initialize the number of connected, synced fallbacks to 0. + // Set the total beacon node count. + set_gauge( + &http_metrics::metrics::TOTAL_BEACON_NODES_COUNT, + num_nodes as i64, + ); + + // Initialize the number of connected, synced beacon nodes to 0. 
set_gauge(&http_metrics::metrics::ETH2_FALLBACK_CONNECTED, 0); + set_gauge(&http_metrics::metrics::SYNCED_BEACON_NODES_COUNT, 0); + // Initialize the number of connected, avaliable beacon nodes to 0. + set_gauge(&http_metrics::metrics::AVAILABLE_BEACON_NODES_COUNT, 0); + let mut beacon_nodes: BeaconNodeFallback<_, T> = BeaconNodeFallback::new(candidates, context.eth2_config.spec.clone(), log.clone()); diff --git a/validator_client/src/notifier.rs b/validator_client/src/notifier.rs index 6157027cb1..732ae68ff8 100644 --- a/validator_client/src/notifier.rs +++ b/validator_client/src/notifier.rs @@ -40,8 +40,20 @@ async fn notify( log: &Logger, ) { let num_available = duties_service.beacon_nodes.num_available().await; + set_gauge( + &http_metrics::metrics::AVAILABLE_BEACON_NODES_COUNT, + num_available as i64, + ); let num_synced = duties_service.beacon_nodes.num_synced().await; + set_gauge( + &http_metrics::metrics::SYNCED_BEACON_NODES_COUNT, + num_synced as i64, + ); let num_total = duties_service.beacon_nodes.num_total(); + set_gauge( + &http_metrics::metrics::TOTAL_BEACON_NODES_COUNT, + num_total as i64, + ); if num_synced > 0 { info!( log, From a7896a58cc18042a6ed567a8ec2288c682509fd0 Mon Sep 17 00:00:00 2001 From: Divma Date: Thu, 26 May 2022 02:05:17 +0000 Subject: [PATCH 004/184] move backfill sync jobs from highest priority to lowest (#3215) ## Issue Addressed #3212 ## Proposed Changes Move chain segments coming from back-fill syncing from highest priority to lowest ## Additional Info If this does not solve the issue, next steps would be lowering the batch size for back-fill sync, and as last resort throttling the processing of these chain segments --- .../network/src/beacon_processor/mod.rs | 20 ++++++++++++++++--- beacon_node/network/src/metrics.rs | 4 ++++ 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 3e25bd1442..76903705fb 100644 --- 
a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -869,6 +869,7 @@ impl BeaconProcessor { // Using a FIFO queue since blocks need to be imported sequentially. let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN); let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); + let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN); @@ -1110,6 +1111,9 @@ impl BeaconProcessor { // Check exits last since our validators don't get rewards from them. } else if let Some(item) = gossip_voluntary_exit_queue.pop() { self.spawn_worker(item, toolbox); + // Handle backfill sync chain segments. + } else if let Some(item) = backfill_chain_segment.pop() { + self.spawn_worker(item, toolbox); // This statement should always be the final else statement. } else { // Let the journal know that a worker is freed and there's nothing else @@ -1195,9 +1199,15 @@ impl BeaconProcessor { sync_contribution_queue.push(work) } Work::RpcBlock { .. } => rpc_block_queue.push(work, work_id, &self.log), - Work::ChainSegment { .. } => { - chain_segment_queue.push(work, work_id, &self.log) - } + Work::ChainSegment { ref process_id, .. } => match process_id { + ChainSegmentProcessId::RangeBatchId { .. } + | ChainSegmentProcessId::ParentLookup { .. } => { + chain_segment_queue.push(work, work_id, &self.log) + } + ChainSegmentProcessId::BackSyncBatchId { .. } => { + backfill_chain_segment.push(work, work_id, &self.log) + } + }, Work::Status { .. } => status_queue.push(work, work_id, &self.log), Work::BlocksByRangeRequest { .. 
} => { bbrange_queue.push(work, work_id, &self.log) @@ -1247,6 +1257,10 @@ impl BeaconProcessor { &metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL, chain_segment_queue.len() as i64, ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL, + backfill_chain_segment.len() as i64, + ); metrics::set_gauge( &metrics::BEACON_PROCESSOR_EXIT_QUEUE_TOTAL, gossip_voluntary_exit_queue.len() as i64, diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 02c491cb01..cc0165131c 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -161,6 +161,10 @@ lazy_static! { "beacon_processor_chain_segment_queue_total", "Count of chain segments from the rpc waiting to be verified." ); + pub static ref BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_backfill_chain_segment_queue_total", + "Count of backfill chain segments from the rpc waiting to be verified." + ); pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL: Result = try_create_int_counter( "beacon_processor_chain_segment_success_total", "Total number of chain segments successfully processed." From f675c865e2bbf78ae01c377a893a5b02abb6851d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 27 May 2022 04:29:46 +0000 Subject: [PATCH 005/184] Set Ropsten TTD to unrealistically high value (#3225) ## Issue Addressed NA ## Proposed Changes Updates Ropsten TTD as per https://github.com/eth-clients/merge-testnets/pull/11. 
## Additional Info NA --- .../built_in_network_configs/ropsten/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml b/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml index 45921aec53..264eaf8230 100644 --- a/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml @@ -23,7 +23,7 @@ ALTAIR_FORK_EPOCH: 500 # Merge BELLATRIX_FORK_VERSION: 0x80000071 BELLATRIX_FORK_EPOCH: 750 -TERMINAL_TOTAL_DIFFICULTY: 43531756765713534 +TERMINAL_TOTAL_DIFFICULTY: 100000000000000000000000 TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 From 6f732986f1a42bec9f7ef7f57a143836f585a119 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 30 May 2022 01:35:10 +0000 Subject: [PATCH 006/184] v2.3.0 (#3222) ## Issue Addressed NA ## Proposed Changes Please list or describe the changes introduced by this PR. ## Additional Info - Pending testing on our infra. 
**Please do not merge** --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ad105ad48b..7be5546b60 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -337,7 +337,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.3.0-rc.0" +version = "2.3.0" dependencies = [ "beacon_chain", "clap", @@ -493,7 +493,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.3.0-rc.0" +version = "2.3.0" dependencies = [ "beacon_node", "clap", @@ -2889,7 +2889,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.3.0-rc.0" +version = "2.3.0" dependencies = [ "account_utils", "bls", @@ -3382,7 +3382,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.3.0-rc.0" +version = "2.3.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index b25a971cfd..bc61d1756f 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.3.0-rc.0" +version = "2.3.0" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index de99d6fd02..97c17bd9bb 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.3.0-rc.0-", - fallback = "Lighthouse/v2.3.0-rc.0" + prefix = "Lighthouse/v2.3.0-", + fallback = "Lighthouse/v2.3.0" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 0bf5ee2ceb..0cd0786070 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "2.3.0-rc.0" +version = "2.3.0" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index f558134a26..de9875fd80 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "2.3.0-rc.0" +version = "2.3.0" authors = ["Sigma Prime "] edition = "2021" autotests = false From af5da1244e6d55076397abc05e2ab8ba46751043 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 31 May 2022 06:09:07 +0000 Subject: [PATCH 007/184] Fix links in docs (#3219) ## Issue Addressed N/A ## Proposed Changes Fix the link for `advanced-release-candidates.md` in the lighthouse book and add it to the summary page. --- book/src/SUMMARY.md | 1 + book/src/advanced-pre-releases.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 871b2c4ba8..e2a2eb37eb 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -43,6 +43,7 @@ * [Running a Slasher](./slasher.md) * [Redundancy](./redundancy.md) * [Pre-Releases](./advanced-pre-releases.md) + * [Release Candidates](./advanced-release-candidates.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/advanced-pre-releases.md b/book/src/advanced-pre-releases.md index b90bd631d4..f3f4a52304 100644 --- a/book/src/advanced-pre-releases.md +++ b/book/src/advanced-pre-releases.md @@ -1,4 +1,4 @@ # Pre-Releases -Pre-releases are now referred to as [Release Candidates][./advanced-pre-releases.md]. The terms may +Pre-releases are now referred to as [Release Candidates](./advanced-release-candidates.md). The terms may be used interchangeably. 
From ee18f6a9f7c3692cf2ee2769eb11f3b5929a06be Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 31 May 2022 06:09:08 +0000 Subject: [PATCH 008/184] Add `lcli indexed-attestations` (#3221) ## Proposed Changes It's reasonably often that we want to manually convert an attestation to indexed form. This PR adds an `lcli` command for doing this, using an SSZ state and a list of JSON attestations (as extracted from a JSON block) as input. --- lcli/src/indexed_attestations.rs | 48 ++++++++++++++++++++++++++++++++ lcli/src/main.rs | 23 +++++++++++++++ 2 files changed, 71 insertions(+) create mode 100644 lcli/src/indexed_attestations.rs diff --git a/lcli/src/indexed_attestations.rs b/lcli/src/indexed_attestations.rs new file mode 100644 index 0000000000..6e3bfa51d3 --- /dev/null +++ b/lcli/src/indexed_attestations.rs @@ -0,0 +1,48 @@ +use clap::ArgMatches; +use clap_utils::parse_required; +use state_processing::common::get_indexed_attestation; +use std::fs::File; +use std::io::Read; +use std::path::{Path, PathBuf}; +use types::*; + +fn read_file_bytes(filename: &Path) -> Result, String> { + let mut bytes = vec![]; + let mut file = File::open(filename) + .map_err(|e| format!("Unable to open {}: {}", filename.display(), e))?; + file.read_to_end(&mut bytes) + .map_err(|e| format!("Unable to read {}: {}", filename.display(), e))?; + Ok(bytes) +} + +pub fn run(matches: &ArgMatches) -> Result<(), String> { + let spec = &T::default_spec(); + + let state_file: PathBuf = parse_required(matches, "state")?; + let attestations_file: PathBuf = parse_required(matches, "attestations")?; + + let mut state = BeaconState::::from_ssz_bytes(&read_file_bytes(&state_file)?, spec) + .map_err(|e| format!("Invalid state: {:?}", e))?; + state + .build_all_committee_caches(spec) + .map_err(|e| format!("{:?}", e))?; + + let attestations: Vec> = + serde_json::from_slice(&read_file_bytes(&attestations_file)?) 
+ .map_err(|e| format!("Invalid attestation list: {:?}", e))?; + + let indexed_attestations = attestations + .into_iter() + .map(|att| { + let committee = state.get_beacon_committee(att.data.slot, att.data.index)?; + get_indexed_attestation(committee.committee, &att) + }) + .collect::, _>>() + .map_err(|e| format!("Error constructing indexed attestation: {:?}", e))?; + + let string_output = serde_json::to_string_pretty(&indexed_attestations) + .map_err(|e| format!("Unable to convert to JSON: {:?}", e))?; + println!("{}", string_output); + + Ok(()) +} diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 996bfc0ac7..0a36768d15 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -6,6 +6,7 @@ mod create_payload_header; mod deploy_deposit_contract; mod eth1_genesis; mod generate_bootnode_enr; +mod indexed_attestations; mod insecure_validators; mod interop_genesis; mod new_testnet; @@ -598,6 +599,26 @@ fn main() { .help("The number of nodes to divide the validator keys to"), ) ) + .subcommand( + SubCommand::with_name("indexed-attestations") + .about("Convert attestations to indexed form, using the committees from a state.") + .arg( + Arg::with_name("state") + .long("state") + .value_name("SSZ_STATE") + .takes_value(true) + .required(true) + .help("BeaconState to generate committees from (SSZ)"), + ) + .arg( + Arg::with_name("attestations") + .long("attestations") + .value_name("JSON_ATTESTATIONS") + .takes_value(true) + .required(true) + .help("List of Attestations to convert to indexed form (JSON)"), + ) + ) .get_matches(); let result = matches @@ -679,6 +700,8 @@ fn run( .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)), ("insecure-validators", Some(matches)) => insecure_validators::run(matches) .map_err(|e| format!("Failed to run insecure-validators command: {}", e)), + ("indexed-attestations", Some(matches)) => indexed_attestations::run::(matches) + .map_err(|e| format!("Failed to run indexed-attestations command: {}", e)), (other, _) 
=> Err(format!("Unknown subcommand {}. See --help.", other)), } } From 98c8ac1a87756bc2eb601b06c72585bd869caa21 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 31 May 2022 06:09:10 +0000 Subject: [PATCH 009/184] Fix typo in peer state transition log (#3224) ## Issue Addressed We were logging `out_finalized_epoch` instead of `our_finalized_epoch`. I noticed this ages ago but only just got around to fixing it. ## Additional Info I also reformatted the log line to respect the line length limit (`rustfmt` won't do it because it gets confused by the `;` in slog's log macros). --- beacon_node/network/src/sync/manager.rs | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 53480db88e..0003db6ab0 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -327,10 +327,17 @@ impl SyncManager { if let Some(was_updated) = update_sync_status { let is_connected = self.network_globals.peers.read().is_connected(peer_id); if was_updated { - debug!(self.log, "Peer transitioned sync state"; "peer_id" => %peer_id, "new_state" => rpr, - "our_head_slot" => local_sync_info.head_slot, "out_finalized_epoch" => local_sync_info.finalized_epoch, - "their_head_slot" => remote_sync_info.head_slot, "their_finalized_epoch" => remote_sync_info.finalized_epoch, - "is_connected" => is_connected); + debug!( + self.log, + "Peer transitioned sync state"; + "peer_id" => %peer_id, + "new_state" => rpr, + "our_head_slot" => local_sync_info.head_slot, + "our_finalized_epoch" => local_sync_info.finalized_epoch, + "their_head_slot" => remote_sync_info.head_slot, + "their_finalized_epoch" => remote_sync_info.finalized_epoch, + "is_connected" => is_connected + ); // A peer has transitioned its sync state. If the new state is "synced" we // inform the backfill sync that a new synced peer has joined us. 
From 16e49af8e10422ddde02bf3bcd57ba1d314426cd Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 31 May 2022 06:09:11 +0000 Subject: [PATCH 010/184] Use genesis slot for node/syncing (#3226) ## Issue Addressed NA ## Proposed Changes Resolves this error log emitted from the VC prior to genesis: ``` WARN Unable connect to beacon node error: ServerMessage(ErrorMessage { code: 500, message: "UNHANDLED_ERROR: UnableToReadSlot", stacktraces: [] }) ``` ## Additional Info NA --- beacon_node/http_api/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5e28ac6a7b..fa3b6a9d95 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1737,9 +1737,9 @@ pub fn serve( .head_info() .map(|info| info.slot) .map_err(warp_utils::reject::beacon_chain_error)?; - let current_slot = chain - .slot() - .map_err(warp_utils::reject::beacon_chain_error)?; + let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { + warp_utils::reject::custom_server_error("Unable to read slot clock".into()) + })?; // Taking advantage of saturating subtraction on slot. let sync_distance = current_slot - head_slot; From cc4b778b1fdf42c52ffeede95987a7d4517dbac0 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 31 May 2022 06:09:12 +0000 Subject: [PATCH 011/184] Inline `safe_arith` methods (#3229) ## Proposed Changes Speed up epoch processing by around 10% by inlining methods from the `safe_arith` crate. The Rust standard library uses `#[inline]` for the `checked_` functions that we're wrapping, so it makes sense for us to inline them too. ## Additional Info I conducted a brief statistical test on the block at slot [3858336](https://beaconcha.in/block/3858336) applied to the state at slot 3858335, which requires an epoch transition. 
The command used for testing was: ``` lcli transition-blocks --testnet-dir ./common/eth2_network_config/built_in_network_configs/mainnet --no-signature-verification state.ssz block.ssz output.ssz ``` The testing found that inlining reduced the epoch transition time from 398ms to 359ms, a reduction of 9.77%, which was found to be statistically significant with a two-tailed t-test (p < 0.01). Data and intermediate calculations can be found here: https://docs.google.com/spreadsheets/d/1tlf3eFjz3dcXeb9XVOn21953uYpc9RdQapPtcHGH1PY --- consensus/safe_arith/src/lib.rs | 8 ++++++ lcli/src/main.rs | 8 +++++- lcli/src/transition_blocks.rs | 50 +++++++++++++++++++++++++++++++-- 3 files changed, 62 insertions(+), 4 deletions(-) diff --git a/consensus/safe_arith/src/lib.rs b/consensus/safe_arith/src/lib.rs index ab5985a6e1..c1dbff4c7c 100644 --- a/consensus/safe_arith/src/lib.rs +++ b/consensus/safe_arith/src/lib.rs @@ -20,6 +20,7 @@ macro_rules! assign_method { #[doc = "Safe variant of `"] #[doc = $doc_op] #[doc = "`."] + #[inline] fn $name(&mut self, other: $rhs_ty) -> Result<()> { *self = self.$op(other)?; Ok(()) @@ -68,30 +69,37 @@ macro_rules! 
impl_safe_arith { const ZERO: Self = 0; const ONE: Self = 1; + #[inline] fn safe_add(&self, other: Self) -> Result { self.checked_add(other).ok_or(ArithError::Overflow) } + #[inline] fn safe_sub(&self, other: Self) -> Result { self.checked_sub(other).ok_or(ArithError::Overflow) } + #[inline] fn safe_mul(&self, other: Self) -> Result { self.checked_mul(other).ok_or(ArithError::Overflow) } + #[inline] fn safe_div(&self, other: Self) -> Result { self.checked_div(other).ok_or(ArithError::DivisionByZero) } + #[inline] fn safe_rem(&self, other: Self) -> Result { self.checked_rem(other).ok_or(ArithError::DivisionByZero) } + #[inline] fn safe_shl(&self, other: u32) -> Result { self.checked_shl(other).ok_or(ArithError::Overflow) } + #[inline] fn safe_shr(&self, other: u32) -> Result { self.checked_shr(other).ok_or(ArithError::Overflow) } diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 0a36768d15..c440f50008 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -103,7 +103,13 @@ fn main() { .required(true) .default_value("./output.ssz") .help("Path to output a SSZ file."), - ), + ) + .arg( + Arg::with_name("no-signature-verification") + .long("no-signature-verification") + .takes_value(false) + .help("Disable signature verification.") + ) ) .subcommand( SubCommand::with_name("pretty-ssz") diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index f78c6b005e..74be1e6284 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -7,6 +7,7 @@ use state_processing::{ use std::fs::File; use std::io::prelude::*; use std::path::{Path, PathBuf}; +use std::time::Instant; use types::{BeaconState, ChainSpec, EthSpec, SignedBeaconBlock}; pub fn run_transition_blocks( @@ -31,6 +32,13 @@ pub fn run_transition_blocks( .parse::() .map_err(|e| format!("Failed to parse output path: {}", e))?; + let no_signature_verification = matches.is_present("no-signature-verification"); + let signature_strategy = if no_signature_verification { + 
BlockSignatureStrategy::NoVerification + } else { + BlockSignatureStrategy::VerifyIndividual + }; + info!("Using {} spec", T::spec_name()); info!("Pre-state path: {:?}", pre_state_path); info!("Block path: {:?}", block_path); @@ -43,7 +51,9 @@ pub fn run_transition_blocks( let block: SignedBeaconBlock = load_from_ssz_with(&block_path, spec, SignedBeaconBlock::from_ssz_bytes)?; - let post_state = do_transition(pre_state, block, spec)?; + let t = Instant::now(); + let post_state = do_transition(pre_state, block, signature_strategy, spec)?; + println!("Total transition time: {}ms", t.elapsed().as_millis()); let mut output_file = File::create(output_path).map_err(|e| format!("Unable to create output file: {:?}", e))?; @@ -58,31 +68,58 @@ pub fn run_transition_blocks( fn do_transition( mut pre_state: BeaconState, block: SignedBeaconBlock, + signature_strategy: BlockSignatureStrategy, spec: &ChainSpec, ) -> Result, String> { + let t = Instant::now(); pre_state .build_all_caches(spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; + println!("Build caches: {}ms", t.elapsed().as_millis()); + + let t = Instant::now(); + pre_state + .update_tree_hash_cache() + .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?; + println!("Initial tree hash: {}ms", t.elapsed().as_millis()); // Transition the parent state to the block slot. 
+ let t = Instant::now(); for i in pre_state.slot().as_u64()..block.slot().as_u64() { per_slot_processing(&mut pre_state, None, spec) .map_err(|e| format!("Failed to advance slot on iteration {}: {:?}", i, e))?; } + println!("Slot processing: {}ms", t.elapsed().as_millis()); + let t = Instant::now(); + pre_state + .update_tree_hash_cache() + .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?; + println!("Pre-block tree hash: {}ms", t.elapsed().as_millis()); + + let t = Instant::now(); pre_state .build_all_caches(spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; + println!("Build all caches (again): {}ms", t.elapsed().as_millis()); + let t = Instant::now(); per_block_processing( &mut pre_state, &block, None, - BlockSignatureStrategy::VerifyIndividual, + signature_strategy, VerifyBlockRoot::True, spec, ) .map_err(|e| format!("State transition failed: {:?}", e))?; + println!("Process block: {}ms", t.elapsed().as_millis()); + + let t = Instant::now(); + pre_state + .update_tree_hash_cache() + .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?; + println!("Post-block tree hash: {}ms", t.elapsed().as_millis()); Ok(pre_state) } @@ -97,5 +134,12 @@ pub fn load_from_ssz_with( let mut bytes = vec![]; file.read_to_end(&mut bytes) .map_err(|e| format!("Unable to read from file {:?}: {:?}", path, e))?; - decoder(&bytes, spec).map_err(|e| format!("Ssz decode failed: {:?}", e)) + let t = Instant::now(); + let result = decoder(&bytes, spec).map_err(|e| format!("Ssz decode failed: {:?}", e)); + println!( + "SSZ decoding {}: {}ms", + path.display(), + t.elapsed().as_millis() + ); + result } From 8e1305a3d29130a6ccbee8a767640d475b0d8e28 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 31 May 2022 06:09:12 +0000 Subject: [PATCH 012/184] Use a stable tag for ubuntu in dockerfile (#3231) ## Issue Addressed N/A ## Proposed Changes Use stable version of ubuntu base image in dockerfile instead of using latest. 
This will help in narrowing down issues with docker images. --- Dockerfile | 2 +- Dockerfile.cross | 2 +- lcli/Dockerfile | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 76347e9bfe..aa2853ce4f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,7 +5,7 @@ ARG FEATURES ENV FEATURES $FEATURES RUN cd lighthouse && make -FROM ubuntu:latest +FROM ubuntu:22.04 RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \ libssl-dev \ ca-certificates \ diff --git a/Dockerfile.cross b/Dockerfile.cross index c8bd868878..e210c5bdfc 100644 --- a/Dockerfile.cross +++ b/Dockerfile.cross @@ -1,7 +1,7 @@ # This image is meant to enable cross-architecture builds. # It assumes the lighthouse binary has already been # compiled for `$TARGETPLATFORM` and moved to `./bin`. -FROM --platform=$TARGETPLATFORM ubuntu:latest +FROM --platform=$TARGETPLATFORM ubuntu:22.04 RUN apt-get update && apt-get install -y --no-install-recommends \ libssl-dev \ ca-certificates \ diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 27ec8cc86c..255f96eec1 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -8,6 +8,6 @@ ARG PORTABLE ENV PORTABLE $PORTABLE RUN cd lighthouse && make install-lcli -FROM ubuntu:latest +FROM ubuntu:22.04 RUN apt-get update && apt-get -y upgrade && apt-get clean && rm -rf /var/lib/apt/lists/* COPY --from=builder /usr/local/cargo/bin/lcli /usr/local/bin/lcli From 55ac423872e5087002887581943260ac4051e32f Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 3 Jun 2022 03:22:54 +0000 Subject: [PATCH 013/184] Emit log when fee recipient values are inconsistent (#3202) ## Issue Addressed #3156 ## Proposed Changes Emit a `WARN` log whenever the value of `fee_recipient` as returned from the EE is different from the value of `suggested_fee_recipient` as set on the BN, for example by the `--suggested-fee-recipient` CLI flag. 
## Additional Info I have set the log level to `WARN` since it is legal behaviour (meaning it isn't really an error but is important to know when it is occurring). If we feel like this behaviour is almost always undesired (caused by a misconfiguration or malicious EE) then an `ERRO` log would be more appropriate. Happy to change it in that case. --- beacon_node/execution_layer/src/lib.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 5aa4edd74a..d6acd5fe54 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -17,7 +17,7 @@ use payload_status::process_multiple_payload_statuses; pub use payload_status::PayloadStatus; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; -use slog::{crit, debug, error, info, trace, Logger}; +use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::HashMap; use std::convert::TryInto; @@ -531,6 +531,23 @@ impl ExecutionLayer { if let Some(preparation_data_entry) = self.proposer_preparation_data().await.get(&proposer_index) { + if let Some(suggested_fee_recipient) = self.inner.suggested_fee_recipient { + if preparation_data_entry.preparation_data.fee_recipient != suggested_fee_recipient + { + warn!( + self.log(), + "Inconsistent fee recipient"; + "msg" => "The fee recipient returned from the Execution Engine differs \ + from the suggested_fee_recipient set on the beacon node. This could \ + indicate that fees are being diverted to another address. 
Please \ + ensure that the value of suggested_fee_recipient is set correctly and \ + that the Execution Engine is trusted.", + "proposer_index" => ?proposer_index, + "fee_recipient" => ?preparation_data_entry.preparation_data.fee_recipient, + "suggested_fee_recipient" => ?suggested_fee_recipient, + ) + } + } // The values provided via the API have first priority. preparation_data_entry.preparation_data.fee_recipient } else if let Some(address) = self.inner.suggested_fee_recipient { From 20071975c7539a76d4abc509f64334d1604019db Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 3 Jun 2022 03:22:55 +0000 Subject: [PATCH 014/184] Switch Nethermind integration tests to use `master` branch (#3228) ## Issue Addressed N/A ## Proposed Changes Preemptively switch Nethermind integration tests to use the `master` branch along with the baked in `kiln` config. ## Additional Info There have been some spurious timeouts across CI so this also increases the timeout to 20s. --- .../src/genesis_json.rs | 74 ------------------- .../src/nethermind.rs | 21 ++---- .../src/test_rig.rs | 2 +- 3 files changed, 7 insertions(+), 90 deletions(-) diff --git a/testing/execution_engine_integration/src/genesis_json.rs b/testing/execution_engine_integration/src/genesis_json.rs index c0b94e22e8..87fdaec14a 100644 --- a/testing/execution_engine_integration/src/genesis_json.rs +++ b/testing/execution_engine_integration/src/genesis_json.rs @@ -40,77 +40,3 @@ pub fn geth_genesis_json() -> Value { "baseFeePerGas":"0x7" }) } - -/// Sourced from: -/// -/// https://github.com/NethermindEth/nethermind/blob/kiln/src/Nethermind/Chains/themerge_kiln_testvectors.json -pub fn nethermind_genesis_json() -> Value { - json!({ - "name": "TheMerge_Devnet", - "engine": { - "clique": { - "params": { - "period": 5, - "epoch": 30000 - } - } - }, - "params": { - "gasLimitBoundDivisor": "0x400", - "accountStartNonce": "0x0", - "maximumExtraDataSize": "0x20", - "minGasLimit": "0x1388", - "networkID": 1, - "eip150Transition": 
"0x0", - "eip155Transition": "0x0", - "eip158Transition": "0x0", - "eip160Transition": "0x0", - "eip161abcTransition": "0x0", - "eip161dTransition": "0x0", - "eip140Transition": "0x0", - "eip211Transition": "0x0", - "eip214Transition": "0x0", - "eip658Transition": "0x0", - "eip145Transition": "0x0", - "eip1014Transition": "0x0", - "eip1052Transition": "0x0", - "eip1283Transition": "0x0", - "eip1283DisableTransition": "0x0", - "eip152Transition": "0x0", - "eip1108Transition": "0x0", - "eip1344Transition": "0x0", - "eip1884Transition": "0x0", - "eip2028Transition": "0x0", - "eip2200Transition": "0x0", - "eip2565Transition": "0x0", - "eip2929Transition": "0x0", - "eip2930Transition": "0x0", - "eip1559Transition": "0x0", - "eip3198Transition": "0x0", - "eip3529Transition": "0x0", - "eip3541Transition": "0x0" - }, - "genesis": { - "seal": { - "ethereum": { - "nonce": "0x42", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000" - } - }, - "difficulty": "0x400000000", - "author": "0x0000000000000000000000000000000000000000", - "timestamp": "0x0", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "extraData":"0x0000000000000000000000000000000000000000000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit":"0x1C9C380", - "author": "0x0000000000000000000000000000000000000000", - "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "baseFeePerGas":"0x7" - }, - "accounts": { - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance":"0x6d6172697573766477000000" - } - } - }) -} diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index 833409c69e..7fb07c9e5a 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ 
b/testing/execution_engine_integration/src/nethermind.rs @@ -1,13 +1,12 @@ use crate::build_utils; use crate::execution_engine::GenericExecutionEngine; -use crate::genesis_json::nethermind_genesis_json; +use std::env; use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output}; -use std::{env, fs::File}; use tempfile::TempDir; use unused_port::unused_tcp_port; -const NETHERMIND_BRANCH: &str = "kiln"; +const NETHERMIND_BRANCH: &str = "master"; const NETHERMIND_REPO_URL: &str = "https://github.com/NethermindEth/nethermind"; fn build_result(repo_dir: &Path) -> Output { @@ -71,14 +70,7 @@ impl NethermindEngine { impl GenericExecutionEngine for NethermindEngine { fn init_datadir() -> TempDir { - let datadir = TempDir::new().unwrap(); - - let genesis_json_path = datadir.path().join("genesis.json"); - let mut file = File::create(&genesis_json_path).unwrap(); - let json = nethermind_genesis_json(); - serde_json::to_writer(&mut file, &json).unwrap(); - - datadir + TempDir::new().unwrap() } fn start_client( @@ -88,15 +80,14 @@ impl GenericExecutionEngine for NethermindEngine { jwt_secret_path: PathBuf, ) -> Child { let network_port = unused_tcp_port().unwrap(); - let genesis_json_path = datadir.path().join("genesis.json"); Command::new(Self::binary_path()) .arg("--datadir") .arg(datadir.path().to_str().unwrap()) .arg("--config") - .arg("themerge_kiln_testvectors") - .arg("--Init.ChainSpecPath") - .arg(genesis_json_path.to_str().unwrap()) + .arg("kiln") + .arg("--Merge.TerminalTotalDifficulty") + .arg("0") .arg("--JsonRpc.AdditionalRpcUrls") .arg(format!("http://localhost:{}|http;ws|net;eth;subscribe;engine;web3;client|no-auth,http://localhost:{}|http;ws|net;eth;subscribe;engine;web3;client", http_port, http_auth_port)) .arg("--JsonRpc.EnabledModules") diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 79661354de..77993df2b7 100644 --- 
a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -9,7 +9,7 @@ use types::{ MainnetEthSpec, Slot, Uint256, }; -const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(10); +const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(20); struct ExecutionPair { /// The Lighthouse `ExecutionLayer` struct, connected to the `execution_engine` via HTTP. From 47d57a290b6f733c5d6917ec463f426a670278e0 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 3 Jun 2022 06:05:03 +0000 Subject: [PATCH 015/184] Improve eth1 block cache sync (for Ropsten) (#3234) ## Issue Addressed Fix for the eth1 cache sync issue observed on Ropsten. ## Proposed Changes Ropsten blocks are so infrequent that they broke our algorithm for downloading eth1 blocks. We currently try to download forwards from the last block in our cache to the block with block number [`remote_highest_block - FOLLOW_DISTANCE + FOLLOW_DISTANCE / ETH1_BLOCK_TIME_TOLERANCE_FACTOR`](https://github.com/sigp/lighthouse/blob/6f732986f1a42bec9f7ef7f57a143836f585a119/beacon_node/eth1/src/service.rs#L489-L492). With the tolerance set to 4 this is insufficient because we lag by 1536 blocks, which is more like ~14 hours on Ropsten. This results in us having an incomplete eth1 cache, because we should cache all blocks between -16h and -8h. Even if we were to set the tolerance to 2 for the largest allowance, we would only look back 1024 blocks which is still more than 8 hours. For example consider this block https://ropsten.etherscan.io/block/12321390. The block from 1536 blocks earlier is 14 hours and 20 minutes before it: https://ropsten.etherscan.io/block/12319854. The block from 1024 blocks earlier is https://ropsten.etherscan.io/block/12320366, 8 hours and 48 minutes before. - This PR introduces a new CLI flag called `--eth1-cache-follow-distance` which can be used to set the distance manually. 
- A new dynamic catchup mechanism is added which detects when the cache is lagging the true eth1 chain and tries to download more blocks within the follow distance in order to catch up. --- beacon_node/eth1/src/service.rs | 125 ++++++++++++++++++++++++-------- beacon_node/eth1/tests/test.rs | 30 ++++---- beacon_node/src/cli.rs | 10 +++ beacon_node/src/config.rs | 6 ++ lighthouse/tests/beacon_node.rs | 19 +++++ 5 files changed, 144 insertions(+), 46 deletions(-) diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 460f53e732..a35d574037 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -41,8 +41,16 @@ const GET_DEPOSIT_LOG_TIMEOUT_MILLIS: u64 = 60_000; const WARNING_MSG: &str = "BLOCK PROPOSALS WILL FAIL WITHOUT VALID, SYNCED ETH1 CONNECTION"; -/// A factor used to reduce the eth1 follow distance to account for discrepancies in the block time. -const ETH1_BLOCK_TIME_TOLERANCE_FACTOR: u64 = 4; +/// Number of blocks to download if the node detects it is lagging behind due to an inaccurate +/// relationship between block-number-based follow distance and time-based follow distance. +const CATCHUP_BATCH_SIZE: u64 = 128; + +/// The absolute minimum follow distance to enforce when downloading catchup batches. +const CATCHUP_MIN_FOLLOW_DISTANCE: u64 = 64; + +/// To account for fast PoW blocks requiring more blocks in the cache than the block-based follow +/// distance would imply, we store `CACHE_FACTOR` more blocks in our cache. 
+const CACHE_FACTOR: u64 = 2; #[derive(Debug, PartialEq, Clone)] pub enum EndpointError { @@ -284,10 +292,18 @@ async fn get_remote_head_and_new_block_ranges( e }; let new_deposit_block_numbers = service - .relevant_new_block_numbers(remote_head_block.number, HeadType::Deposit) + .relevant_new_block_numbers( + remote_head_block.number, + Some(remote_head_block.timestamp), + HeadType::Deposit, + ) .map_err(handle_remote_not_synced)?; let new_block_cache_numbers = service - .relevant_new_block_numbers(remote_head_block.number, HeadType::BlockCache) + .relevant_new_block_numbers( + remote_head_block.number, + Some(remote_head_block.timestamp), + HeadType::BlockCache, + ) .map_err(handle_remote_not_synced)?; Ok(( remote_head_block, @@ -307,7 +323,7 @@ async fn relevant_new_block_numbers_from_endpoint( get_block_number(endpoint, Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS)) .map_err(SingleEndpointError::GetBlockNumberFailed) .await?; - service.relevant_new_block_numbers(remote_highest_block, head_type) + service.relevant_new_block_numbers(remote_highest_block, None, head_type) } #[derive(Debug, PartialEq)] @@ -319,7 +335,7 @@ pub enum SingleEndpointError { RemoteNotSynced { next_required_block: u64, remote_highest_block: u64, - reduced_follow_distance: u64, + cache_follow_distance: u64, }, /// Failed to download a block from the eth1 node. BlockDownloadFailed(String), @@ -384,6 +400,11 @@ pub struct Config { /// /// Note: this should be less than or equal to the specification's `ETH1_FOLLOW_DISTANCE`. pub follow_distance: u64, + /// The follow distance to use for blocks in our cache. + /// + /// This can be set lower than the true follow distance in order to correct for poor timing + /// of eth1 blocks. + pub cache_follow_distance: Option, /// Specifies the seconds when we consider the head of a node far behind. /// This should be less than `ETH1_FOLLOW_DISTANCE * SECONDS_PER_ETH1_BLOCK`. 
pub node_far_behind_seconds: u64, @@ -410,20 +431,30 @@ impl Config { E::SlotsPerEth1VotingPeriod::to_u64() * spec.seconds_per_slot; let eth1_blocks_per_voting_period = seconds_per_voting_period / spec.seconds_per_eth1_block; - // Compute the number of extra blocks we store prior to the voting period start blocks. - let follow_distance_tolerance_blocks = - spec.eth1_follow_distance / ETH1_BLOCK_TIME_TOLERANCE_FACTOR; - // Ensure we can store two full windows of voting blocks. let voting_windows = eth1_blocks_per_voting_period * 2; - // Extend the cache to account for varying eth1 block times and the follow distance - // tolerance blocks. - let length = voting_windows - + (voting_windows / ETH1_BLOCK_TIME_TOLERANCE_FACTOR) - + follow_distance_tolerance_blocks; + // Extend the cache to account for the cache follow distance. + let extra_follow_distance_blocks = self + .follow_distance + .saturating_sub(self.cache_follow_distance()); - self.block_cache_truncation = Some(length as usize); + let length = voting_windows + extra_follow_distance_blocks; + + // Allow for more blocks to account for blocks being generated faster than expected. + // The cache expiry should really be timestamp based, but that would require a more + // extensive refactor. + let cache_size = CACHE_FACTOR * length; + + self.block_cache_truncation = Some(cache_size as usize); + } + + /// The distance at which the cache should follow the head. + /// + /// Defaults to 3/4 of `follow_distance` unless set manually. 
+ pub fn cache_follow_distance(&self) -> u64 { + self.cache_follow_distance + .unwrap_or(3 * self.follow_distance / 4) } } @@ -438,6 +469,7 @@ impl Default for Config { deposit_contract_deploy_block: 1, lowest_cached_block_number: 1, follow_distance: 128, + cache_follow_distance: None, node_far_behind_seconds: 128 * 14, block_cache_truncation: Some(4_096), auto_update_interval_millis: 60_000, @@ -486,9 +518,8 @@ impl Service { /// /// This is useful since the spec declares `SECONDS_PER_ETH1_BLOCK` to be `14`, whilst it is /// actually `15` on Goerli. - pub fn reduced_follow_distance(&self) -> u64 { - let full = self.config().follow_distance; - full.saturating_sub(full / ETH1_BLOCK_TIME_TOLERANCE_FACTOR) + pub fn cache_follow_distance(&self) -> u64 { + self.config().cache_follow_distance() } /// Return byte representation of deposit and block caches. @@ -834,9 +865,10 @@ impl Service { fn relevant_new_block_numbers( &self, remote_highest_block: u64, + remote_highest_block_timestamp: Option, head_type: HeadType, ) -> Result>, SingleEndpointError> { - let follow_distance = self.reduced_follow_distance(); + let follow_distance = self.cache_follow_distance(); let next_required_block = match head_type { HeadType::Deposit => self .deposits() @@ -852,8 +884,16 @@ impl Service { .map(|n| n + 1) .unwrap_or_else(|| self.config().lowest_cached_block_number), }; + let latest_cached_block = self.latest_cached_block(); - relevant_block_range(remote_highest_block, next_required_block, follow_distance) + relevant_block_range( + remote_highest_block, + remote_highest_block_timestamp, + next_required_block, + follow_distance, + latest_cached_block.as_ref(), + &self.inner.spec, + ) } /// Contacts the remote eth1 node and attempts to import deposit logs up to the configured @@ -1189,24 +1229,48 @@ impl Service { /// Returns an error if `next_required_block > remote_highest_block + 1` which means the remote went /// backwards. 
fn relevant_block_range( - remote_highest_block: u64, + remote_highest_block_number: u64, + remote_highest_block_timestamp: Option, next_required_block: u64, - reduced_follow_distance: u64, + cache_follow_distance: u64, + latest_cached_block: Option<&Eth1Block>, + spec: &ChainSpec, ) -> Result>, SingleEndpointError> { - let remote_follow_block = remote_highest_block.saturating_sub(reduced_follow_distance); + // If the latest cached block is lagging the head block by more than `cache_follow_distance` + // times the expected block time then the eth1 block time is likely quite different from what we + // assumed. + // + // In order to catch up, load batches of `CATCHUP_BATCH_SIZE` until the situation rights itself. + // Note that we need to check this condition before the regular follow distance condition + // or we will keep downloading small numbers of blocks. + if let (Some(remote_highest_block_timestamp), Some(latest_cached_block)) = + (remote_highest_block_timestamp, latest_cached_block) + { + let lagging = latest_cached_block.timestamp + + cache_follow_distance * spec.seconds_per_eth1_block + < remote_highest_block_timestamp; + let end_block = std::cmp::min( + remote_highest_block_number.saturating_sub(CATCHUP_MIN_FOLLOW_DISTANCE), + next_required_block + CATCHUP_BATCH_SIZE, + ); + if lagging && next_required_block <= end_block { + return Ok(Some(next_required_block..=end_block)); + } + } + let remote_follow_block = remote_highest_block_number.saturating_sub(cache_follow_distance); if next_required_block <= remote_follow_block { Ok(Some(next_required_block..=remote_follow_block)) - } else if next_required_block > remote_highest_block + 1 { + } else if next_required_block > remote_highest_block_number + 1 { // If this is the case, the node must have gone "backwards" in terms of it's sync // (i.e., it's head block is lower than it was before). 
// - // We assume that the `reduced_follow_distance` should be sufficient to ensure this never + // We assume that the `cache_follow_distance` should be sufficient to ensure this never // happens, otherwise it is an error. Err(SingleEndpointError::RemoteNotSynced { next_required_block, - remote_highest_block, - reduced_follow_distance, + remote_highest_block: remote_highest_block_number, + cache_follow_distance, }) } else { // Return an empty range. @@ -1292,10 +1356,9 @@ mod tests { let seconds_per_voting_period = ::SlotsPerEth1VotingPeriod::to_u64() * spec.seconds_per_slot; let eth1_blocks_per_voting_period = seconds_per_voting_period / spec.seconds_per_eth1_block; - let reduce_follow_distance_blocks = - config.follow_distance / ETH1_BLOCK_TIME_TOLERANCE_FACTOR; + let cache_follow_distance_blocks = config.follow_distance - config.cache_follow_distance(); - let minimum_len = eth1_blocks_per_voting_period * 2 + reduce_follow_distance_blocks; + let minimum_len = eth1_blocks_per_voting_period * 2 + cache_follow_distance_blocks; assert!(len > minimum_len as usize); } diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index bb00ebaab1..3fe3b3ca52 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -107,7 +107,7 @@ mod eth1_cache { async { let log = null_logger(); - for follow_distance in 0..2 { + for follow_distance in 0..3 { let eth1 = new_ganache_instance() .await .expect("should start eth1 environment"); @@ -116,17 +116,16 @@ mod eth1_cache { let initial_block_number = get_block_number(&web3).await; - let service = Service::new( - Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: initial_block_number, - follow_distance, - ..Config::default() - }, - log.clone(), - MainnetEthSpec::default_spec(), - ); + let config = Config { + endpoints: 
vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + deposit_contract_address: deposit_contract.address(), + lowest_cached_block_number: initial_block_number, + follow_distance, + ..Config::default() + }; + let cache_follow_distance = config.cache_follow_distance(); + + let service = Service::new(config, log.clone(), MainnetEthSpec::default_spec()); // Create some blocks and then consume them, performing the test `rounds` times. for round in 0..2 { @@ -139,7 +138,7 @@ mod eth1_cache { .blocks() .read() .highest_block_number() - .map(|n| n + follow_distance) + .map(|n| n + cache_follow_distance) .expect("should have a latest block after the first round") }; @@ -168,12 +167,13 @@ mod eth1_cache { .blocks() .read() .highest_block_number() - .map(|n| n + follow_distance), + .map(|n| n + cache_follow_distance), Some(initial + blocks), - "should update {} blocks in round {} (follow {})", + "should update {} blocks in round {} (follow {} i.e. {})", blocks, round, follow_distance, + cache_follow_distance ); } } diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index a1347c9b02..3102018e3e 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -377,6 +377,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("1000") .takes_value(true) ) + .arg( + Arg::with_name("eth1-cache-follow-distance") + .long("eth1-cache-follow-distance") + .value_name("BLOCKS") + .help("Specifies the distance between the Eth1 chain head and the last block which \ + should be imported into the cache. 
Setting this value lower can help \ + compensate for irregular Proof-of-Work block times, but setting it too low \ + can make the node vulnerable to re-orgs.") + .takes_value(true) + ) .arg( Arg::with_name("slots-per-restore-point") .long("slots-per-restore-point") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index b1560c7955..db765100c3 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -236,6 +236,12 @@ pub fn get_config( client_config.eth1.purge_cache = true; } + if let Some(follow_distance) = + clap_utils::parse_optional(cli_args, "eth1-cache-follow-distance")? + { + client_config.eth1.cache_follow_distance = Some(follow_distance); + } + if cli_args.is_present("merge") || cli_args.is_present("execution-endpoints") { let mut el_config = execution_layer::Config::default(); diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 5748bbd341..effccbbd66 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -225,6 +225,25 @@ fn eth1_purge_cache_flag() { .run_with_zero_port() .with_config(|config| assert!(config.eth1.purge_cache)); } +#[test] +fn eth1_cache_follow_distance_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.eth1.cache_follow_distance, None); + assert_eq!(config.eth1.cache_follow_distance(), 3 * 2048 / 4); + }); +} +#[test] +fn eth1_cache_follow_distance_manual() { + CommandLineTest::new() + .flag("eth1-cache-follow-distance", Some("128")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.eth1.cache_follow_distance, Some(128)); + assert_eq!(config.eth1.cache_follow_distance(), 128); + }); +} // Tests for Bellatrix flags. 
#[test] From 3d51f24717d63bfd9d03f724db3633b2ee849d63 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 4 Jun 2022 21:24:39 +0000 Subject: [PATCH 016/184] Update Ropsten TTD (#3240) ## Issue Addressed NA ## Proposed Changes Updates the Ropsten TTD as per: https://blog.ethereum.org/2022/06/03/ropsten-merge-ttd/ ## Additional Info NA --- .../built_in_network_configs/ropsten/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml b/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml index 264eaf8230..5dad3ff759 100644 --- a/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml @@ -23,7 +23,7 @@ ALTAIR_FORK_EPOCH: 500 # Merge BELLATRIX_FORK_VERSION: 0x80000071 BELLATRIX_FORK_EPOCH: 750 -TERMINAL_TOTAL_DIFFICULTY: 100000000000000000000000 +TERMINAL_TOTAL_DIFFICULTY: 50000000000000000 TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 From a6d2ed6119c45b83c5fea79b67c3c48f3c81c156 Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Mon, 6 Jun 2022 05:51:10 +0000 Subject: [PATCH 017/184] Fix: PeerManager doesn't remove "outbound only" peers which should be pruned (#3236) ## Issue Addressed This is one step to address https://github.com/sigp/lighthouse/issues/3092 before introducing `quickcheck`. I noticed an issue while I was reading the pruning implementation `PeerManager::prune_excess_peers()`. If a peer with the following condition, **`outbound_peers_pruned` counter increases but the peer is not pushed to `peers_to_prune`**. 
- [outbound only](https://github.com/sigp/lighthouse/blob/1e4ac8a4b9dec645af23af811475b4e4c95c69ee/beacon_node/lighthouse_network/src/peer_manager/mod.rs#L1018) - [min_subnet_count <= MIN_SYNC_COMMITTEE_PEERS](https://github.com/sigp/lighthouse/blob/1e4ac8a4b9dec645af23af811475b4e4c95c69ee/beacon_node/lighthouse_network/src/peer_manager/mod.rs#L1047) As a result, PeerManager doesn't remove "outbound" peers which should be pruned. Note: [`subnet_to_peer`](https://github.com/sigp/lighthouse/blob/e0d673ea86ac0f6dab3ddd92b0de06ce5eacf8c0/beacon_node/lighthouse_network/src/peer_manager/mod.rs#L999) (HashMap) doesn't guarantee a particular order of iteration. So whether the test fails depend on the order of iteration. --- .../src/peer_manager/mod.rs | 192 ++++++++++++++++-- 1 file changed, 179 insertions(+), 13 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 85c0ddd950..9c8d41194c 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -1015,20 +1015,17 @@ impl PeerManager { let mut removed_peer_index = None; for (index, (candidate_peer, info)) in peers_on_subnet.iter().enumerate() { // Ensure we don't remove too many outbound peers - if info.is_outbound_only() { - if self.target_outbound_peers() - < connected_outbound_peer_count + if info.is_outbound_only() + && self.target_outbound_peers() + >= connected_outbound_peer_count .saturating_sub(outbound_peers_pruned) - { - outbound_peers_pruned += 1; - } else { - // Restart the main loop with the outbound peer removed from - // the list. This will lower the peers per subnet count and - // potentially a new subnet may be chosen to remove peers. This - // can occur recursively until we have no peers left to choose - // from. - continue; - } + { + // Restart the main loop with the outbound peer removed from + // the list. 
This will lower the peers per subnet count and + // potentially a new subnet may be chosen to remove peers. This + // can occur recursively until we have no peers left to choose + // from. + continue; } // Check the sync committee @@ -1051,6 +1048,9 @@ impl PeerManager { } } + if info.is_outbound_only() { + outbound_peers_pruned += 1; + } // This peer is suitable to be pruned removed_peer_index = Some(index); break; @@ -1885,4 +1885,170 @@ mod tests { assert!(!connected_peers.contains(&peers[1])); assert!(!connected_peers.contains(&peers[2])); } + + /// This test is for reproducing the issue: + /// https://github.com/sigp/lighthouse/pull/3236#issue-1256432659 + /// + /// Whether the issue happens depends on `subnet_to_peer` (HashMap), since HashMap doesn't + /// guarantee a particular order of iteration. So we repeat the test case to try to reproduce + /// the issue. + #[tokio::test] + async fn test_peer_manager_prune_based_on_subnet_count_repeat() { + for _ in 0..100 { + test_peer_manager_prune_based_on_subnet_count().await; + } + } + + /// Test the pruning logic to prioritize peers with the most subnets. This test specifies + /// the connection direction for the peers. + /// Either Peer 4 or 5 is expected to be removed in this test case. + /// + /// Create 8 peers. + /// Peer0 (out) : Subnet 1, Sync-committee-1 + /// Peer1 (out) : Subnet 1, Sync-committee-1 + /// Peer2 (out) : Subnet 2, Sync-committee-2 + /// Peer3 (out) : Subnet 2, Sync-committee-2 + /// Peer4 (out) : Subnet 3 + /// Peer5 (out) : Subnet 3 + /// Peer6 (in) : Subnet 4 + /// Peer7 (in) : Subnet 5 + async fn test_peer_manager_prune_based_on_subnet_count() { + let target = 7; + let mut peer_manager = build_peer_manager(target).await; + + // Create 8 peers to connect to. 
+ let mut peers = Vec::new(); + for x in 0..8 { + let peer = PeerId::random(); + + // Have some of the peers be on a long-lived subnet + let mut attnets = crate::types::EnrAttestationBitfield::::new(); + let mut syncnets = crate::types::EnrSyncCommitteeBitfield::::new(); + + match x { + 0 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(1, true).unwrap(); + syncnets.set(1, true).unwrap(); + } + 1 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(1, true).unwrap(); + syncnets.set(1, true).unwrap(); + } + 2 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(2, true).unwrap(); + syncnets.set(2, true).unwrap(); + } + 3 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(2, true).unwrap(); + syncnets.set(2, true).unwrap(); + } + 4 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(3, true).unwrap(); + } + 5 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(3, true).unwrap(); + } + 6 => { + peer_manager.inject_connect_ingoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(4, true).unwrap(); + } + 7 => { + peer_manager.inject_connect_ingoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(5, true).unwrap(); + } + _ => unreachable!(), + } + + let metadata = crate::rpc::MetaDataV2 { + seq_number: 0, + attnets, + syncnets, + }; + peer_manager + .network_globals + .peers + .write() + .peer_info_mut(&peer) + .unwrap() + .set_meta_data(MetaData::V2(metadata)); + let long_lived_subnets = peer_manager + .network_globals + .peers + .read() + .peer_info(&peer) + .unwrap() + .long_lived_subnets(); + println!("{},{}", x, peer); + for subnet in 
long_lived_subnets { + println!("Subnet: {:?}", subnet); + peer_manager + .network_globals + .peers + .write() + .add_subscription(&peer, subnet); + } + peers.push(peer); + } + + // Perform the heartbeat. + peer_manager.heartbeat(); + + // Tests that when we are over the target peer limit, after disconnecting an unhealthy peer, + // the number of connected peers updates and we will not remove too many peers. + assert_eq!( + peer_manager.network_globals.connected_or_dialing_peers(), + target + ); + + let connected_peers: std::collections::HashSet<_> = peer_manager + .network_globals + .peers + .read() + .connected_or_dialing_peers() + .cloned() + .collect(); + + // Either peer 4 or 5 should be removed. + // Check that we keep 6 and 7 peers, which we have few on a particular subnet. + assert!(connected_peers.contains(&peers[6])); + assert!(connected_peers.contains(&peers[7])); + } } From 493c2c037c7ae63a5943aad8c33763839c304c5c Mon Sep 17 00:00:00 2001 From: Divma Date: Mon, 6 Jun 2022 23:52:31 +0000 Subject: [PATCH 018/184] reduce reprocess queue/channel sizes (#3239) ## Issue Addressed Reduces the effect of late blocks on overall node buildup ## Proposed Changes change the capacity of the channels used to send work for reprocessing in the beacon processor, and to send back to the main processor task, to be 75% of the capacity of the channel for receiving new events ## Additional Info The issues we've seen suggest we should still evaluate node performance under stress, with late blocks being a big factor. Other changes that could help: 1. right now we have a cap for queued attestations for reprocessing that applies to the sum of aggregated and unaggregated attestations. We could consider adding a separate cap that favors aggregated ones. 2. 
solving #2848 --- beacon_node/network/src/beacon_processor/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 76903705fb..4aa7c76924 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -89,7 +89,7 @@ pub const MAX_WORK_EVENT_QUEUE_LEN: usize = 16_384; const MAX_IDLE_QUEUE_LEN: usize = 16_384; /// The maximum size of the channel for re-processing work events. -const MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 16_384; +const MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 3 * MAX_WORK_EVENT_QUEUE_LEN / 4; /// The maximum number of queued `Attestation` objects that will be stored before we start dropping /// them. From 54cf94ea59a0026a01c95b4ccc61c07a6fe2063e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 6 Jun 2022 23:52:32 +0000 Subject: [PATCH 019/184] Fix per-slot timer in presence of clock changes (#3243) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Issue Addressed Fixes a timing issue that results in spurious fork choice notifier failures: ``` WARN Error signalling fork choice waiter slot: 3962270, error: ForkChoiceSignalOutOfOrder { current: Slot(3962271), latest: Slot(3962270) }, service: beacon ``` There’s a fork choice run that is scheduled to run at the start of every slot by the `timer`, which creates a 12s interval timer when the beacon node starts up. The problem is that if there’s a bit of clock drift that gets corrected via NTP (or a leap second for that matter) then these 12s intervals will cease to line up with the start of the slot. This then creates the mismatch in slot number that we see above. Lighthouse also runs fork choice 500ms before the slot begins, and these runs are what is conflicting with the start-of-slot runs. 
This means that the warning in current versions of Lighthouse is mostly cosmetic because fork choice is up to date with all but the most recent 500ms of attestations (which usually isn’t many). ## Proposed Changes Fix the per-slot timer so that it continually re-calculates the duration to the start of the next slot and waits for that. A side-effect of this change is that we may skip slots if the per-slot task takes >12s to run, but I think this is an unlikely scenario and an acceptable compromise. --- beacon_node/client/src/builder.rs | 7 +------ beacon_node/timer/src/lib.rs | 24 ++++++++++++------------ 2 files changed, 13 insertions(+), 18 deletions(-) diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 59f1bebdb4..1f02ec7b3c 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -485,13 +485,8 @@ where .beacon_chain .clone() .ok_or("node timer requires a beacon chain")?; - let seconds_per_slot = self - .chain_spec - .as_ref() - .ok_or("node timer requires a chain spec")? 
- .seconds_per_slot; - spawn_timer(context.executor, beacon_chain, seconds_per_slot) + spawn_timer(context.executor, beacon_chain) .map_err(|e| format!("Unable to start node timer: {}", e))?; Ok(self) diff --git a/beacon_node/timer/src/lib.rs b/beacon_node/timer/src/lib.rs index 9c6bf1ca87..bf2acaf5bb 100644 --- a/beacon_node/timer/src/lib.rs +++ b/beacon_node/timer/src/lib.rs @@ -6,29 +6,29 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use slog::{debug, info, warn}; use slot_clock::SlotClock; use std::sync::Arc; -use std::time::Duration; -use tokio::time::{interval_at, Instant}; +use tokio::time::sleep; /// Spawns a timer service which periodically executes tasks for the beacon chain pub fn spawn_timer( executor: task_executor::TaskExecutor, beacon_chain: Arc>, - seconds_per_slot: u64, ) -> Result<(), &'static str> { let log = executor.log(); - let start_instant = Instant::now() - + beacon_chain - .slot_clock - .duration_to_next_slot() - .ok_or("slot_notifier unable to determine time to next slot")?; - - // Warning: `interval_at` panics if `seconds_per_slot` = 0. 
- let mut interval = interval_at(start_instant, Duration::from_secs(seconds_per_slot)); let per_slot_executor = executor.clone(); + let timer_future = async move { let log = per_slot_executor.log().clone(); loop { - interval.tick().await; + let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() { + Some(duration) => duration, + None => { + warn!(log, "Unable to determine duration to next slot"); + return; + } + }; + + sleep(duration_to_next_slot).await; + let chain = beacon_chain.clone(); if let Some(handle) = per_slot_executor .spawn_blocking_handle(move || chain.per_slot_task(), "timer_per_slot_task") From 58e223e429e11a5610bd46be7d35fb78bbf63f36 Mon Sep 17 00:00:00 2001 From: Divma Date: Tue, 7 Jun 2022 02:35:55 +0000 Subject: [PATCH 020/184] update libp2p (#3233) ## Issue Addressed na ## Proposed Changes Updates libp2p to https://github.com/libp2p/rust-libp2p/pull/2662 ## Additional Info From comments on the relevant PRs listed, we should pay attention at peer management consistency, but I don't think anything weird will happen. 
This is running in prater tok and sin --- Cargo.lock | 265 ++++++++++++++-------- beacon_node/lighthouse_network/Cargo.toml | 4 +- 2 files changed, 169 insertions(+), 100 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7be5546b60..1dcc10b857 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -202,15 +202,6 @@ dependencies = [ "pin-project-lite 0.2.8", ] -[[package]] -name = "atomic" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c" -dependencies = [ - "autocfg 1.1.0", -] - [[package]] name = "attohttpc" version = "0.10.1" @@ -1466,9 +1457,9 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.3.4" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "570d109b813e904becc80d8d5da38376818a143348413f7149f1340fe04754d4" +checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" dependencies = [ "heck 0.4.0", "proc-macro2", @@ -2599,7 +2590,7 @@ dependencies = [ "httpdate", "itoa 1.0.1", "pin-project-lite 0.2.8", - "socket2 0.4.4", + "socket2", "tokio", "tower-service", "tracing", @@ -2766,14 +2757,14 @@ dependencies = [ [[package]] name = "ipconfig" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" +checksum = "723519edce41262b05d4143ceb95050e4c614f483e78e9fd9e39a8275a84ad98" dependencies = [ - "socket2 0.3.19", - "widestring", + "socket2", + "widestring 0.5.1", "winapi", - "winreg 0.6.2", + "winreg 0.7.0", ] [[package]] @@ -3003,18 +2994,17 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.43.0" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e8570e25fa03d4385405dbeaf540ba00e3ee50942f03d84e1a8928a029f35f9" +checksum = "f3541a9b837ea166d91b6f54e9e3264ac94f0af7f7b51a78dadd52912e7bdba6" 
dependencies = [ - "atomic", "bytes", "futures", "futures-timer", "getrandom 0.2.6", "instant", "lazy_static", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", @@ -3056,11 +3046,11 @@ dependencies = [ "multistream-select 0.10.4", "parking_lot 0.11.2", "pin-project 1.0.10", - "prost", - "prost-build", + "prost 0.9.0", + "prost-build 0.9.0", "rand 0.8.5", "ring", - "rw-stream-sink", + "rw-stream-sink 0.2.1", "sha2 0.9.9", "smallvec", "thiserror", @@ -3071,9 +3061,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9164ec41455856e8187addc870bb4fe1ea2ee28e1a9244831d449a2429b32c1a" +checksum = "42d46fca305dee6757022e2f5a4f6c023315084d0ed7441c3ab244e76666d979" dependencies = [ "asn1_der", "bs58", @@ -3091,11 +3081,11 @@ dependencies = [ "multistream-select 0.11.0", "parking_lot 0.12.0", "pin-project 1.0.10", - "prost", - "prost-build", + "prost 0.10.4", + "prost-build 0.10.4", "rand 0.8.5", "ring", - "rw-stream-sink", + "rw-stream-sink 0.3.0", "sha2 0.10.2", "smallvec", "thiserror", @@ -3106,22 +3096,23 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7838647d33978b77f943687412f4a39e74234c8342cbfdad14282b465b272cb4" +checksum = "fbb462ec3a51fab457b4b44ac295e8b0a4b04dc175127e615cf996b1f0f1a268" dependencies = [ "futures", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "log", + "parking_lot 0.12.0", "smallvec", "trust-dns-resolver", ] [[package]] name = "libp2p-gossipsub" -version = "0.36.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f62943fba0b0dae02b87868620c52a581c54ec9fb04b5e195cf20313fc510c3" +checksum = "c9be947d8cea8e6b469201314619395826896d2c051053c3723910ba98e68e04" dependencies = [ "asynchronous-codec", "base64", @@ 
-3131,12 +3122,12 @@ dependencies = [ "futures", "hex_fmt", "instant", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "libp2p-swarm", "log", "prometheus-client", - "prost", - "prost-build", + "prost 0.10.4", + "prost-build 0.10.4", "rand 0.7.3", "regex", "sha2 0.10.2", @@ -3147,28 +3138,31 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.34.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f219b4d4660fe3a04bf5fe6b5970902b7c1918e25b2536be8c70efc480f88f8" +checksum = "40ad878c9b15bbc629b0c0cef57f59e8b37fa3f4f0e5ce11ff2bca42aae62e38" dependencies = [ + "asynchronous-codec", "futures", "futures-timer", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "libp2p-swarm", "log", "lru", - "prost", - "prost-build", + "prost 0.10.4", + "prost-build 0.10.4", + "prost-codec", "smallvec", + "thiserror", ] [[package]] name = "libp2p-metrics" -version = "0.4.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29e4e5e4c5aa567fe1ee3133afe088dc2d2fd104e20c5c2c5c2649f75129677" +checksum = "adc4357140141ba9739eee71b20aa735351c0fc642635b2bffc7f57a6b5c1090" dependencies = [ - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "libp2p-gossipsub", "libp2p-identify", "libp2p-swarm", @@ -3177,14 +3171,14 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "442eb0c9fff0bf22a34f015724b4143ce01877e079ed0963c722d94c07c72160" +checksum = "5ff9c893f2367631a711301d703c47432af898c9bb8253bea0e2c051a13f7640" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "log", "nohash-hasher", "parking_lot 0.12.0", @@ -3195,18 +3189,18 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9dd7e0c94051cda67123be68cf6b65211ba3dde7277be9068412de3e7ffd63ef" +checksum = "cf2cee1dad1c83325bbd182a8e94555778699cec8a9da00086efb7522c4c15ad" dependencies = [ "bytes", "curve25519-dalek 3.2.0", "futures", "lazy_static", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "log", - "prost", - "prost-build", + "prost 0.10.4", + "prost-build 0.10.4", "rand 0.8.5", "sha2 0.10.2", "snow", @@ -3217,33 +3211,33 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "962c0fb0e7212fb96a69b87f2d09bcefd317935239bdc79cda900e7a8897a3fe" +checksum = "db007e737adc5d28b2e03223b0210164928ad742591127130796a72aa8eaf54f" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "log", - "prost", - "prost-build", + "prost 0.10.4", + "prost-build 0.10.4", "unsigned-varint 0.7.1", "void", ] [[package]] name = "libp2p-swarm" -version = "0.34.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53ab2d4eb8ef2966b10fdf859245cdd231026df76d3c6ed2cf9e418a8f688ec9" +checksum = "e8863c7e17641622969ffeab84e338481a8c75e4bce40f18f27822127e975f4b" dependencies = [ "either", "fnv", "futures", "futures-timer", "instant", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "log", "pin-project 1.0.10", "rand 0.7.3", @@ -3264,34 +3258,35 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193447aa729c85aac2376828df76d171c1a589c9e6b58fcc7f9d9a020734122c" +checksum = "4f4933e38ef21b50698aefc87799c24f2a365c9d3f6cf50471f3f6a0bc410892" dependencies = [ "futures", "futures-timer", "if-addrs 0.7.0", "ipnet", "libc", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "log", - "socket2 0.4.4", + "socket2", "tokio", ] [[package]] name = "libp2p-websocket" -version = "0.34.0" +version = "0.35.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c932834c3754501c368d1bf3d0fb458487a642b90fc25df082a3a2f3d3b32e37" +checksum = "39d398fbb29f432c4128fabdaac2ed155c3bcaf1b9bd40eeeb10a471eefacbf5" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "log", + "parking_lot 0.12.0", "quicksink", - "rw-stream-sink", + "rw-stream-sink 0.3.0", "soketto", "url", "webpki-roots", @@ -3299,12 +3294,12 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be902ebd89193cd020e89e89107726a38cfc0d16d18f613f4a37d046e92c7517" +checksum = "8fe653639ad74877c759720febb0cbcbf4caa221adde4eed2d3126ce5c6f381f" dependencies = [ "futures", - "libp2p-core 0.32.0", + "libp2p-core 0.33.0", "parking_lot 0.12.0", "thiserror", "yamux", @@ -4578,9 +4573,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.15.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9a896938cc6018c64f279888b8c7559d3725210d5db9a3a1ee6bc7188d51d34" +checksum = "ac1abe0255c04d15f571427a2d1e00099016506cf3297b53853acd2b7eb87825" dependencies = [ "dtoa", "itoa 1.0.1", @@ -4606,7 +4601,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" dependencies = [ "bytes", - "prost-derive", + "prost-derive 0.9.0", +] + +[[package]] +name = "prost" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" +dependencies = [ + "bytes", + "prost-derive 0.10.1", ] [[package]] @@ -4622,13 +4627,48 @@ dependencies = [ "log", "multimap", "petgraph", - "prost", - "prost-types", + "prost 0.9.0", + "prost-types 0.9.0", "regex", "tempfile", "which", ] +[[package]] +name = "prost-build" 
+version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" +dependencies = [ + "bytes", + "cfg-if", + "cmake", + "heck 0.4.0", + "itertools", + "lazy_static", + "log", + "multimap", + "petgraph", + "prost 0.10.4", + "prost-types 0.10.1", + "regex", + "tempfile", + "which", +] + +[[package]] +name = "prost-codec" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00af1e92c33b4813cc79fda3f2dbf56af5169709be0202df730e9ebc3e4cd007" +dependencies = [ + "asynchronous-codec", + "bytes", + "prost 0.10.4", + "thiserror", + "unsigned-varint 0.7.1", +] + [[package]] name = "prost-derive" version = "0.9.0" @@ -4642,6 +4682,19 @@ dependencies = [ "syn", ] +[[package]] +name = "prost-derive" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "prost-types" version = "0.9.0" @@ -4649,7 +4702,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" dependencies = [ "bytes", - "prost", + "prost 0.9.0", +] + +[[package]] +name = "prost-types" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" +dependencies = [ + "bytes", + "prost 0.10.4", ] [[package]] @@ -5152,6 +5215,17 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "rw-stream-sink" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" +dependencies = [ + "futures", + "pin-project 1.0.10", + "static_assertions", +] + [[package]] name = 
"ryu" version = "1.0.9" @@ -5779,17 +5853,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "socket2" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" -dependencies = [ - "cfg-if", - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.4.4" @@ -6242,7 +6305,7 @@ dependencies = [ "parking_lot 0.12.0", "pin-project-lite 0.2.8", "signal-hook-registry", - "socket2 0.4.4", + "socket2", "tokio-macros", "winapi", ] @@ -6467,9 +6530,9 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.20.4" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca94d4e9feb6a181c690c4040d7a24ef34018d8313ac5044a61d21222ae24e31" +checksum = "9c31f240f59877c3d4bb3b3ea0ec5a6a0cff07323580ff8c7a605cd7d08b255d" dependencies = [ "async-trait", "cfg-if", @@ -6492,9 +6555,9 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.20.4" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecae383baad9995efaa34ce8e57d12c3f305e545887472a492b838f4b5cfb77a" +checksum = "e4ba72c2ea84515690c9fcef4c6c660bb9df3036ed1051686de84605b74fd558" dependencies = [ "cfg-if", "futures-util", @@ -6502,7 +6565,7 @@ dependencies = [ "lazy_static", "log", "lru-cache", - "parking_lot 0.11.2", + "parking_lot 0.12.0", "resolv-conf", "smallvec", "thiserror", @@ -7150,6 +7213,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" +[[package]] +name = "widestring" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" + [[package]] name = "winapi" version = "0.3.9" @@ -7189,7 +7258,7 @@ checksum = 
"177b1723986bcb4c606058e77f6e8614b51c7f9ad2face6f6fd63dd5c8b3cec3" dependencies = [ "field-offset", "libc", - "widestring", + "widestring 0.4.3", "winapi", ] @@ -7238,9 +7307,9 @@ checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" [[package]] name = "winreg" -version = "0.6.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" +checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" dependencies = [ "winapi", ] diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 5ed3614de6..3ec86b3d12 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -38,11 +38,11 @@ directory = { path = "../../common/directory" } regex = "1.5.5" strum = { version = "0.24.0", features = ["derive"] } superstruct = "0.5.0" -prometheus-client = "0.15.0" +prometheus-client = "0.16.0" unused_port = { path = "../../common/unused_port" } [dependencies.libp2p] -version = "0.43.0" +version = "0.45.0" default-features = false features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", "secp256k1"] From cfd26d25e039ed85b67a0b88f02ef633d64b9119 Mon Sep 17 00:00:00 2001 From: Divma Date: Tue, 7 Jun 2022 02:35:56 +0000 Subject: [PATCH 021/184] do not count sync batch attempts when peer is not at fault (#3245) ## Issue Addressed currently we count a failed attempt for a syncing chain even if the peer is not at fault. This makes us do more work if the chain fails, and heavily penalize peers, when we can simply retry. Inspired by a proposal I made to #3094 ## Proposed Changes If a batch fails but the peer is not at fault, do not count the attempt Also removes some annoying logs ## Additional Info We still get a counter on ignored attempts.. 
just in case --- .../network/src/sync/backfill_sync/mod.rs | 8 ++-- .../network/src/sync/range_sync/batch.rs | 48 ++++++++++++++----- .../network/src/sync/range_sync/chain.rs | 11 +++-- .../src/sync/range_sync/chain_collection.rs | 6 +-- .../network/src/sync/range_sync/mod.rs | 2 +- 5 files changed, 49 insertions(+), 26 deletions(-) diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index e76c037dad..be750e25f0 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -11,7 +11,7 @@ use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; use crate::sync::manager::{BatchProcessResult, Id}; use crate::sync::network_context::SyncNetworkContext; -use crate::sync::range_sync::{BatchConfig, BatchId, BatchInfo, BatchState}; +use crate::sync::range_sync::{BatchConfig, BatchId, BatchInfo, BatchProcessingResult, BatchState}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::types::{BackFillState, NetworkGlobals}; use lighthouse_network::{PeerAction, PeerId}; @@ -606,7 +606,7 @@ impl BackFillSync { } }; - if let Err(e) = batch.processing_completed(true) { + if let Err(e) = batch.processing_completed(BatchProcessingResult::Success) { self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; } // If the processed batch was not empty, we can validate previous unvalidated @@ -664,7 +664,9 @@ impl BackFillSync { }; debug!(self.log, "Batch processing failed"; "imported_blocks" => imported_blocks, "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(&peer)); - match batch.processing_completed(false) { + match batch.processing_completed(BatchProcessingResult::Failed { + count_attempt: peer_action.is_some(), + }) { Err(e) => { // Batch was in the wrong state self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0)) diff --git 
a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 614bf57dd0..aaebe022c7 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -72,6 +72,11 @@ pub struct WrongState(pub(crate) String); /// Auxiliary type alias for readability. type IsFailed = bool; +pub enum BatchProcessingResult { + Success, + Failed { count_attempt: bool }, +} + /// A segment of a chain. pub struct BatchInfo { /// Start slot of the batch. @@ -80,6 +85,8 @@ pub struct BatchInfo { end_slot: Slot, /// The `Attempts` that have been made and failed to send us this batch. failed_processing_attempts: Vec, + /// Number of processing attempts that have failed but we do not count. + other_failed_processing_attempts: u8, /// The number of download retries this batch has undergone due to a failed request. failed_download_attempts: Vec, /// State of the batch. @@ -143,6 +150,7 @@ impl BatchInfo { end_slot, failed_processing_attempts: Vec::new(), failed_download_attempts: Vec::new(), + other_failed_processing_attempts: 0, state: BatchState::AwaitingDownload, marker: std::marker::PhantomData, } @@ -348,23 +356,33 @@ impl BatchInfo { } #[must_use = "Batch may have failed"] - pub fn processing_completed(&mut self, was_sucessful: bool) -> Result { + pub fn processing_completed( + &mut self, + procesing_result: BatchProcessingResult, + ) -> Result { match self.state.poison() { BatchState::Processing(attempt) => { - self.state = if !was_sucessful { - // register the failed attempt - self.failed_processing_attempts.push(attempt); + self.state = match procesing_result { + BatchProcessingResult::Success => BatchState::AwaitingValidation(attempt), + BatchProcessingResult::Failed { count_attempt } => { + if count_attempt { + // register the failed attempt + self.failed_processing_attempts.push(attempt); - // check if the batch can be downloaded again - if self.failed_processing_attempts.len() - >= 
B::max_batch_processing_attempts() as usize - { - BatchState::Failed - } else { - BatchState::AwaitingDownload + // check if the batch can be downloaded again + if self.failed_processing_attempts.len() + >= B::max_batch_processing_attempts() as usize + { + BatchState::Failed + } else { + BatchState::AwaitingDownload + } + } else { + self.other_failed_processing_attempts = + self.other_failed_processing_attempts.saturating_add(1); + BatchState::AwaitingDownload + } } - } else { - BatchState::AwaitingValidation(attempt) }; Ok(self.state.is_failed()) } @@ -451,6 +469,10 @@ impl slog::KV for BatchInfo { )?; serializer.emit_usize("downloaded", self.failed_download_attempts.len())?; serializer.emit_usize("processed", self.failed_processing_attempts.len())?; + serializer.emit_u8( + "processed_no_penalty", + self.other_failed_processing_attempts, + )?; serializer.emit_arguments("state", &format_args!("{:?}", self.state))?; slog::Result::Ok(()) } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 9f4142dd66..88837d0e12 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1,4 +1,4 @@ -use super::batch::{BatchInfo, BatchState}; +use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; use crate::beacon_processor::ChainSegmentProcessId; use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use crate::sync::{manager::Id, network_context::SyncNetworkContext, BatchProcessResult}; @@ -463,7 +463,7 @@ impl SyncingChain { )) })?; - batch.processing_completed(true)?; + batch.processing_completed(BatchProcessingResult::Success)?; // If the processed batch was not empty, we can validate previous unvalidated // blocks. 
if *was_non_empty { @@ -512,9 +512,12 @@ impl SyncingChain { batch.state(), )) })?; - debug!(self.log, "Batch processing failed"; "imported_blocks" => imported_blocks, + debug!(self.log, "Batch processing failed"; "imported_blocks" => imported_blocks, "peer_penalty" => ?peer_action, "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(&peer)); - if batch.processing_completed(false)? { + + if batch.processing_completed(BatchProcessingResult::Failed { + count_attempt: peer_action.is_some(), + })? { // check that we have not exceeded the re-process retry counter // If a batch has exceeded the invalid batch lookup attempts limit, it means // that it is likely all peers in this chain are are sending invalid batches diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 512f7a989a..7ddfc3f70a 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -407,7 +407,6 @@ impl ChainCollection { local_info: &SyncInfo, awaiting_head_peers: &mut HashMap, ) { - debug!(self.log, "Purging chains"); let local_finalized_slot = local_info .finalized_epoch .start_slot(T::EthSpec::slots_per_epoch()); @@ -416,10 +415,7 @@ impl ChainCollection { let log_ref = &self.log; let is_outdated = |target_slot: &Slot, target_root: &Hash256| { - let is = - target_slot <= &local_finalized_slot || beacon_chain.is_block_known(target_root); - debug!(log_ref, "Chain is outdated {}", is); - is + target_slot <= &local_finalized_slot || beacon_chain.is_block_known(target_root) }; // Retain only head peers that remain relevant diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index b4a27c23c7..31122d59a1 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -8,7 +8,7 @@ mod chain_collection; mod 
range; mod sync_type; -pub use batch::{BatchConfig, BatchInfo, BatchState}; +pub use batch::{BatchConfig, BatchInfo, BatchProcessingResult, BatchState}; pub use chain::{BatchId, ChainId, EPOCHS_PER_BATCH}; pub use range::RangeSync; pub use sync_type::RangeSyncType; From 9c429d0764ed91cf56efb8a47a35a556b54a86a4 Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 9 Jun 2022 10:47:03 +0000 Subject: [PATCH 022/184] Only use authenticated endpoints during EE integration testing (#3253) ## Issue Addressed Failures in our CI integration tests for Geth. ## Proposed Changes Only connect to the authenticated execution endpoints during execution tests. This is necessary now that it is impossible to connect to the `engine` api on an unauthenticated endpoint. See https://github.com/ethereum/go-ethereum/pull/24997 ## Additional Info As these tests break semi-regularly, I have kept logs enabled to ease future debugging. I've also updated the Nethermind tests, although these weren't broken. This should future-proof us if Nethermind decides to follow suit with Geth --- .../execution_engine_integration/src/execution_engine.rs | 6 ------ testing/execution_engine_integration/src/main.rs | 2 +- testing/execution_engine_integration/src/nethermind.rs | 9 ++++++--- testing/execution_engine_integration/src/test_rig.rs | 2 +- 4 files changed, 8 insertions(+), 11 deletions(-) diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs index dd5d03be89..7df88aa0d7 100644 --- a/testing/execution_engine_integration/src/execution_engine.rs +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -22,7 +22,6 @@ pub struct ExecutionEngine { engine: E, #[allow(dead_code)] datadir: TempDir, - http_port: u16, http_auth_port: u16, child: Child, } @@ -46,16 +45,11 @@ impl ExecutionEngine { Self { engine, datadir, - http_port, http_auth_port, child, } } - pub fn http_url(&self) -> SensitiveUrl { - 
SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_port)).unwrap() - } - pub fn http_auth_url(&self) -> SensitiveUrl { SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_auth_port)).unwrap() } diff --git a/testing/execution_engine_integration/src/main.rs b/testing/execution_engine_integration/src/main.rs index 30c8132b7c..a4ec0f9215 100644 --- a/testing/execution_engine_integration/src/main.rs +++ b/testing/execution_engine_integration/src/main.rs @@ -15,7 +15,7 @@ use nethermind::NethermindEngine; use test_rig::TestRig; /// Set to `false` to send logs to the console during tests. Logs are useful when debugging. -const SUPPRESS_LOGS: bool = true; +const SUPPRESS_LOGS: bool = false; fn main() { if cfg!(windows) { diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index 7fb07c9e5a..be638fe042 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -75,7 +75,7 @@ impl GenericExecutionEngine for NethermindEngine { fn start_client( datadir: &TempDir, - http_port: u16, + _http_port: u16, http_auth_port: u16, jwt_secret_path: PathBuf, ) -> Child { @@ -89,11 +89,14 @@ impl GenericExecutionEngine for NethermindEngine { .arg("--Merge.TerminalTotalDifficulty") .arg("0") .arg("--JsonRpc.AdditionalRpcUrls") - .arg(format!("http://localhost:{}|http;ws|net;eth;subscribe;engine;web3;client|no-auth,http://localhost:{}|http;ws|net;eth;subscribe;engine;web3;client", http_port, http_auth_port)) + .arg(format!( + "http://localhost:{}|http;ws|net;eth;subscribe;engine;web3;client", + http_auth_port + )) .arg("--JsonRpc.EnabledModules") .arg("net,eth,subscribe,web3,admin,engine") .arg("--JsonRpc.Port") - .arg(http_port.to_string()) + .arg(http_auth_port.to_string()) .arg("--Network.DiscoveryPort") .arg(network_port.to_string()) .arg("--Network.P2PPort") diff --git a/testing/execution_engine_integration/src/test_rig.rs 
b/testing/execution_engine_integration/src/test_rig.rs index 77993df2b7..21162fea56 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -68,7 +68,7 @@ impl TestRig { let ee_b = { let execution_engine = ExecutionEngine::new(generic_engine); - let urls = vec![execution_engine.http_url()]; + let urls = vec![execution_engine.http_auth_url()]; let config = execution_layer::Config { execution_endpoints: urls, From 56b4cd88ca0f4926101ce221d25e9b0c0054a079 Mon Sep 17 00:00:00 2001 From: Divma Date: Thu, 9 Jun 2022 23:48:51 +0000 Subject: [PATCH 023/184] minor libp2p upgrade (#3259) ## Issue Addressed Upgrades libp2p --- Cargo.lock | 13 +++++++------ beacon_node/lighthouse_network/Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1dcc10b857..85323c1bea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2994,9 +2994,9 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.45.0" +version = "0.45.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3541a9b837ea166d91b6f54e9e3264ac94f0af7f7b51a78dadd52912e7bdba6" +checksum = "41726ee8f662563fafba2d2d484b14037cc8ecb8c953fbfc8439d4ce3a0a9029" dependencies = [ "bytes", "futures", @@ -3138,9 +3138,9 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.36.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ad878c9b15bbc629b0c0cef57f59e8b37fa3f4f0e5ce11ff2bca42aae62e38" +checksum = "b84b53490442d086db1fa5375670c9666e79143dccadef3f7c74a4346899a984" dependencies = [ "asynchronous-codec", "futures", @@ -3154,6 +3154,7 @@ dependencies = [ "prost-codec", "smallvec", "thiserror", + "void", ] [[package]] @@ -3228,9 +3229,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.36.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e8863c7e17641622969ffeab84e338481a8c75e4bce40f18f27822127e975f4b" +checksum = "8f4bb21c5abadbf00360c734f16bf87f1712ed4f23cd46148f625d2ddb867346" dependencies = [ "either", "fnv", diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 3ec86b3d12..e7c4781e21 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -42,7 +42,7 @@ prometheus-client = "0.16.0" unused_port = { path = "../../common/unused_port" } [dependencies.libp2p] -version = "0.45.0" +version = "0.45.1" default-features = false features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", "secp256k1"] From 452b46a7afa58f43428db71e41747265d07486b5 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 10 Jun 2022 04:29:26 +0000 Subject: [PATCH 024/184] Pin MDBX at last version with Win/Mac support (#3246) ## Issue Addressed Newer versions of MDBX have removed Windows and macOS support, so this PR pins MDBX at the last working version to prevent an accidental regression via `cargo update`. ## Additional Info This is a short-term solution, if our pinned version of MDBX turns out to be buggy we will need to consider backporting patches from upstream to our own fork. --- slasher/Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 22b3408ab3..368350f11b 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -13,7 +13,8 @@ flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } lazy_static = "1.4.0" lighthouse_metrics = { path = "../common/lighthouse_metrics" } filesystem = { path = "../common/filesystem" } -mdbx = { package = "libmdbx", version = "0.1.0" } +# MDBX is pinned at the last version with Windows and macOS support. This is only viable short-term. 
+mdbx = { package = "libmdbx", version = "=0.1.4" } lru = "0.7.1" parking_lot = "0.12.0" rand = "0.8.5" From 1d016a83f25c0484c58abb9242f282222592330e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 10 Jun 2022 04:29:27 +0000 Subject: [PATCH 025/184] Lint against panicky calls in async functions (#3250) ## Description Add a new lint to CI that attempts to detect calls to functions like `block_on` from async execution contexts. This lint was written from scratch exactly for this purpose, on my fork of Clippy: https://github.com/michaelsproul/rust-clippy/tree/disallow-from-async ## Additional Info - I've successfully detected the previous two issues we had with `block_on` by running the linter on the commits prior to each of these PRs: https://github.com/sigp/lighthouse/pull/3165, https://github.com/sigp/lighthouse/pull/3199. - The lint runs on CI with `continue-on-error: true` so that if it fails spuriously it won't block CI. - I think it would be good to merge this PR before https://github.com/sigp/lighthouse/pull/3244 so that we can lint the extensive executor-related changes in that PR. - I aim to upstream the lint to Clippy, at which point building a custom version of Clippy from my fork will no longer be necessary. I imagine this will take several weeks or months though, because the code is currently a bit hacky and will need some renovations to pass review. 
--- .github/custom/clippy.toml | 21 +++++++++++++++++++++ .github/workflows/test-suite.yml | 17 +++++++++++++++++ .gitignore | 1 + Makefile | 9 +++++++++ 4 files changed, 48 insertions(+) create mode 100644 .github/custom/clippy.toml diff --git a/.github/custom/clippy.toml b/.github/custom/clippy.toml new file mode 100644 index 0000000000..df09502307 --- /dev/null +++ b/.github/custom/clippy.toml @@ -0,0 +1,21 @@ +disallowed-from-async-methods = [ + "tokio::runtime::Handle::block_on", + "tokio::runtime::Runtime::block_on", + "tokio::task::LocalSet::block_on", + "tokio::sync::Mutex::blocking_lock", + "tokio::sync::RwLock::blocking_read", + "tokio::sync::mpsc::Receiver::blocking_recv", + "tokio::sync::mpsc::UnboundedReceiver::blocking_recv", + "tokio::sync::oneshot::Receiver::blocking_recv", + "tokio::sync::mpsc::Sender::blocking_send", + "tokio::sync::RwLock::blocking_write", +] +async-wrapper-methods = [ + "tokio::runtime::Handle::spawn_blocking", + "task_executor::TaskExecutor::spawn_blocking", + "task_executor::TaskExecutor::spawn_blocking_handle", + "warp_utils::task::blocking_task", + "warp_utils::task::blocking_json_task", + "validator_client::http_api::blocking_signed_json_task", + "execution_layer::test_utils::MockServer::new", +] diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index da0bcb3857..a58491d04f 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -252,6 +252,23 @@ jobs: run: make lint - name: Certify Cargo.lock freshness run: git diff --exit-code Cargo.lock + disallowed-from-async-lint: + name: disallowed-from-async-lint + runs-on: ubuntu-latest + needs: cargo-fmt + continue-on-error: true + steps: + - uses: actions/checkout@v1 + - name: Install SigP Clippy fork + run: | + cd .. 
+ git clone https://github.com/michaelsproul/rust-clippy.git + cd rust-clippy + git checkout 31a49666ccfcd7963b63345d6ce757c373f22c2a + cargo build --release --bin cargo-clippy --bin clippy-driver + cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin + - name: Run Clippy with the disallowed-from-async lint + run: make nightly-lint check-msrv: name: check-msrv runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index 9376efc768..9830ef39be 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,4 @@ perf.data* *.tar.gz /bin genesis.ssz +/clippy.toml diff --git a/Makefile b/Makefile index 01fd45a4dd..a97637bfd1 100644 --- a/Makefile +++ b/Makefile @@ -12,6 +12,7 @@ AARCH64_TAG = "aarch64-unknown-linux-gnu" BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release" PINNED_NIGHTLY ?= nightly +CLIPPY_PINNED_NIGHTLY=nightly-2022-05-19 # List of all hard forks. This list is used to set env variables for several tests so that # they run for different forks. @@ -145,6 +146,14 @@ lint: -A clippy::upper-case-acronyms \ -A clippy::vec-init-then-push +# FIXME: fails if --release is added due to broken HTTP API tests +nightly-lint: + cp .github/custom/clippy.toml . + cargo +$(CLIPPY_PINNED_NIGHTLY) clippy --workspace --tests -- \ + -A clippy::all \ + -D clippy::disallowed_from_async + rm clippy.toml + # Runs the makefile in the `ef_tests` repo. # # May download and extract an archive of test vectors from the ethereum From 11d80a6a388be4fc32f5b91e864494f87f6ddee7 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 10 Jun 2022 04:29:28 +0000 Subject: [PATCH 026/184] Optimise `per_epoch_processing` low-hanging-fruit (#3254) ## Issue Addressed NA ## Proposed Changes - Uses a `Vec` in `SingleEpochParticipationCache` rather than `HashMap` to speed up processing times at the cost of memory usage. - Cache the result of `integer_sqrt` rather than recomputing for each validator. 
- Cache `state.previous_epoch` rather than recomputing it for each validator. ### Benchmarks Benchmarks on a recent mainnet state using #3252 to get timing. #### Without this PR ``` lcli skip-slots --state-path /tmp/state-0x3cdc.ssz --partial-state-advance --slots 32 --state-root 0x3cdc33cd02713d8d6cc33a6dbe2d3a5bf9af1d357de0d175a403496486ff845e --runs 10 [2022-06-09T08:21:02Z INFO lcli::skip_slots] Using mainnet spec [2022-06-09T08:21:02Z INFO lcli::skip_slots] Advancing 32 slots [2022-06-09T08:21:02Z INFO lcli::skip_slots] Doing 10 runs [2022-06-09T08:21:02Z INFO lcli::skip_slots] State path: "/tmp/state-0x3cdc.ssz" SSZ decoding /tmp/state-0x3cdc.ssz: 43ms [2022-06-09T08:21:03Z INFO lcli::skip_slots] Run 0: 245.718794ms [2022-06-09T08:21:03Z INFO lcli::skip_slots] Run 1: 245.364782ms [2022-06-09T08:21:03Z INFO lcli::skip_slots] Run 2: 255.866179ms [2022-06-09T08:21:04Z INFO lcli::skip_slots] Run 3: 243.838909ms [2022-06-09T08:21:04Z INFO lcli::skip_slots] Run 4: 250.431425ms [2022-06-09T08:21:04Z INFO lcli::skip_slots] Run 5: 248.68765ms [2022-06-09T08:21:04Z INFO lcli::skip_slots] Run 6: 262.051113ms [2022-06-09T08:21:05Z INFO lcli::skip_slots] Run 7: 264.293967ms [2022-06-09T08:21:05Z INFO lcli::skip_slots] Run 8: 293.202007ms [2022-06-09T08:21:05Z INFO lcli::skip_slots] Run 9: 264.552017ms ``` #### With this PR: ``` lcli skip-slots --state-path /tmp/state-0x3cdc.ssz --partial-state-advance --slots 32 --state-root 0x3cdc33cd02713d8d6cc33a6dbe2d3a5bf9af1d357de0d175a403496486ff845e --runs 10 [2022-06-09T08:57:59Z INFO lcli::skip_slots] Run 0: 73.898678ms [2022-06-09T08:57:59Z INFO lcli::skip_slots] Run 1: 75.536978ms [2022-06-09T08:57:59Z INFO lcli::skip_slots] Run 2: 75.176104ms [2022-06-09T08:57:59Z INFO lcli::skip_slots] Run 3: 76.460828ms [2022-06-09T08:57:59Z INFO lcli::skip_slots] Run 4: 75.904195ms [2022-06-09T08:58:00Z INFO lcli::skip_slots] Run 5: 75.53077ms [2022-06-09T08:58:00Z INFO lcli::skip_slots] Run 6: 74.745572ms [2022-06-09T08:58:00Z INFO 
lcli::skip_slots] Run 7: 75.823489ms [2022-06-09T08:58:00Z INFO lcli::skip_slots] Run 8: 74.892055ms [2022-06-09T08:58:00Z INFO lcli::skip_slots] Run 9: 76.333569ms ``` ## Additional Info NA --- beacon_node/operation_pool/src/attestation.rs | 4 +- .../state_processing/src/common/altair.rs | 26 +++++++-- .../altair/sync_committee.rs | 5 +- .../process_operations.rs | 8 +-- .../altair/inactivity_updates.rs | 3 +- .../altair/participation_cache.rs | 53 ++++++++++--------- .../altair/rewards_and_penalties.rs | 10 ++-- .../base/rewards_and_penalties.rs | 3 +- .../epoch_processing_summary.rs | 8 ++- consensus/types/src/beacon_state.rs | 16 ++++-- 10 files changed, 90 insertions(+), 46 deletions(-) diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index 11537e6ec3..2f7fba4540 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -90,6 +90,8 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { let att_participation_flags = get_attestation_participation_flag_indices(state, &att.data, inclusion_delay, spec) .ok()?; + let base_reward_per_increment = + altair::BaseRewardPerIncrement::new(total_active_balance, spec).ok()?; let fresh_validators_rewards = attesting_indices .iter() @@ -98,7 +100,7 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { let participation = participation_list.get(index)?; let base_reward = - altair::get_base_reward(state, index, total_active_balance, spec).ok()?; + altair::get_base_reward(state, index, base_reward_per_increment, spec).ok()?; for (flag_index, weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { if att_participation_flags.contains(&flag_index) diff --git a/consensus/state_processing/src/common/altair.rs b/consensus/state_processing/src/common/altair.rs index 6cf80bdd9e..8943ef2f40 100644 --- a/consensus/state_processing/src/common/altair.rs +++ b/consensus/state_processing/src/common/altair.rs @@ -2,27 +2,45 @@ use 
integer_sqrt::IntegerSquareRoot; use safe_arith::{ArithError, SafeArith}; use types::*; +/// This type exists to avoid confusing `total_active_balance` with `base_reward_per_increment`, +/// since they are used in close proximity and the same type (`u64`). +#[derive(Copy, Clone)] +pub struct BaseRewardPerIncrement(u64); + +impl BaseRewardPerIncrement { + pub fn new(total_active_balance: u64, spec: &ChainSpec) -> Result { + get_base_reward_per_increment(total_active_balance, spec).map(Self) + } + + pub fn as_u64(&self) -> u64 { + self.0 + } +} + /// Returns the base reward for some validator. /// +/// The function has a different interface to the spec since it accepts the +/// `base_reward_per_increment` without computing it each time. Avoiding the re computation has +/// shown to be a significant optimisation. +/// /// Spec v1.1.0 pub fn get_base_reward( state: &BeaconState, index: usize, - // Should be == get_total_active_balance(state, spec) - total_active_balance: u64, + base_reward_per_increment: BaseRewardPerIncrement, spec: &ChainSpec, ) -> Result { state .get_effective_balance(index)? .safe_div(spec.effective_balance_increment)? - .safe_mul(get_base_reward_per_increment(total_active_balance, spec)?) + .safe_mul(base_reward_per_increment.as_u64()) .map_err(Into::into) } /// Returns the base reward for some validator. 
/// /// Spec v1.1.0 -pub fn get_base_reward_per_increment( +fn get_base_reward_per_increment( total_active_balance: u64, spec: &ChainSpec, ) -> Result { diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index 8358003e4b..306e86714c 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -1,4 +1,4 @@ -use crate::common::{altair::get_base_reward_per_increment, decrease_balance, increase_balance}; +use crate::common::{altair::BaseRewardPerIncrement, decrease_balance, increase_balance}; use crate::per_block_processing::errors::{BlockProcessingError, SyncAggregateInvalid}; use crate::{signature_sets::sync_aggregate_signature_set, VerifySignatures}; use safe_arith::SafeArith; @@ -72,7 +72,8 @@ pub fn compute_sync_aggregate_rewards( let total_active_balance = state.get_total_active_balance()?; let total_active_increments = total_active_balance.safe_div(spec.effective_balance_increment)?; - let total_base_rewards = get_base_reward_per_increment(total_active_balance, spec)? + let total_base_rewards = BaseRewardPerIncrement::new(total_active_balance, spec)? + .as_u64() .safe_mul(total_active_increments)?; let max_participant_rewards = total_base_rewards .safe_mul(SYNC_REWARD_WEIGHT)? 
diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 3bf22d004a..31a4ac1fb4 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -1,7 +1,8 @@ use super::*; use crate::common::{ - altair::get_base_reward, get_attestation_participation_flag_indices, increase_balance, - initiate_validator_exit, slash_validator, + altair::{get_base_reward, BaseRewardPerIncrement}, + get_attestation_participation_flag_indices, increase_balance, initiate_validator_exit, + slash_validator, }; use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; use crate::VerifySignatures; @@ -128,6 +129,7 @@ pub mod altair { // Update epoch participation flags. let total_active_balance = state.get_total_active_balance()?; + let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?; let mut proposer_reward_numerator = 0; for index in &indexed_attestation.attesting_indices { let index = *index as usize; @@ -143,7 +145,7 @@ pub mod altair { { validator_participation.add_flag(flag_index)?; proposer_reward_numerator.safe_add_assign( - get_base_reward(state, index, total_active_balance, spec)? + get_base_reward(state, index, base_reward_per_increment, spec)? 
.safe_mul(weight)?, )?; } diff --git a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs index 038fe77044..967f642e85 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs @@ -14,6 +14,7 @@ pub fn process_inactivity_updates( participation_cache: &ParticipationCache, spec: &ChainSpec, ) -> Result<(), EpochProcessingError> { + let previous_epoch = state.previous_epoch(); // Score updates based on previous epoch participation, skip genesis epoch if state.current_epoch() == T::genesis_epoch() { return Ok(()); @@ -33,7 +34,7 @@ pub fn process_inactivity_updates( .safe_add_assign(spec.inactivity_score_bias)?; } // Decrease the score of all validators for forgiveness when not during a leak - if !state.is_in_inactivity_leak(spec) { + if !state.is_in_inactivity_leak(previous_epoch, spec) { let inactivity_score = state.get_inactivity_score_mut(index)?; inactivity_score .safe_sub_assign(min(spec.inactivity_score_recovery_rate, *inactivity_score))?; diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs index 503dadfc70..004726923e 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs @@ -12,7 +12,6 @@ //! to get useful summaries about the validator participation in an epoch. 
use safe_arith::{ArithError, SafeArith}; -use std::collections::HashMap; use types::{ consts::altair::{ NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, @@ -24,6 +23,7 @@ use types::{ #[derive(Debug, PartialEq)] pub enum Error { InvalidFlagIndex(usize), + InvalidValidatorIndex(usize), } /// A balance which will never be below the specified `minimum`. @@ -64,7 +64,7 @@ struct SingleEpochParticipationCache { /// It would be ideal to maintain a reference to the `BeaconState` here rather than copying the /// `ParticipationFlags`, however that would cause us to run into mutable reference limitations /// upstream. - unslashed_participating_indices: HashMap, + unslashed_participating_indices: Vec>, /// Stores the sum of the balances for all validators in `self.unslashed_participating_indices` /// for all flags in `NUM_FLAG_INDICES`. /// @@ -76,11 +76,12 @@ struct SingleEpochParticipationCache { } impl SingleEpochParticipationCache { - fn new(hashmap_len: usize, spec: &ChainSpec) -> Self { + fn new(state: &BeaconState, spec: &ChainSpec) -> Self { + let num_validators = state.validators().len(); let zero_balance = Balance::zero(spec.effective_balance_increment); Self { - unslashed_participating_indices: HashMap::with_capacity(hashmap_len), + unslashed_participating_indices: vec![None; num_validators], total_flag_balances: [zero_balance; NUM_FLAG_INDICES], total_active_balance: zero_balance, } @@ -100,7 +101,11 @@ impl SingleEpochParticipationCache { /// /// May return an error if `flag_index` is out-of-bounds. 
fn has_flag(&self, val_index: usize, flag_index: usize) -> Result { - if let Some(participation_flags) = self.unslashed_participating_indices.get(&val_index) { + let participation_flags = self + .unslashed_participating_indices + .get(val_index) + .ok_or(Error::InvalidValidatorIndex(val_index))?; + if let Some(participation_flags) = participation_flags { participation_flags .has_flag(flag_index) .map_err(|_| Error::InvalidFlagIndex(flag_index)) @@ -121,13 +126,14 @@ impl SingleEpochParticipationCache { &mut self, val_index: usize, state: &BeaconState, + current_epoch: Epoch, relative_epoch: RelativeEpoch, ) -> Result<(), BeaconStateError> { let val_balance = state.get_effective_balance(val_index)?; let validator = state.get_validator(val_index)?; // Sanity check to ensure the validator is active. - let epoch = relative_epoch.into_epoch(state.current_epoch()); + let epoch = relative_epoch.into_epoch(current_epoch); if !validator.is_active_at(epoch) { return Err(BeaconStateError::ValidatorIsInactive { val_index }); } @@ -149,8 +155,10 @@ impl SingleEpochParticipationCache { } // Add their `ParticipationFlags` to the map. - self.unslashed_participating_indices - .insert(val_index, *epoch_participation); + *self + .unslashed_participating_indices + .get_mut(val_index) + .ok_or(BeaconStateError::UnknownValidator(val_index))? = Some(*epoch_participation); // Iterate through all the flags and increment the total flag balances for whichever flags // are set for `val_index`. @@ -190,19 +198,10 @@ impl ParticipationCache { let current_epoch = state.current_epoch(); let previous_epoch = state.previous_epoch(); - let num_previous_epoch_active_vals = state - .get_cached_active_validator_indices(RelativeEpoch::Previous)? - .len(); - let num_current_epoch_active_vals = state - .get_cached_active_validator_indices(RelativeEpoch::Current)? - .len(); - // Both the current/previous epoch participations are set to a capacity that is slightly // larger than required. 
The difference will be due slashed-but-active validators. - let mut current_epoch_participation = - SingleEpochParticipationCache::new(num_current_epoch_active_vals, spec); - let mut previous_epoch_participation = - SingleEpochParticipationCache::new(num_previous_epoch_active_vals, spec); + let mut current_epoch_participation = SingleEpochParticipationCache::new(state, spec); + let mut previous_epoch_participation = SingleEpochParticipationCache::new(state, spec); // Contains the set of validators which are either: // // - Active in the previous epoch. @@ -224,6 +223,7 @@ impl ParticipationCache { current_epoch_participation.process_active_validator( val_index, state, + current_epoch, RelativeEpoch::Current, )?; } @@ -232,13 +232,14 @@ impl ParticipationCache { previous_epoch_participation.process_active_validator( val_index, state, + current_epoch, RelativeEpoch::Previous, )?; } // Note: a validator might still be "eligible" whilst returning `false` to // `Validator::is_active_at`. - if state.is_eligible_validator(val_index)? { + if state.is_eligible_validator(previous_epoch, val_index)? { eligible_indices.push(val_index) } } @@ -313,16 +314,20 @@ impl ParticipationCache { * Active/Unslashed */ - pub fn is_active_unslashed_in_previous_epoch(&self, val_index: usize) -> bool { + /// Returns `None` for an unknown `val_index`. + pub fn is_active_unslashed_in_previous_epoch(&self, val_index: usize) -> Option { self.previous_epoch_participation .unslashed_participating_indices - .contains_key(&val_index) + .get(val_index) + .map(|flags| flags.is_some()) } - pub fn is_active_unslashed_in_current_epoch(&self, val_index: usize) -> bool { + /// Returns `None` for an unknown `val_index`. 
+ pub fn is_active_unslashed_in_current_epoch(&self, val_index: usize) -> Option { self.current_epoch_participation .unslashed_participating_indices - .contains_key(&val_index) + .get(val_index) + .map(|flags| flags.is_some()) } /* diff --git a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs index ce102694f5..ccebbcb3a2 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs @@ -6,7 +6,10 @@ use types::consts::altair::{ }; use types::{BeaconState, ChainSpec, EthSpec}; -use crate::common::{altair::get_base_reward, decrease_balance, increase_balance}; +use crate::common::{ + altair::{get_base_reward, BaseRewardPerIncrement}, + decrease_balance, increase_balance, +}; use crate::per_epoch_processing::{Delta, Error}; /// Apply attester and proposer rewards. @@ -67,13 +70,14 @@ pub fn get_flag_index_deltas( let unslashed_participating_increments = unslashed_participating_balance.safe_div(spec.effective_balance_increment)?; let active_increments = total_active_balance.safe_div(spec.effective_balance_increment)?; + let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?; for &index in participation_cache.eligible_validator_indices() { - let base_reward = get_base_reward(state, index, total_active_balance, spec)?; + let base_reward = get_base_reward(state, index, base_reward_per_increment, spec)?; let mut delta = Delta::default(); if unslashed_participating_indices.contains(index as usize)? { - if !state.is_in_inactivity_leak(spec) { + if !state.is_in_inactivity_leak(previous_epoch, spec) { let reward_numerator = base_reward .safe_mul(weight)? 
.safe_mul(unslashed_participating_increments)?; diff --git a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs index 99d08a6db3..87e4261e0a 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs @@ -78,6 +78,7 @@ pub fn get_attestation_deltas( validator_statuses: &ValidatorStatuses, spec: &ChainSpec, ) -> Result, Error> { + let previous_epoch = state.previous_epoch(); let finality_delay = state .previous_epoch() .safe_sub(state.finalized_checkpoint().epoch)? @@ -92,7 +93,7 @@ pub fn get_attestation_deltas( // `get_inclusion_delay_deltas`. It's safe to do so here because any validator that is in // the unslashed indices of the matching source attestations is active, and therefore // eligible. - if !state.is_eligible_validator(index)? { + if !state.is_eligible_validator(previous_epoch, index)? { continue; } diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index 8148747423..5e15aa3e1b 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -101,7 +101,9 @@ impl EpochProcessingSummary { EpochProcessingSummary::Altair { participation_cache, .. - } => participation_cache.is_active_unslashed_in_current_epoch(val_index), + } => participation_cache + .is_active_unslashed_in_current_epoch(val_index) + .unwrap_or(false), } } @@ -197,7 +199,9 @@ impl EpochProcessingSummary { EpochProcessingSummary::Altair { participation_cache, .. 
- } => participation_cache.is_active_unslashed_in_previous_epoch(val_index), + } => participation_cache + .is_active_unslashed_in_previous_epoch(val_index) + .unwrap_or(false), } } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index d182ab9ae7..3a0f7d02e8 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1602,17 +1602,23 @@ impl BeaconState { self.clone_with(CloneConfig::committee_caches_only()) } - pub fn is_eligible_validator(&self, val_index: usize) -> Result { - let previous_epoch = self.previous_epoch(); + /// Passing `previous_epoch` to this function rather than computing it internally provides + /// a tangible speed improvement in state processing. + pub fn is_eligible_validator( + &self, + previous_epoch: Epoch, + val_index: usize, + ) -> Result { self.get_validator(val_index).map(|val| { val.is_active_at(previous_epoch) || (val.slashed && previous_epoch + Epoch::new(1) < val.withdrawable_epoch) }) } - pub fn is_in_inactivity_leak(&self, spec: &ChainSpec) -> bool { - (self.previous_epoch() - self.finalized_checkpoint().epoch) - > spec.min_epochs_to_inactivity_penalty + /// Passing `previous_epoch` to this function rather than computing it internally provides + /// a tangible speed improvement in state processing. + pub fn is_in_inactivity_leak(&self, previous_epoch: Epoch, spec: &ChainSpec) -> bool { + (previous_epoch - self.finalized_checkpoint().epoch) > spec.min_epochs_to_inactivity_penalty } /// Get the `SyncCommittee` associated with the next slot. Useful because sync committees From 3dd50bda11cefb3c17d851cbb8811610385c20aa Mon Sep 17 00:00:00 2001 From: Divma Date: Fri, 10 Jun 2022 06:58:50 +0000 Subject: [PATCH 027/184] Improve substream management (#3261) ## Issue Addressed Which issue # does this PR address? ## Proposed Changes Please list or describe the changes introduced by this PR. ## Additional Info Please provide any additional information. 
For example, future considerations or information useful for reviewers. --- .../lighthouse_network/src/behaviour/mod.rs | 3 - .../src/peer_manager/mod.rs | 5 +- .../lighthouse_network/src/rpc/handler.rs | 56 +++++++++++-------- 3 files changed, 35 insertions(+), 29 deletions(-) diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index e67bb29de3..81de3f015a 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -1006,9 +1006,6 @@ where proto, error, } => { - if matches!(error, RPCError::HandlerRejected) { - // this peer's request got canceled - } // Inform the peer manager of the error. // An inbound error here means we sent an error to the peer, or the stream // timed out. diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 9c8d41194c..3575d9d34d 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -457,10 +457,7 @@ impl PeerManager { debug!(self.log, "Internal RPC Error"; "error" => %e, "peer_id" => %peer_id); return; } - RPCError::HandlerRejected => { - // Our fault. Do nothing - return; - } + RPCError::HandlerRejected => PeerAction::Fatal, RPCError::InvalidData(_) => { // Peer is not complying with the protocol. This is considered a malicious action PeerAction::Fatal diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index ac39e0cecc..9ac062adc4 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -40,6 +40,9 @@ const IO_ERROR_RETRIES: u8 = 3; /// Maximum time given to the handler to perform shutdown operations. const SHUTDOWN_TIMEOUT_SECS: u8 = 15; +/// Maximum number of simultaneous inbound substreams we keep for this peer. 
+const MAX_INBOUND_SUBSTREAMS: usize = 32; + /// Identifier of inbound and outbound substreams from the handler's perspective. #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] pub struct SubstreamId(usize); @@ -241,7 +244,7 @@ where // We now drive to completion communications already dialed/established while let Some((id, req)) = self.dial_queue.pop() { self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::HandlerRejected, + error: RPCError::Disconnected, proto: req.protocol(), id, })); @@ -265,7 +268,7 @@ where self.dial_queue.push((id, req)); } _ => self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::HandlerRejected, + error: RPCError::Disconnected, proto: req.protocol(), id, })), @@ -339,23 +342,32 @@ where // store requests that expect responses if expected_responses > 0 { - // Store the stream and tag the output. - let delay_key = self.inbound_substreams_delay.insert( - self.current_inbound_substream_id, - Duration::from_secs(RESPONSE_TIMEOUT), - ); - let awaiting_stream = InboundState::Idle(substream); - self.inbound_substreams.insert( - self.current_inbound_substream_id, - InboundInfo { - state: awaiting_stream, - pending_items: VecDeque::with_capacity(expected_responses as usize), - delay_key: Some(delay_key), - protocol: req.protocol(), - request_start_time: Instant::now(), - remaining_chunks: expected_responses, - }, - ); + if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS { + // Store the stream and tag the output. 
+ let delay_key = self.inbound_substreams_delay.insert( + self.current_inbound_substream_id, + Duration::from_secs(RESPONSE_TIMEOUT), + ); + let awaiting_stream = InboundState::Idle(substream); + self.inbound_substreams.insert( + self.current_inbound_substream_id, + InboundInfo { + state: awaiting_stream, + pending_items: VecDeque::with_capacity(expected_responses as usize), + delay_key: Some(delay_key), + protocol: req.protocol(), + request_start_time: Instant::now(), + remaining_chunks: expected_responses, + }, + ); + } else { + self.events_out.push(Err(HandlerErr::Inbound { + id: self.current_inbound_substream_id, + proto: req.protocol(), + error: RPCError::HandlerRejected, + })); + return self.shutdown(None); + } } // If we received a goodbye, shutdown the connection. @@ -382,7 +394,7 @@ where // accept outbound connections only if the handler is not deactivated if matches!(self.state, HandlerState::Deactivated) { self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::HandlerRejected, + error: RPCError::Disconnected, proto, id, })); @@ -671,7 +683,7 @@ where { // if the request was still active, report back to cancel it self.events_out.push(Err(HandlerErr::Inbound { - error: RPCError::HandlerRejected, + error: RPCError::Disconnected, proto: info.protocol, id: *id, })); @@ -803,7 +815,7 @@ where // the handler is deactivated. 
Close the stream entry.get_mut().state = OutboundSubstreamState::Closing(substream); self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::HandlerRejected, + error: RPCError::Disconnected, proto: entry.get().proto, id: entry.get().req_id, })) From 564d7da656803f5e06e53a303972580be54500bf Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 14 Jun 2022 05:25:38 +0000 Subject: [PATCH 028/184] v2.3.1 (#3262) ## Issue Addressed NA ## Proposed Changes Bump versions ## Additional Info NA --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 85323c1bea..ec6a98fb0c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -328,7 +328,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.3.0" +version = "2.3.1" dependencies = [ "beacon_chain", "clap", @@ -484,7 +484,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.3.0" +version = "2.3.1" dependencies = [ "beacon_node", "clap", @@ -2880,7 +2880,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.3.0" +version = "2.3.1" dependencies = [ "account_utils", "bls", @@ -3378,7 +3378,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.3.0" +version = "2.3.1" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index bc61d1756f..081e91aba8 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.3.0" +version = "2.3.1" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 97c17bd9bb..c5a5bc57e8 100644 --- a/common/lighthouse_version/src/lib.rs +++ 
b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.3.0-", - fallback = "Lighthouse/v2.3.0" + prefix = "Lighthouse/v2.3.1-", + fallback = "Lighthouse/v2.3.1" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 0cd0786070..5dfcba8fa1 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "2.3.0" +version = "2.3.1" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index de9875fd80..35fee80315 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "2.3.0" +version = "2.3.1" authors = ["Sigma Prime "] edition = "2021" autotests = false From 7aeb9f9ecd8bea3fb2b42e0b67a269894ac34a58 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Fri, 17 Jun 2022 03:10:52 +0000 Subject: [PATCH 029/184] Add sepolia config (#3268) ## Issue Addressed N/A ## Proposed Changes Add network config for sepolia from https://github.com/eth-clients/merge-testnets/pull/14 --- common/eth2_config/src/lib.rs | 3 +- .../sepolia/boot_enr.yaml | 1 + .../sepolia/config.yaml | 76 ++++++++++++++++++ .../sepolia/deploy_block.txt | 1 + .../sepolia/genesis.ssz.zip | Bin 0 -> 136489 bytes 5 files changed, 80 insertions(+), 1 deletion(-) create mode 100644 common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml create mode 100644 common/eth2_network_config/built_in_network_configs/sepolia/config.yaml create mode 100644 common/eth2_network_config/built_in_network_configs/sepolia/deploy_block.txt create mode 100644 common/eth2_network_config/built_in_network_configs/sepolia/genesis.ssz.zip diff --git 
a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index ec8522ac98..9cea725865 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -238,5 +238,6 @@ define_hardcoded_nets!( (prater, "prater", GENESIS_STATE_IS_KNOWN), (gnosis, "gnosis", GENESIS_STATE_IS_KNOWN), (kiln, "kiln", GENESIS_STATE_IS_KNOWN), - (ropsten, "ropsten", GENESIS_STATE_IS_KNOWN) + (ropsten, "ropsten", GENESIS_STATE_IS_KNOWN), + (sepolia, "sepolia", GENESIS_STATE_IS_KNOWN) ); diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml new file mode 100644 index 0000000000..abb3b1250e --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml @@ -0,0 +1 @@ +- enr:-Iq4QMCTfIMXnow27baRUb35Q8iiFHSIDBJh6hQM5Axohhf4b6Kr_cOCu0htQ5WvVqKvFgY28893DHAg8gnBAXsAVqmGAX53x8JggmlkgnY0gmlwhLKAlv6Jc2VjcDI1NmsxoQK6S-Cii_KmfFdUJL2TANL3ksaKUnNXvTCv1tLwXs0QgIN1ZHCCIyk diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml new file mode 100644 index 0000000000..95587c2908 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -0,0 +1,76 @@ +# Extends the mainnet preset +PRESET_BASE: 'mainnet' +CONFIG_NAME: 'sepolia' + +# Genesis +# --------------------------------------------------------------- +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 1300 +# Sunday, June 19, 2022 2:00:00 PM +UTC +MIN_GENESIS_TIME: 1655647200 +GENESIS_FORK_VERSION: 0x90000069 +GENESIS_DELAY: 86400 + + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x90000070 +ALTAIR_FORK_EPOCH: 50 + +# Merge +BELLATRIX_FORK_VERSION: 0x90000071 
+BELLATRIX_FORK_EPOCH: 100 +TERMINAL_TOTAL_DIFFICULTY: 100000000000000000000000 +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Capella +CAPELLA_FORK_VERSION: 0x03001020 +CAPELLA_FORK_EPOCH: 18446744073709551615 + +# Sharding +SHARDING_FORK_VERSION: 0x04001020 +SHARDING_FORK_EPOCH: 18446744073709551615 + +# Time parameters +# --------------------------------------------------------------- +# 12 seconds +SECONDS_PER_SLOT: 12 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 +# 2**8 (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs ~27 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 + + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 + + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 + +# Deposit contract +# --------------------------------------------------------------- +DEPOSIT_CHAIN_ID: 11155111 +DEPOSIT_NETWORK_ID: 11155111 +DEPOSIT_CONTRACT_ADDRESS: 0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/sepolia/deploy_block.txt new file mode 100644 index 0000000000..5674fc3e57 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/sepolia/deploy_block.txt @@ -0,0 +1 @@ +1273020 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/sepolia/genesis.ssz.zip new file 
mode 100644 index 0000000000000000000000000000000000000000..1321634cea6faa40295d0ead46ee9a0479e6b529 GIT binary patch literal 136489 zcmeF%hS63f( zHFN~zjk;2k{Q6Rp|7&*l`i$`I$G9+%PtX8(H^ui+B z+uI3P24VU-4K3}qaa&4-xEbqm7V#ry@$Ki400)2-0J*=H6PfIY4J3XP4d>;M&Sy9fDvgP_A%`*B=G8PrUX0iS|!DuAl*Xg zT)s8b)`Q-qLaC6|9@42e`+9W2#8(Y}Ue7rO0M`Cua==AiG50fFC;9#h|K;Gnb@1Od z_>Y4BB>Ydp|0MiR!v7@vPs0Bs{7=IFB>Ydp|0MiR!v7@vPs0B%N%#n8ME-yIvh+!j z1jhnS0epiH3);xOe5Vl>(OEt+JCsF^xhKhX+JJ^uFMvB1iUC)^oHsAOO8pDrlcRy>p@0`z|LUJAFFyD|7VBMF=*R3GvZa;bt0g&JwEOChE zkB2oIO1B9cFMm%M3+aGyQy$;7CHbc4HTayC2J8ltle?Iyfc-i(Z@X`-`c4^h9J94a zo2)L?i4^yj*iHH`T|0FR;jyk~Sx#6y*<>9Z8n+J4{Aj3x9rHfM4ac|FLxNAc<_9da z*|!j`a^04npmU)vyzf#uY5RZ3*BWe~nTXoOXiVvTcK2E$+~nThTobigXNJyGO`fMx zX3qefyfw3NwaV!hPr`G9wO}fBEn>==O|#Gw>gQiN0xV*0?v?i}fXmGs;U&PT>cPt@ z$ed!r!Il1Cv$9)QK+|_RF=Mo1E>xmFdn~6|&0|W*zCP;%Ct6J^R(Wz~`ka1ZzGbq& zLi(wf-Q==-Ms7x2eP~BrJtRYryqg9@R_PvjvbMTeVDuVYXDAMiN7`PBQE|xI!sECP z5Auh|QpH`cJN=O%drUonS;ylN;@Y}-2E5MS<9O;odoQe;i;bak3V4eUEbZqk4|7+S z>t*Syu>-T;z5mW6is?kx1E*v3@`&q%gTB-0%Fm_dLg2-Bxy-Vl!uy3TBGMV6O{ zfPjE#9e&QfO&Rj2YQ*|ZmkBwq%xR@lLwow~pQX59ErpLA0?l`75y=4EMstx8xkQJD zyRhc&HH=@{ewXdJVI@+&S(R>S{x2E>BD@Wo4VB%rge1jc)onq8x^1_x`(n{YXHyQ% zf2o_+%b&L=*Q456XIqAX3W%=P1mLo%sgNf5wdaT7Jc{xGNY;=3zKeBv|8CQ+g*C&x)d)k)qIBM3wH0K4>dY)?Zf_|P&V7!rfU(t``fdL@ zu{x#BhW1=)K>>IqyXbW9oPZ`8`fzet*_wcxbK1YKfAkRkD9hY z{1ZiNE2ExO9N~K|x3fuAw?u@OeY*^;Rj?m=P214s=;W8R2EWyhS>o{@n=0XF$3^a~ z7}bvf+r_7<0erXD2_pi1LvUBA3isoy^VfUpseV=6lr92V4RGJYq&n}zc|`a3^$ghk z(oP9f9G1TJu$HTRx=><$&APeI!zV19m1u3;{j1*)5nm*d9IzB74F;aA)v|cDvbUyi z+u+VP!?x=Of1@Ajn_t%ChJ^v6>+2_G11N1F>xvIg=Ap)&wpsEom%UNz1;`#Wgc&sp z!7y~w8@U(9(m%ID`hNG8s|NZX%4Y50od;u8n)?l}ZjVoYV?#Fyp!%J5<_R^3_-nk` z+rN5q!*~R{Z|SvT_E6p`14=H>Wn}e^u}b9kQO%z$&-J&LN|YzCSI1LWN5iZLg!M*J z4|EQtxMaCd6RmH*+eL2F&vW>SNz9v-+93teb~$qoE@;gy3F zZ+uIB3!||t_T9hI=5~huV(l(x*+BiqmuReAAO`if3q7(EVIHY(vx9ggfKQs=E=-Hn zN2u_YpqEyVoI1t{KKOMTl0T|q#44La9vhx&A{-lD;xjmXfUHSiKwc|57LmiAS?Zf_ z@hUFOfj#J!{;&I25O!_=i2PQAcK3wYX^UZ7$lgxOSg3bT^5WA==56tq 
z;xl4v5x4%l03T_7CfRTC7gjib2lQ~Lc;&u|NUo;+M(=xneQtUtB%|Lh0U48;xwCrp z+ggJP4N2I}F|3^S~G!N(5!ouIiEsuz`$3A)lr6@nAL)K zmaB=yvi0V^?Y7R529{!dC3f}S1Ik493M;)C&87~V4(}Y6=e|Ok^<(5(Q}ZC<#A(@x zV&>~fJa%mYm{(odd>!jiAX;MerG#dwkoNfuOA%fNCt!Fl0B8wBfp(cy@|yF*J?NC% zR5SV=D`%grfZ=(VJ7>!^-W})HH?O>I_YpWH_v#9~=GGBQ<|IhPcZO-TE-@9s4kt&; zSH@rx&AyOIoW_@lgiCtbGv`f}^XQ+Lis@vhR&k>MOQxsoiS_h-5$*ivSYpdJeRk95 z)fM}c3epAh-kLM>KJpjNR+-5t>Z(GrFIB&ChvN74C;eoIoad^Q2^M^l9tT5D#XS_l zTh0{j&t?JV&c-emw7#`CEa7amc8ADij^I-A>$yec^(=j;_1b{{^an6Lru~eubZ=PL zK5(UT2O3a4CT~Mp=d1YjHm-7_`hWIgBnVf>eEAD8OV(cya>8O3M@=oGWSOSzA zCY6=Cd0S-O|Od z@J5fjC-#QE38^v#eHP)e3E1(hYEBxf&_u9i1{LBSL?P~UYf%#D`l{XJeEjzX7+vi_ zuRj1Gdn4_-3aCi4j6{4>xj)XbvTaDHUO-xY$pYQw%4+M9DP*(G1yTr4+9~@U<+7iO z@YuBQBr$*xS@Nq7qg%&-1nnLfiLcEguEY$~3TmX9_dzjPcwM7eBKku93KQk_>{;|Q;iVaG zT&qYA@);W8&X04zP482ks{d>k-pv4Cv5JIpfA;Y@nOMk^&7z3Ek!?z(S%^y2tX;0< zoQU5U>wb~>yR0mJ_gz4HLlzjK%q&IEcXk>M0y4~NQ!MhfO)NaSL!+5%I>8~OnBdMu zw$tXh&2l~8bgzR_?Af<0Vt5$COeN2zhF&zQ=}7H630gOl`D)MM3I`apD_YgZ|CBe9 zVy82XUVZ{c=;(#y)TM$8y>5t^_~F^MY<7f)Q;)aCa`Q7;*WSu(jmo(u3t*MZ^Km3G z?lVRO-pxf#nY9N`_^pxJ(S8n3Lnk@lCOd;+md4;&SZ=$R@-fqAa^9ovt)K?vgX{T?eK%k>iHY9$t0aU*nmDoNaGPPeA2(|3*2{=L1nxh zY=B0l^WiJp@aky5rf(QwET!2i&4k?FtWbM`>{&Ew$H?5kc53v27c z)(y(C-J9McL@c9j4kFaP@?yRc>FyV>zY_^G_hBUda&s~8o#t$9*~R@0RK)WZ9(On# z38iF7Q7~-k6otz1@Vt3!!y8P^IXq6E>&)(3cq}sw;%%KrdRBCf0izwXz1LzOd|a}d z(-9=~>)TJ7ZL4R&o!DS+(zm_+4a1`(L>rAm!>{0nSKsSsLJpd7s5Zz~IIHP3%%oXL zkm;$;cx%5IklVdeb;XlO==we1 zE5p4zc7HYY7HL>QPrY!bo4_vFQHVS?`N-thKF~Zy-r3sj^Q2Jr9#7=9ldRHzwUdm+ z^UbTKZ*9Udf;j+Zv9s8(kf6TlCLj)^cSduoy5Fi{)E}%SCGry6@;Z5OedLDf<2Ao< zaqU1rlEN{Xst_k&fpUnqdz8=c=!3?Rwq>7h6^_{%5Wr_r$|w6>!U5jkR^FNh#E_;) zRrixmo0WN=_h#Qv;Cn_ZzLhCFy?c2tuvR6Q4d_u^C;RkKsN+E`>^Fh02ux$^vC3J$ zkDO@5$EckgZ#0EeY(W@_OmnS!fz^jD%or)B zvMq)I&kat{O)x=lH+AgZEKgXKT2#I8&ROW1`w`P`rLSVWEc=e!I;~Ew$3-;Hcn*9H zN++$dA1e8Gyqz}oGrE6$9zRuXp6ibw>#t(sK3JXD2|9(O+&PLEUp~tqFB7!hJM1oR zuoy3&=e&%~>r4l%C2iW?H{MPHPi9@r#^}v8EY@GP)>`Zj6={i}CmYG1j8!FnKX^(% 
zX4IM>k%JkDI~N9cHb=kxW@TSKDqtdzqSxdTTi?$Rhk=9uHz&ls6zmYiSFh9R&p8T; z51^5u1fspdDuMtpP*c%$$7!?oY{6|iW|07Uz-V;DEk?9;T+KicHU|}=a0m!sFIl^b zsdyg1=@-y4o%0=gtS;saD4)z^qd+29YJHd} zpj{6I#IhKdNxzUA+FoDTwPt?UHn6$_M;02(QvASeX|IK>W^HFfF|u-(D=y0{$tD2X zrgu(;XDp@D!pge_W!cC=g6yN?VVx@gxq8cy21a_hDKPrM}XHcM4?=)ekrju~m&NfET zA&UpE_3;G9Z+B~uT|;vDx2b+btpbM9+!6O3)3xiB8nQG)xg!@leogL?WeDmmwEF=6 z%$N9;uQJ3IIJ~w(#dF%?3XE=GBa@_+3nKTg@rqOm7(`dw&vdt{WGFsfsNq`c6^%>7G18zFFhpeP=NP)*Hfjw*%}qTJKv|f423J~(@JqnD!Q=EJs+CAHNm68JpL<+Y%_)DTpnsI z`w~cMzBx|TLoJoee>>1VmMbV|-akdfb~eW+cKPw$Px?$>zv!Mm04A}JxX2%0;zD|J zKAC0U^Vm+}WH@A6@tQmCvfcs?{I|WMS%IoiS|TF9lbMfIG~1mmy#HRlNaByqI~m-( z_m2690nsDm8Ohc})T|EorEz;}a#WE|_q|dHv-GZ`;7jn;m=OP8VR9O9XHjM0NSukO zPd74mwZD~f#BKJw9PRjtG}GhlawY}uSqc?R`o7dDfq)Eu4oLJ*-N^gvZFU}oMNa0; z)keW#v!{B8przq~<)*Af=NE2^?5;`LlEp^V+YAuuNS*&G#>l5hI_!(TY3jk8cA+=MOO z?}YH?VjLG>_UdzTvW1;jg0AI#0F+#pi00)Qc_kEx$kHag&S0Q+nOt6N{XOq1PwTeW zQnZpK$3}w4oxG0iEfqq;L@yZKYBd)LgJPW7Ie&F|u3%unPw((?%s=~IzA1Pe*ZM99 zJ~#L)AD2||WOzKLnyb%oC?EUltC&uL9z`Z9GTC0cYTiP^Ihs|H$<&Df|5lk;Hj<%X!AN)iItoSm zh0E?>EE$h8{rvp}$^ZPDWQ~j>HEYMn4b*a{Q_K#*9gJUo1e`$={bx%+X*qVy%Hc^rI0N5i;)y40S z=+1N|XB`d3X|Y)0RFM1)atGJz3@Z8XJhRVt|6LC9gtOl%Qhz_ZC(?GlNc#MHBni_~ z#K#G+eMQx~ZWQ@iI{WOJK{Y3QZ9crMtth?>196v(#e`mBlj$z+>~lx%!K^jhl_qvt zp}z!U8)fRhfoPEfVs2oeIHq^PGIg9Upk1EyRgxA)g&frh5S++yJzF8z7YT6MzEBs)(M4-u}lvu@=FF_eh+tejk_;($N>G* zY>=$0qW`CPBfZ@&Rc=WJ_Im%POwVoMhv$p(*khjLPzRB`^?yAL5MbzBOAU{IW3S=< z_({^q(aXKO4Sokvmqr#uZWpnqD&%V{2Po<(0GfJ4rMq!jj*7na6l2w@#?Isyne6@L-0?lZ+BR z(|)*watzV8CAzr?QFs&7Rejz{)SzK;jA=5`iSVqihk&#lygNJ)NhF23bD^!=pl4?P znby9?X%-C9EZ63VThRxSza}Dr>fZN0qKXePWQc-J7X$4~4eJ+}# z*A26hozFrXaxgHoo~}F8m1i=$4Su+_4MB900czc{MOyMyijlRirZ<4PfJ}h*b(I+H zn`=vy;o(x=GaiNfa7q(wLXYy&@r{ksd&ztqE>OG>!h_6zj`sTfDBPxqy;#15ed1Xd z3=*D<_f`2D6sEZ*`>?5Vy6vt*AX1-avm8()=C1E%cR$vwI6FV$@`Kg?G3tV;$&UDW zd%*R>w%W21ht9zjrq1N6(E02{rAsp)k^5lNd8KL9fsbaXRPF7gbLPS(8BviuQ|`jH z$b`+L$*|r59#yq{&AA@%HVe;Fz2D8usFzvqX%c<(V-awNv2E|edEyz5br1XNY-F%u 
zHrF)Eg%{c+ELCpL_Z!tfYR9bW`*!k9*$(oQ!nOJOFmg}gm4m@+`LJ1o@nu?Aq@Aj) zpw_GL%@rYE_hIYde!4pSIl3Q=-FLrtu~ua7HGRYFR4EL#i`*Los1}lvJhhv9Fx~m9 z{71w^f;pwiVjJ(2ohFUV7}nc1cumUEU}ZJF&E)FNPwaCSFA|^Ajbn2S%%XTpVp!;g zfU*GbG#6J|c~vwVSw3D>cTXlFcUnAVp7XrxuuNfLlPhWYdpLeoz(oqHIPGh#6O@HX zfs~l*_9ah@txNG`_oA;RW9u%}Tg0I}$8pg1;#D+a53+2I#<44x4aA#~LXjhi`)&rO zK(-lpvhZ3ulflK%4`$G3U@+L% zuduH$82StyEGz!~3=BQO&67tu?5qCzJ?o`(elFxWW;AKglu;_&LmhW&&-7Z> zZ1-esLee0xuO%sCNHUvjH!9+rPq9@B_h|MhnM25+GJS3@$vJ(q^+CVnL!*~uD> z)0@%TwG$@Nh^eQqR`o1$N;D@}w6;+z-$!S5|Ap9kDs6f-A=$YMnn_BuZ{Q(9jVQW$ z(9zzwB56hh_w*U{tL|lKKfIBNb3p2?bnhS_c$6LO>UN)r zkmfF4I6vJ{>L{kW5Ue>k6D&R*^?Q-gijIma%Jp9V?j?E|I%} z7|0)Bmv{8Z7i!NKC5l!@6Fslq)+E zQ8xx!DGM7hthKA`A8%GbY+5Z{prVp4!VIyS$IR(cp+=8XQM}bac7+ubg&S8?@uw7N z_9&MnN33V_!Sy!EW+FuY3r+by!Xu~w4d%Oxks_09Xw`)Tdy-Qvo`C&k?WO5butY~Pwvbk;1-s-& zcLRH@yC43euSimVkXz57XAusTc0VBR%7vc6GkNEu$2l6GhXue=Jye!x%H9pW#3ryXo-Z}?r9a1f`q zSv6#ld`B6467A80ycT0D`YVFaDuroa>^9flz<9N@EikA^jC{X#yzcS$2kT0bC?mFw zFHYWsje%9`zuv#wz@78_QJaU8hfB8&-%$-bQ*YM5sXcIFG$8sDv_JBmtR+>Z{08$( z1q~^x#&UQ*>e&{1j`-xp=sUGxB7!!<2$9ug({SQ zjCr>flB2$;?!U62jmL44}PIsjJ(@=#`_%Mj||D zK;!D`Fx}bfe9q!$7f0!o>6U4S_{pQznTQy^tU2Ce?BhpPpcu6&u7?}d z1-4JN9=4EvXRtJz5g1^t!b=E3YiURv2Pl#ZEpxwQCKu1H1DVAADXk^sm2=Q9s3$d<{Rr;PIybw~68D+X7FcMG@gv9EnzN&hDBN-ZocMpqoWpY;{B`nCtf@|peTMJh-qE273~ zR^yMwj~OJ10o%JddhBDkXO*SA;x)76nWDY88P|6l;l<=&|5#{G5%7uXX1Ry(Uj zBd{xVQ!9%9`TZRX_LDNv4*Z&c?&a?+MOig!+hy6wgWf5U<3IDSCZ=6&V$net{E6WO zx$Cp5Zz}hYs?4;IyM&s$lVIhd*+Ms|Z^@EjjlV7)^LeJ=z&?3F>{qMWupDkJXM04r zM2C_by%R+s08EwroYeQiL^(G}d8A<<|OC+k4Mp5-L_1Gdaj3p|Vsf3U`@R+NQ< z5yW|?-vzf6m@gf69Uo0~;yP200oAkmfJ>3oJ-$-;4hnO00t#noCfrj`vmEaOOh>#` zI}{OVE7tod~Cpzl=D+=Ggj?}_f{>Gj6z-vq!W8}yU)_y4Zy zJJn0>R6jdW{L4=stt)i~HgKknBQ{l*zZ>jw*$IJ!emqH^gm?qoM$oe?qtsab<@|J@p1h%4U5IqsW-a)#b>kph z7{t(4Uh*Yb6i--U3Pw@drDpeQNI+JM?w=7z=|!{QC&?>lvhnw@54*W9$|3bbg-3YM z-wUQ3r~*2so?9FnPp*M>L2mWG(XUCd*vG^6#DpDJ?AV3bCOSSkOl9P}h;v8OE}v6S zm-qu3e+f>V@NUPv3H)?>8zQ_RdLs;Y-%;DXDbdI9G1@psWhQitvXqY_aumY5(wQMx 
z%c##C1=vS2pL42Q-ir{!cjTv_$&LFUeMFOi8h@=W5TN)qkQ?o>iv2scIs9gyYVA27 znsJWYkB93Es?-9?lIppF7VM+V*k4idckSIgN7NEB{|t4s)_ZTeSy)q7$gU}u$XR@` z46sEh5~mUxB;Y=+=bul|0Rtf91!^-szF7=*L*OzD%}dIgzGT(<9izuzqP3Cdo-C)* zRufg*sn&@>RI=sJv{2ZBIj?FG=&yY^cEh{a4!w0`rMx=}!CD1)ghq@p1YcUB%fZzP zD8{Og(nSOQZH)E7kp1!=yKS*sWN%Q@^inL<*~84#Q7F7S?IALcY`V>mk4$ZnS+77! z?E8q=#H&ev(z^f&5(VZIxAo8ZA{r=Nud`?NT)5C9QO16u8?M8@2!^6mc!F1s8ttrf zOLwx`#dC}1(^dKIIy`D6gf8T{T?Cb#Vc-7!##Xru)s3Iapd?oOAU2v2s&%X|+O+tb z<1(G1zVXQfQyeg*X902iINlhfc>vNQN)i6wOED-k>wzJixr~ZUO5pCOoqA0^^n{sU zs2K0mLNXUab&P9VPLs6-n-bhO5fIt&ocgIDb>+pX%7o?{Ync zk(t57SeDTZlyhaZlTG`aNAG6#qptiLzz0jwU6vXo&Fqi*c6Pr#`)jmCjOF*^-)$)< zUv3?TCkrx{I`giOC*lq`&6%VpL=#_;YveP2zp#`eP>Qx^OdzkhhF(RuT${_xrgJ() z*}Tr_1^&gm7x*S}+(V{=Q%E33gCWq-_rwzDSA`~pnm)4WNI!s^+=lRDDT~`qRaf5oP0nY zk1A=hng1&jM{H@%Ka({#pITZ-Nd~+7LybJ&c5f)h#uF@iX3Yp2qlKhzJ*5_Ve)^-6 zZ-*Ql-wk;-7594fhf^9}@TZKj-P9ccckN9ahj+2pqgV+nUr@zQxws7+J}ydyEhsR` z-NHJOwgOjz87pqj)rQj@Mtl%sQbbE}X~3abnn18(<0MaKzOB6!DI9B~ny$j2o?GK|KAN<|*^t^J z;Ogzy;ZrHrJgR;j&-j3_={s*CI9$R|Twe*NST_)0R1AKzm_L(wZ zAZ(ViYzX}gAal&n7K4j{&l{#l`o#sLIbtyH#vvZHF-YM0!@mPIIJNgRu-gk-Y0RHq z6X^L5X_E^g5B`did>?qjqkH)mDT(FU zs7{~SrRY{h*wLf0V|4upoe>{*{~}ek-O*`c&LaG)b~|#^!gfzCVO0ZRCv2aIzWJOi zpY174mU{~sAxVRLFQwa^?S=`PWjBR%ZO}hFnU?K@sYXy@jhb(RsRq5V@LfV-WPZ01 z>P#(bsy)61@RQ($54Ry1e?sW1vzGa};v{@&@ylAlN%C-?B?ZDsbY%VuR4b(K&1njI zrb>ztDUsT7$x@@B&6aOOe&A2Fo<6?w(OYJyv7la~0}xKR$X12K$7LLYIq|)arh67U zpX%>7_Tz;_LC&aiB0Bsa;J^;3Gx1V_FZ%&#Vlx~ardSUuVckS9ryeoo zJgaz=zKlh*D;RS7&{?=ABgq8~)d9#J2#W})z;JSYH5QQk2#2V(yU4FwQ?) 
zCK3w)f#4AOiF0=C4ecL+W3;Z7I4B~!b01Gm_p^<-UU@i7Hp`@&RTg=EIL_6 zEU@NAqu8!%pq3fp*p&u#qP~tbUJ#XdyPdq#F|lmp#?2iYdpd5j$g0LoPQk5V{F&ud zgW*^Qy2|Uzg?c9u!%IEI0<@yjN>Z=XoQ7J>=rH-N^kV-#p$jO$+)Xl;+eVxXe*T%F zO-^ENcVAFU8cin)tjG9FvZeTU?KLa`@F}=}@{?N$Y;>IS?8F6RX$qX*fOz%oy1heD z2^Xhs3n)bbes^6LkYpjHALe$su~*|0gZ1^;%{HNftI%8o zxR~8*C2J8w9$mhmSN`?GZ8*y}3b>WFS-;VE3545t_~WR`h5EXx=IsOu_sX@_hZXIX zg)(XUb`?UsGZ=3Rs|1eLs(LYYmJF?@mg% z?7FZrgPVgYHS*l!FzOTK=2Fij!uuTT3uMVZ^lS}mCJVo@AN=&>^zd5y4#3*^Zfwor zGD`yO6Hu5(ehsH2?bl^k)#kQ?e;skw3$Z_yn=|=P_{y|o`SQD$lMPNJAWL5#&tN;% z;u3R_e1y<|b$Vo(z#KmDh%(VQ5Kq155bAoE9(eyt;ZgMUC+yUA)0)|lHLzqY7ZMt^zRcY+MJD(q z&kWVDix&o|YkS>2fCeQq9`tY-R2qgP%v`ipB>q)IQV791j;ab6{=Km#Ofz3%;=#zc z+=0LLYP`acZD|}oe<_D-6gx@Fnj@UsN9D9q*AQypg{tA}%YCpdImAxE|NMuCb`Fiy z)bO+GL)KL~Yt~AH8?Y8M-$R7==xns~r1q1hflw^T+o&PsgsU$zu1I|1PId%3y>Ft@Rh1v+wtp64Hy$@m_EFDHPO4F4UspY{u-RNz0K>7{Iq#6Du{ivNerEhwn zEP&dfWRL{>cKP#d%`&_LMu6V-!>J%|w!yH@Avq|3=RMu1pNwQxy1k*lG-?3*=lAI( zTgZW-k3UK0``vr*1<1?`dulZoGKdfF@sxJ}WA(}pNsAjK1~(O1Td6+1Us{F4+@L%! 
zK|o@73!!m%*QU2jpA|fb+sf&w`$)J-ayQbr;T!kL2g38T?-DTwy*ANaY3TCX;NXG%ryfocUSG`_mtMO|D{$N!-}X1@6Yn z_dFvMhpR;d4?FKjib9=pQmUC!9q{s!4b30rs>eUwTZqUl*c2;_kL3mX@`&VfDnc{raV91H% zM~^&B25`agI~5N}p&<+~XJ7gO+l3M2Z9fH-+{Eg+o07y#PM^?G_NOc>)#Qx8N8tQ^ zriE9hwi{k0g>P8+XeKLyB?R9{Yn;W~djv}}V|NBONV0YS`0uw_AbuqI(8*^E<Khj3$e&U%C1oZHR$k$6@)nYSJBEJ&(!(d*Ktl%juCUTf7-V!Vk0?Ix9 z?Qe6eRXLc2f``#|9^g`~r!7`_TE3Uq(MmUkC#xxyG1n`ypS63yW-yxlLb%_!5V2S_S3$et7v`1}m`~UjA!e_L4IE35E6RDo^SbqzgYH)(h+*-6OR?;Z zs~h?yEySM`6Addd3PQ*#MWN}ToF&}d$Hdl;0d!ao+y(UXEq?u}nP^*>=Y?j`_!UmY z|1#A?qL?_V^aW+Mmsa5dF(f<^R|)8pyG@``M4qhpbii!b-nJX(dqzpYLy0+IHT8G> z3iI(m?FzoesJrNfF_lOr*-(*qoWWra>iy#Jshb${W)3Gzw9Xog--d_i2$ufS2vWhx zsKB=4^AX zC?niul>hUf?10PCZ435}cV*jKK2UhI2zh@cs+cBG{q??K7a>DoVoB0O@s5fhcsFq`WW9^Bi_JFi%^(*k zmWg?+eKzIli%DNXb1?;fk$%8#$sm>G$r8t5Jj)AmiV3Oh3k!}#+q>Q6nd2;x7s2jt zEey~(4<8rW?yT(FfLi8wYgcu}?W#Ty!Y?reDg;rBxgoM5E#X%}`7I24i#0>Xr3@tU zbj&OC>uh~OxRM=;{IgQW6<%HxVz*5APuW6D{;(C@b^CUGE+}t8Si=w7QVY5Etf4;o zp9gBsWpdc8io|e~L%E)#%m_1%I{SF#ow{SaLQb_WLWKS=+foe}LC;Hdf-Hn&I}=EiCCyqp&>9*n;DJ$n?}M!|D+zGiL&;L3#br-xsMN^DF&!hco_YBYLGSO1phBK)JTVvpAKg2N_D zu4HMnIs3nVuf7~sGiDxy)e&W&6zq+7W4GrZ32^2Z9(p_jSblt?f(h3V4UzC@qi^ZA z%HQg3+u*4+C5T;}5TA?IfdP2tJqum2q!d{sJ+(PnH%GNIy_@rv+Ej|HD3f_a z%0@v!7q`SeX-SNd+;mUtx}B{YCpjX7QF2Veg!KXiL-^LW%H1X0yNk-<+4BHRSJ3_aFVX+7%^&EaJ?e2 z{84YEjQ3$ceuk>fjf7(!Gv!INA`0ahfd-$<(+?FzN+}BhmT=NPS}`Ay_~GN%RiD~- z$-?zOsFAjU&kPr__O-qwjl)e)Z=6k$o}{ko{WTp~;m2x>zA$5C0D<3j`er~Sn@_lb zs0?jW8l{g`?U5MoGisIhIR3fs{D>RkK1|2@sgS6gNL5n)7#Y_#x3ehUVqWkJA>P}C zQxxqe_)GrU^48y9rVHBT`(hL9UV1``g68wLgEqJFa@k&+tO1_|_NI?s(wRX-@w$>#c)_6CSv~_2E)H~PJmJK#2fBg+kv^CJ)brng`sX1?%Nf4v+ zb_^@5^O@Rq8zSZ}A)!Wc08VDhuZ_5JZ!_O;ydFX8(%%@6^69ItZCw^*z3?+Dri1xR zQ5+!g%=H}4_a+0kb5!yAkz)V%cMdBr<}nM+|B<$Uhd82YSEqCAe%Ws&dzPB=>wmupxG?d4s3k}AxgF;m?O2+1K;H2o1$KpR;$y^0{jFnU^b*W6 zuw;y)$d0;$cV;WbU&TG}JZPt?q}=_9MsTnw+Z=NitP-K%{vtT-tM62qU%%H^5UaYp zLP?d1Cj}-QD*mXjA+CUsasX_gcUe=*kkb#6q0$N<8n 
z^ZQ^C)2e)|L-M}}3|94eD7kykhtqYPkN>{Ee-g@GmK13;$o$7b$A?@%CvYKh}0-LjF*u=vda9fPLoxd)TKOhju7yE_pT;fV zmEz=4G(3LiV^)u$0vXzQ2ncM1#dxIJ|7}@M8yBnii1IQFMNzR$bRQzV&u1eN$jODbA^>bE^`tT<;DM= zvZz;bQM-}G*se?d>Q~a>tq#^cb-d(JRzr9ePgJ?GA?$Y~a$iqoS|&JsxDkQ7akAHc zAr|pkdFRM(;}ZW9x~QTw=fZjMAb?pAq9DRNMls|)$>x7QPtv6k!*{A9>WY?>O&+iB52gq)Kc}4~nxLtq5OMVHhbw zY`v?ki<1Y7DxRKG)E@&pRTH}Q_PHJ-Jh(RgeM-VO0E))bs-(F~RzF*`ZEQ@TbF&rmxSvR5}A zeHTV*a`yDE0KSxN_=mDOk~PK_Zt>xN6%vL0PO_8O`AAZ~Z?d{MG`CLA<4JzHo}@^cq=ZS#+ccj^|P!KH{i-yhdFENk?m`kVb&0Xc|GLv7|c#Ue?9lihmH% z4wB-s$IL(eW7jVrvIKR&i$OJ|h4QrM&IA3|PG>l9XZ8$)H7nAsa85y{Nq4Ra_q7oiH#`wOXu$|@HOQ!Q70uH0Q>LB77bZgLV@bEX-2gLd6`XQ!wsfNSotQq1Urmsr>ErCdJ-m6C<_&@pz&#YW>tzvI zRBWh7$HzYAaHU$8QH?3xX>udHH~yCtJU4k#r~Bce?zf#CLtxH>_^$`Lc(_s zz#AyF^r=+A{BXnDXlivUEFY3fh7^le7buu7sQBp_bl{+#yFH6jit_dkL%Z>p>ZJ9A zLdheAQs%H9_ip`^g-%_8q=1K!Kh+W4D}Oj2x)A{)fxKFMtMRrX)9cl`d*0hMBFicD zz^T;xq2vy}B{lH;*v76`GGBnab($|((3DV5C*OEh)Kij)vMdoLz8D$8{`#>=>Jo?+ zbYwtw`a3xV6v`h$A{!pKVlr%96wo4LK6EF3+J+^MbZ?5dl9Bu?A85gAW%Ldtj{Os45~R@sx{O$VU7jsjz+L(l~=PuEEuY5hDM|7vn-KWoL+Pb3k^6sOB>s z=L25ZOGL_Y#Y1F@P`iDXeVv`!iFHQFqHXNoaX2;esCN8gF__uoIv?VjnnZx4fG1Qh z3e*brliQr9Fr`Sok>z5L4$$o2K?_Li>)YSyM*k%NuVrR(-8t%~+6x4WjWTv`OO+l4 zxnjtoFU;se`J}aw1h>WLlW9N6@sg|gU+`}ptl|L>;qQyDH#WE~SP_x#;$l&8er zrn@q4&tvoJY3{Mn9@vMev4!`#> zou+=qv1>r>CkNXYaf3k_Azrdy^~snKNSHUPBagA{&6FB}Aj=}BMqHuRZO70lenf$# z(6t6{or~f+E_$o1{LV7-X|)J7soR0BiDM?X%2Wg5jd++fL&)mtEqE>`o4CkCu&sCZ z-WT9xYl4_ft~4D^l`uA1aXC0V9ApkP;ecoVg@*`d1%40x53>2hw0m2$@5J1V(fayfS+dyy@dO#VwnKbC+*Vf*<~3q z(q`_tTisuR^m@5U;In!?H9=jhScYSW6Q|G+WxaY-B}zSQ^}5j*5_S@erArla9hkn5 z4ak1?SIuuTAq51<%X&T{T8I7(Oi4UJu=_2<7mV^f2o|BUU^|oa61;=0Ri7@EWbcuX zo4e_x;2F0$r$q0wrZEAj1M+pJqGd?njHBveDxIcPBE}ClM9Tww0CO}GR63-`3vm+z${^}IZin`$88qNj_{Kc~V+wh^Gm+|y7`jWfM7Jt_j z8rx|%ua3l-_}K8mGtaZ3zr4V=LfO7{YD0~GaoKjqJYw5qk?;EHkcGh-X^pkapCK-ZW11rx!g*gsuRz%Nf- z7yI>;Z#K>E!*Wk;5B%7M&=>4awnXU@xJdkk;QiH9p$L#p-2JODzZd!_v0)t^UW%1( zi?qi-H4n#ZSzE4S^570;^j<;NUy_~4Ysmo3K34|J9qauW&mE)6-k>;YI<;)2X7hTn)29_RV3Y=hy 
zAFf{60_SkK>>SS@t&uwI)L0mca-(qfLyY!d61x#d>g55jgt%1eNbp^_QX{C6;H!MR==2f|uo^08*;nw75?%aBG%#c)BDe2oaSz z7kBIkMNS9ma>x%ffcWlC{|T$2_e6C<;7<2K2w_->creu(DK}Nt4ff#@_$Q9dkk?vG zPe7JJzg;#6!g`g_QHWhKyJU4GdS!Bp4~`Rihs@(nT9OE0r#2^Uy99i=Fd`1EMydAH zq<+UWL^&}kox_?fHCUWO&7<{2rGr=Ywl_L<93_&1X+rx}1>Vm(X96DW^<@eAQU*_z za9D@J6nNaZchg^mO)Y+%V|?fY_!02KRp}$=QJW7*tr4IgO7o!vMC217qu7Vt6NRO~ zzv`j#@+KzN+28;Iv{LN{pZ$pt%YSJ(H%GUa?Q)Z)`}N4)3n#WkP?S<0XI||LoPg>; zQCAiuQu-cJ{X4zzfhrX_r~KwX*)N?1`9HQU#iR33Q$#NG5z^z(-pkiI(NK&B?9l_m)9H!_EnY&Y~ z6!ba&d3weSJoI6l+D*_zhzoRoL@UOpq>V^4B%r}Xqg7;jtD-iuC5I=3gVhaGRgI+N zcAp3vlLP33e@+`OS^n%g>4A1t^W-tK8zxs&$6;Z+*V(ySVgpawbn_kSicZoF)lBOg z^=J95C49)4u350~M?#k*I0lI6kz4|Uj?O=N)KgO6JZ5MEC4p^I?Z3KRPCPk3PC#8G z5UOBqKWR2zhCgV)d0c_t=3JA$9u&)NYvt`3LRd}mIq%V8bWw5&}P$i3@;5<;0G^=$qk0vsJW}gso^zpY0@5auuEyBr#-m0UtOFErsRA zXxWAjXGP@?S$3bou9fMOmGpGYHm~zu_!C5wH)3#_m;w^5jh=I6w2&?^W6Zh#y)V*K zMoi2O<-@MU-LW;s6OWSy71mspqY?N$0uiIXDW^jN_S6Fw1vl!|zbes_^2BzU zZp>c~{*Cke-k-nG6x{Phfa`U=z;!oYrP=_Xp^9^W!FZt?3 zODW!yLN6PuL=Il(`t;$2Qq5WYMIGCCz2QNOigzZLJnI_H=y8A1T~)|x(mRqT&~RC{YHX@ck)YN z1!w{>oF)0u+~ioRCDF=*z^^n6Uu+lh*(~HFp!@6f7Mw+Ez%%99hC}YN`zG(bnu%e{ zXYT11T)`Hlzq=WML9`wTB-I$_sgX#vRs`~PW`%|IxX0We66*I3LT;H-O9z18QJ&P% zae_CMK%)6p*OrgSuQ@1{-EOwXi6Q<$3qcUIfC`bPvcI(o2i06D*nYBwnhkKW2jsv01)>!HhW4=ao6S{Pp&1G;Gfb&uQF=}Ay_Wn zKnQ~$J1S{aaJJ)|8$E8V9vD(#7)kJbl*3x^p6&H)!K~5Hrs4=` zl^fri?x7`-`yH{DU~7NJe+ShLgtt2F~5FgQSzRvC_&i*C&7B|b9Yi!GvfhJs+%Dv zeP|7JP$7E}ZP*rPT|>vGP=Xni=X>?MN%I7Ln2y7>%r)+Jj+=UsNVTJJ^EKZZ?rEsi z0mCvD;%iE;zCe}5I9e#rfft1)IvYggEnqSCCTV035Nk79|ATyYKp!2&G5^M;tb)4) zIbMok3EsSK!6TR>??wp!%Jbptqj09r9FMem#0FjGD)VS^UGu8a7Kcq*aZR#-Ji~_1 z_bo*LelH#d_vj5jS{9A%%N3u-CY$?4eB(FUaF?VVQ6KQb?M?sA31`2Qea6ecFAIuJ zAXf+pKav>D{?2aFY5+YX#aX{xsy&HBIo@>yLzOyABm!J+ge9YPYOQ_ZNfYP(ZmPLA zOro!LLflEqqL^6U$_&17y?Avt>3=4gA8(2~L8EQ+yJppSpb3U)B6QX_2hSIygx$-# zqTz??Qag(0kji-xa7b~ITB+d3;oR{^T!vzv%pse>vG_3+YD0dku96=7cbpZ-$k^8s z*x{N9%ieI$k8h{KPdQI1U87}@-J}sjXk~UT%JHTX;-lLsZK&|#{RX&>h_t75RhQ3m 
z#!|yH182A1Gj!oBUQ^*8;%ZFSAi?`GwaN204b!L(sO#<2I4)_QO>MU0W0B^5A$CUb zdzm*3bZXqTY=2BZL`8x=xM#!yRxhei8#a@$kY$}W`X~QMR5&#i(T2y&YzBT4ESh{>%;03C`Ht1)XcDc-{(;ZQ|{;1I~EC49z zc3-HOo>ET{3i|$h>x!$@?kK;pHt;i`$k)C82HwvVQ(Di=Q0V2TQu60H{ejGIj*-fc zM1*WO`-OTt>2UhW60Lt3rFIs&bLf;($jub144*Q;oUQnaU zYVo$;x7y=Y(1F`1h0VP@PTpvxej*c!tz=&m#?a87u#8z1ouc44Rd-$NDZZsL6rbJX zp=dlV&XxeW;y3XYW3R>tL6Uygrka9qcXQ@z8hT#ih0JHJ$$C4i@V>HE+){AkJ zn{>}nkN$@2sl30mv$agvf`_+=eLK(%EHzyfB(go?*7=g+a`pymE*Ary93p%nwyWyq zjkr&Irk?B^I8F?GKX`y=3BPv}D>6>AM(mr=Hzu}(Q%($_1*5EK^UUuK6<*+>k1`tJ zx@rF(KjQ#f;k*2Q0sKx)wjvCt_lo>4%B&IGU28L66_0amXOVj(`X899fy)jWe_>KP z^0sETzJQgTuxp7Nu~utBpYjXE1IJ$|paWMheSFl5`($y2V~LS21<2c|%pFaBX&S|- z5KMe-l7#!FndG(Q$wW(KR;(zK%$NW~1l`4a=OdEQw)S@BueMd3#T4yVd&F0z{kMa- z=K=oj3MtI4HL{6Y#88Xp?FAx5S6tQ7~j%4WjcbM~esPC;(Z4t_*5#mK~NkZfaOJCLsao8?Hl zcSLf#8jT{B%U$@9vXB#oA%n1hkD9)idxlWJ?0(NA@PyD>y&E=~sUYAZ&c=vmP0b3) z;#k=sKI+$7>v{uTZirT|uX^jbX(IJr+Dp;(}coj_&Z1N@WkAC@3}4 z2w6Q`lN^0^I|rt1qi03`66|XGm z^nrHZc$%K|?liER@5Rmm!swF9uD#ZhjZiKdcad)fi1@u$?WFn5}Ps{oU{)t0O ztRP%287CNi!nZGlJl#;WUAfcspGMh=9LCOXp8CW^#7a?O{L!7xUu4xwvGoO_n5zJ;0*wvOp@ z4|>?4-?GES+GKbyCwlpH8$MV!kOYn@0dK-aXA+mIu>(m+Srp4g3%+{Hi^MOMq^k&_Him7;=l-F1&zt_Q9h5K0b9OXLj$sn_!y7>2Iw9wuSe04& z2W_&dlu;eWkmEiZ>|U4#CO2w8ehYkSAUUk4yxqDSaW9lhuKe+w}p#tcT`e_hau_CiZaTTRV6PQwLW8L)0mh-owE*jQHlw_hZok ze}HA*h4BV9BFLAMIJUuxTj^iH9{&N#-@52Vl8+@;48E6xqwif_Qe!t zVRq9vW*s2|;tj zUium$jN%)0rg?vreJg1v`0>G#l=!wUQgGa&EJ{%t4J&uf92V^ACsSUFM69Dlwhw0c z1r-$Mw-CB1Z=Udo)z5oCgf5Sa|Nnv71jU04>BK}?TK1s~#)GnlJ9nc~%qe&#(KPL^ z@;qXuWF&VQDfl{_f0kU%_XZDSLZwD_@D8@DoHJYYm4@pe2>%To%pk1b{33-)?LYmx zDp+Sc@}VwNVuywIxKpesM+=f9)Cg378$}5DFM7;26GU2^K3X{*rnZxPq90@OOHvK| z(LY+i2j($4sEjY`_J}^jBd{yZ_MR2J#h7HvhP=v^GCH9(&3xBrZXeSVGcAW`Of)v8 z0)7!W-75DdBj+DF)Cd(-GJbA`pkM9QuM%c$d)eWJ1`mCR|E%U$y4b&q_%}^UylQaAT0l!`Anbsj`1lsUlN7aDguIkTS6+KuU1AkXGmnj& z`jZg}zs+~)F`NV*S0~$F)98FXP$N!UzFz$xHs;n{U%ErmR!P2gtbAFm*KMV2U)xoo z^Cd_#U<-+i9w5s&PcuP7#=Q269}l~{^O1Ms-kiElpG$tjo;g>*0H4)fj=PCVYNu<& 
z&y^}#+0r%{V&dbzWg5ZUC!gStWEy&|e=Xrpa@84K(Ts{N-{=A5^ETH$2J9qXa%6cT z_(+tIc_OQ;?MEONvHO!0Z@}+1u1s$D?z#2DfQ-R{o8xL*5tDzEjo|c8!I4<@KfS#J zx0d!AMd4tkVcHa{qS^jzSisHvjfRKE?n|k~b)UMki12^VrfS9pOno36UA}c=2x$+l%y=nxp`7do>A_ zNen4JQ9DyyKVip*byPp;EE&HP-WjZk{sBLd5(&*2WxbNs6Z2Ebj=r3(^apdbQV54m zE?-0ria!|>ReblcQ)LWrS&MIPMI+es6+lf%Kkw0zc=$3QYXoDsgUxNR=@TBXZz`u5 zOS~Tle$+c{5J|)2Kv+n$iI6=uF~oY=@!4qbKmd=tG$%=>iI{5HU?hXn zmIN1n6_tIT;RYU~-ADHZ7k>1k@2?}Mes`jG%36JuT!3y+@J}O>MMVekD*XJRMj+|MgGpY!|O=bY=h zzKWtZj3se^N0&oH1wWy`uk60){`~gDcZ1;e5Z*_oLhal3kgiy!Y@pIo06A2W;>jZ0VX|Zao<; z#$P8;YWO=o>Zpyf8RQt}>WL5A3T0U0a1VxxIJz3?E0&Mv_q@k1X9Aj75a|U=G3ruf z|AJ16_&y!cvsCdNrB5e+zph48_&*Ty#Gp9a1VOZ{%rwo*GzVhHpEf>q4rler6yn>X zy#b|!$%JPM(?u;L_d1hoe^S)|(^b?@72S5*aj#MT5N<~J|NNS{XCA{YXoECr>)QvP zXcY%xorG?Wha{@~1#uRFq@r^;F|wATo`Kk+U2$5%d{o3p3!i#mk&oN#+qdE!ltG|o zcL!5pUNqEgRBc9i_+#mYhhM^}qtG3hSyKwkA41Rz$Lk-Uq$n0h5#=FBO6QYp@ph$? z-#43bvsD&kcBsgm3@>})q~(OJQp!Z8t@XpS1I=@oxcr-150WkIzQ+wbt_h(B0^IkS zaWE4BTO?HbARCQ~k@M>&f(IKRHY)*Zy*Z|Ee2TE!?}>s&CXR#pFU zr1fYeILicNsJENSN`A_8MIMgj;6rnynXMm9>k1Ujbr)&!oMUfI1KwcR*FR=% z9Iv*rtsl3%%-4mHq1|FoG^t5UE;+-2hw9V++_;qOKGhMiP!NA%KREM<%!Rm`_ga83 z3XM`>vqUB%Z~GooUgix`oYPCPo#Fg z-l8kSj?3{3C4g~W6!yC(KQCX^Rv*yXBIPMiHu!jT>dhIpbm8TL272L+JLt3HMnj@3 zw*|dTQp~VoOEw1r4-$NM!46sS>_!_gCTzCA9`d3qP5`IqUIh+7R))zFWBbhI|8=J| zvvA?2oz!FNv0J654Vqooc5D&+j!_^PVl=qbVHM%FDO#VbHM*|e$O%X2hTneEn3dT| zY2!?HL|Hy@wUoGE?DHPq0z;QqnW;){sWxE(kOjkzQg!1^A_l%Kuq5TlE=vvIxwo-V znB9!90KS5o-tw9Z!N5=@t~-WAmiq=hYGt$)GOobE=SRmV|6NvIYFpyX)F6PP4F5IHr$0Nt_0fu- z1NXJGLo7MxXFZ|Fzdvr{>qTZmoT4TM0agdY?e$j#pVS8SkBwt>lx8`qNN{Z-RI@qK zoP%P)Pkn0eZ+!eL8%O#=uHIZ(CI2?6moM8@#Z3yA@(!2k$UrS+wEl$_J5d=>fo`DF zzPksq*azUlvm(@Bwb2v={;8XsdWQ#=w0bG>I#+MDe-;C;TV(5^Ya@lrJgOMb#4!Ud zpQ6H3*9&*uA$Smo$g&~w=Gs};$SNL|-%S2Ies|c71g5x%*a?NEdxxf)K3^6zBFbT~ zeE4b51~EzDOXV7~4t`;pDP$^vM=^QC->3C12*67!<{IPdQ@Ij(h$%4E^n!-AU|()% z+-eHUbZDQ^g}3k1#h8ka7U7NQSB=a|RA=Mzq;8-K-U^%Z9J(e1NTEX8C|QIP$2|PP 
z^|~hU5rMbi=Ok1PvW6(;V~5ol_#Lkmeba^!X==y~b8SUJb%1if2Xd$I5ph<$tY8=H&IVdC#8E=`?X*J@H&YW zt57{wr|SYp=^cOeBLR*X?R4i%CUa#59R7Z7JAP7)gY}45>s81IdUm;$yE?`DU3zktA`n$dk|VK>Zug5RuA@s8W_->t1%Ja~ZJkYe zN1_-R{OeRxmL>d6!rWI+ zhM~?EvXO$9Ev<&%qAn92*P-9}Ekj?~aU)<1ALG6~sQ~^mmrfZU1XH<58J#(3SsJ94 z32 z0SL53TRB;-Fu|+9Zp>^bjYB5UxoQfs>D=iTBP(H56OC5tCM`X&0VnX?Vk<$>6K0Ex z(b)3?d&AN1%(N!QBhm_fI`Q8n4jt&2xBdCrBH9r_FT;rk| zYU}>Oy|;XeujlFfMMh$r1T**g$M^cTm@TSxAa6+kx9i9LM{ix8{Cut5vKTKNl^RbNuKd-w@j*<+XTu@)7%|(u2!UTVtc4!mvfDmGeXh4!f zwDCjlsN4tM{UA1ZX%~7F9T_@Gk4AEz1?*l!>MvZr!1!-Vd;;$tSuY^)-lHDI^dgQ+ zr{geJ!9-o^{V_GQ8CqNg`-5huQhARxEZDXaKQm*K)+N zx8_nKiCqYPeQ5)w9EH?jURlGTFVmFI8T`TwTqKxDX;0_6Rc|!)|F*es!acO07qvbU z^Dra6ewkRU*M@H{T!)^W^N4h!J!Jr>wayFvKsL2`k68;4O=v2PSzA=$?-2aGNrN(gv4%OG zPNS1g4Ps~LRRnL7^?f-4?cc>ml;Og=9(g5qM4;Ad;rOSmrkyqBNylLYF_FEErNQpV zWsN@Mxg0+{FAy36dtECU;_1Fp*`E=W{%Wje7O{%hws+v4q-;sl4W6_2FMzthEk*fs zV`X>&MJ_GNqq~%KjE$P*XDoM53cUiPD%!)hr6#nK)x1NL_I*CUEkW^C3hi7%q}7$) zyI@;vUBL0n1ec*7@vtMH7Bv9=%G6K!!yo@i6K|&{rM#>&?@2V`x?AdTwL|M(?++z2 z@koko+pPFDu|{-$49jhWEx@chkSnGjD~bMlmy@Bu2EM=)Rmp`4`d@WRi?J{;{Ud zv?LV_Vh1m#+m>b+@Fx(_YFP{!_dibxX%(AiZ^qNn%nU9uhp1Ah-EQ?hr7sZycnV5} zC(nEUV&`k;kitD-2bbB?>}N4GbdGM_M;4pt$=C?e%L4HJ)fJuw)Dp(O!rc2Jq@nV* zQ{qCX-Y4E!R!XiZ>ifS-znGt3A+{j4j9)C23ZtS)?*rXTDt#$)$?geKB5!TZoRxJiv3xoNW*POJ-kcf6!|3UzIybYR@I+_Cu=`LLW(@>!W= z19MXU_~P%9L{+;LAmhnpjM336B$7bYb5XECC2YTlu; zxbDLOJ|bt8;6`3NRM;nMa;&*;pV3_p^W~!93BUT0?u`NWrB?_N^Fg;( z=FEvAl&}IGr59(Wvd}PsIq(-<_~2`<)SN5qY2_Wi`+;|Y1ZEfu?Yu`Q!|`0>kzlk-J}9-bKkYYH)(;vecBe5~(1?pa`Z^LNL-lgGC*T{<&4#3SnTi1a zK|F$ZkMmLh=_^9O{<{$^Y?CIh+bADfM&D#`D*ODCZ+R_W z9q1bZJugj-g75Cmnjw!cFTE!P=T(kXwot35fP~AazG##%X9&ew_wQ5p4;-S9X+v5y znvp_5CylKA;4uk@2?{>pgNho2+Nyd3=~;pXSTWHGwxr!+8fL_rQQ=AxsF&- zNEJemZSsJ&s|ok7N3*F;*Oh&Zt0fywdJHz4$B?3AF0COY*lExUC(?eKi1QrE_@~s@ z>VmEpo^18DNygz_l~|Un#D~ALU|lFBw&O{ehMuXCwn=LF1+ee15GIYX7h-ICweKVm z@#(NW=JZ1OYn~{s&m408e`MpX&#`!Im#W?1@V+WnQD4xrTE4PA4Sc3^Us)F({mdp< zby)j@itJXN&?%AE&AlBU4ao?n|H4YWENFu|vmgLRf(NgUVQAu-KNp^W4GrGluc4U5 
zV0(vFby?j{`-=-_^<%y6QiiD8E1VM5uhjy7NK5P{K2OKJz!>g`ku1-NIDlKBJ^qlN zr|G>%F(Om}L4U0t7JLgM)cDPYp0h%DQ|YR;ack$rs|bJW{Kr9$ z9d${|rv0bmjl)cIgdloO#c+{i3e^<=sUez|uUz={k{IO>qi1T`KrNadw?R!P)JQ+Q zdFB5QOs@P4w=Ayg4&aYUdBNfsza&t=)A)ts#>MA zmnlyTcNX5Z)Kso}cJOj=TVirip_o}#OvzSk7#{;Y9x{908+F)~0gAVHDfT(|Uzo5R zokEZ$G$g|jPo=@nKyo(q{KD(#2;Ig~_A@3?9GZR?RSG526s#m)17B|&OWP~V>!;ae;rBw(DKdrYX={xuU*#V>v^t={&p--UMq}eSIEznx zRrQt*q`RPyA0(zqjv-wNU-`Hb_ITW(33?{k-Ip8pjF9?Vcp3m^8~L+CWc2lpV}FbA zs%50`*eOSu=f+*LoXT#6dhi)u-x#|Sat(4RF!K@x)3c&mMPcv~Oac>Nrd|X;SAEew zKr_jHOvn5tg4SDhr#!N+12lN+di$mr56V}eYbfgz^vH$hU{9G!2vo6E%Nsp`mmS56 znjRIwogi3dY|VVLDmqq*IpDtUJPli# z?hloKhXx`CqS^$SCZmf9G_?g5Oj-KUQCPpY*U8}C)aHBR{PV;Sf*VSHt47{gpE_K| zM#l#r+De-$G%LJcdoycz^Za1^3V3BF90%(|?>|4G0ObpWNxn6sh*`ejWx=*xA2 zW{GN#sT1(oObi#DTa}tpKB*fb1_{Fl2}yL*%8Q@HfCol_6bA`!ndp>yf&K>Ba_;U& z!p-OD5NdILI3$}*&cFl(RQ3_`FbD4v@*e&Y;80xRpZL| zk+hZ_?=HP9Y5ETMMfDS6#9ILqic#$2()g9M`bibhRJoQ_11}~ohea{?A*xCJ?+JYB z$+#*0Og8w#EQJ-RaVfr7gC~I;X6N}ditn5?smg9O*Lai(yoZr7cEB%<;~)%lYdMa! zE{Y_mdxvK2_6Q|iR(jm99YGCE@Rt8&Eu1x^1Yd}zFP8aS(b4BS!c(NfuKdk^u9%U! 
zw+0(2##q#k;wb$q?K)1v3ke}gK_?gCU3rrgK!u8 zWG@m&16z=UE3a_IGg;PSle68gjoA_@^HxjIuW1A`olqsaGey21-25t0-Uoal z_qg+yCUizn5~60`vwPOgolzY*&RB}ch&$ab15bfXu#E~%hd}B4O=e%Ln~t+yVX2?e zJjk0?n7=kIYNBK-hRAY_J%y_4>?&fUzfRc;>@S7gF}=tAQm6d5{R;-ClI8s5LV^n^ z)DAWy{lHHb{EpxHsC(8$aT|&tWSJxGzoHmpw#trXwbQIqTigf!t^C1D)=X0y0FMvr zg1|Zq`x9s_c@aO`X}xuI(cR?i(AR#l{Xx0t8u6(bRq&lPW)}RFnXI39^T*r3w`y4I zbL3#&NLL3fp*|-}NnsI?m^N}_QCllbIWlBMSE#k?Xn=tR1g+oIB>eQvVYn*kmAyS+ zeHceS9)zH+PPIYM{{;b_a#Mj#G+ddfV*#tZYm*;x&Uf(5^jn0(#+(RWq293rJKDPN zg4q`v<-dWC3Ax8hX_~Y*%M6Ql6n@3iXDT&NIJl$|Q)ObJQIQ1E$+8Pv%h zHfp)|X52NiZyxJjn9Pvw*f-8@{mAIPuLCG6LW3k~=G{Kzg%I#c$Cx{0fFYbpP}788 zfHi_^^-}aA(Uu}jBs>$w&-(?5{ppqy@JQP{?9J_uC{nkYCf4SQ;ZxRukK8i_+*pZS z&jA(5Bx?j5WdXDi4vB}A0bb);s%L;7kjin@H`KpnP^|RTU!!#jLrDtOA>X z1^iuIhNRGR12O3l08rkpM!9~(k9ue)Ji)W^9vi4v46|l2&c*MmnYamw4<<6 ztE3*kuRDZNg71&(-hL%)vIdp&T9cjW2K%iYS(c*pJ^rYZ8Z+_^tojZwit0h+)r3=c z8WpN*9{G$0AO!$ge`y4xtIb){nK^gAea{(rzgsCUAS=J`ZPWu_(3fWvd?Jn;+7O*w z$>ePKrNgUz9fPoy&)n#uZASardM zM2INYt(}~D){n~YzvIfF1h1<94i#f=J(8nSj@-%#mstFvMAqY^f8zA)@kfEbuZ1NH zA5yVBxkjWG(My>p20gzapxrjGe2k#n+J1KgbeTJ=r}sRDDHX~OS#w4LlDhKXFA(o^ z!5#J7f^2p_lg3F1&ztZJmni*lr##AymyYfya#?%oh^+c)nMCovgYKA2I{?pyxXE(X z))jN+e*Lc$R)R{?2+P*Fzz*VsAg!Q1cw3Gr)!~pMyC)oXWJ?4S49PP7z?DR+VGT1< z)gOP#Q~dX%!dGZX)4AK1R0y;&Bah1grCNshoSf0HuVWlUfRN=P=6f4MmgD+zNh5^* zeK~lRV!TxOzzBmxcJk3fK!!-OEG^?H`p!^;T1IFa&6tXP9!j?>N2il1V&5617rLCy z5wNF_^N3?{yCr5L2uPNcL8o_x9k~HUCl_*1q{)Il8Pnt4l3Q519%bRjSHI=}z(srq+Y~Cblhm(Im)Ng}QUn(p;x<(6o;>&suy_E=1bphe(|}F$ymJ&VV#yXx4z}Mo2VEv>ixdYk7F&vs+S(BX4;d zctjc)!2{@D!h2l3u@1fYz&;P7jWJoeY%ENFaqr__8WPcpnAMMqIAQu!`o4+aGebfd z<-OtpjMyFMx}_GwVCfD#`ZMZ9s~d#xIQ8ZTjUceV?l)+0}i>%gWrjZSVFy2MD!`mH5bBcphVx0(Jh*>_ z#!THZ<4I6N5EC~n@1N1_S)05E%U3l~h*N#L7b^-@Wx%vYbl#G|X0u~P>^*Csw4!h^ zR%;+au;8M?uY{K#JQKBJ^RY@xsdq6gw9%ZQNh%d1*@;r25c7wxoc4W*|7?QDv^Z45 z>X0v2%}po|YHcR4j>i5Zu)^lYoBsK`kNh(1NoB%HrTU6q@=F3HgDH4gRpOd6A)}Jd zO^CX7bf?&{)w--Lx>!e<6@JX%K7H2{&i9%m?v)W*F*@scfG~}s3GfK3F~wMgue*s$ 
zTc#QspGa(0_DK77bH*F$^GGMu1-`r;SE|sV^7#`$R?kZLY3mtY5&m*|e&w{+w)tiQe{;B%%LPil&7SVb%9jI(~~sa0`Na|*hKksyE!SvYeM&41IpC)y>g0f zHO5;T>+;<<+!m}v{!Ki&dwnH`pIKq~BY#bRqF`uc#ApS3D(&6;eG2{%nS>9v-5-eF z!>0r_E6BkMGz#+>w-w>f_S0GR>j`nazT4Bc9Z{W4HPmp&Hc8Ixb8@iz^lptj16%5! zO=l}G5`ZPFy*0ugI5*peXq$H|T$V;7+Z$8*zU=!=DsmDWG~jV3b}9ln4b5y)e3rFC z&%K_YDl)#G5$F2ok>l&x!fbw`hD747urs;vS1j!7$2UK0`hesu@MeJGfjf0^v=o1icjQ$kMT zB#hlOR=3vn^oZBs`{rgpA;$B@2qf74%6AGRDebnV$`h+etqe|vnYX$PW8N$tnkmwg z0#Ew2RgmTzcU`!x^yt#oK@ol`Ax+(S>#F6&b4!uiPRSzRX5Kr&D!HF{&c^)eZG!<& z#snIn(?2Yb7Zpv)-&N5=k!fqQzs1`r%(Xfhmi_==>}4>WHB`)pUR`r02Q9Wu>k!S6 zWJ=(bKxOC65Sn=Y!aRkZn4X%gMHoCxpmd5AH*UR35s$qBGxPCOdP7_`H z*dCMZ;rtsTLK@QH`K@7;%W1+k>O`)TInbbW91p{Ds?&$b9%yePP>dV>qZml9J-Hb< zVSy2p$0b~O3(s}*H|b<)3jCER=UX=!+ZIoQz#BE3AgosZYX!4Kisc)5lOL|h$fwIh z2mv>}q+4*HpVHFdLKvUAxR(Agydj%>-s%pmVrJ4U=Bz1gu0|Bwuo6*} z%>q=Bz12+hsuoPblx5gY8vUW6O`SH>mHng~>^K}P4nEqlRc79${1y+6y3%V!N;yDb z3^~M0w#!`ab)hV0l-XZv6!r9ka0{{`L#5)0^Tg=|&@a%)evv2bNe)qh5NJbb069R$ zzcg>mg?E{P03KDgjR7R^y2YUVh@kT}CWk92nqmi${Z7+2FP}nJT&#%k3mBpaQ;NS$ zo3O8_B;+{=p$W4nf^5LJTXp;$W@{X?qktzjEgos38;g#dWfz(KrT zjTL+vVtPYPY^aijOPLA!uRkL%1)-u6g5!{8>Q5hP$@F0tKbte-&CTVs0Tp@ymi28a zA-A2ZoCstH5>tNBLr=~H^=0ZbU9-I4b&Dyf?r;al4+Y(^LPfGJmtOq&n}hkUfL7m45?YxhbX}tp-i|?DwmuUR?zSx|WEV8Twx8@Ct7A z$H0e`golvRJXG5A**oezi5fUE|=uad=fNxXN&z(8%q}F-~zYsjJFwv~E zEmD1NLFYE?c}Sai(j#-1B)tC}y2jTI?4xvLcE|vff46l~k#7x`a&Wc8>>}E~bR1a! 
z5DXX(ZLu;Xd;dRbF2y8<^-c1nL36I(K6YNnd#fQ=U zu_S<6m!E>gGq{xKGF5KVWC0%HtNQt^gb-`NJ6!NO@ikm_nSRB|cs%=$GTnId@_;Gw zHrAnM`6~yxgf`C-slif?I*@)od~e2qC=?ui_Y@u{qaW7aF)x`ysfXZB1&40}zJ+Ok z=2HHE&|I8p*Qr;Zwd5RS*?RwnMsRgR1=d)T1gcU|x%F>Z#SoK=YUt##KYty-!TLLx zl&8slgo&bC@!(qiq4{jJ?D(UMXJ}%B;u(04@({GKAKzzvw>N6|ZC&sePHN;y+4X(_;a9QR8 zJF!n0!P6tl;oJ&d#{F9OJBO6F%Vk>1@6?Us>sDz_lg4ER2tW{MEp)s>`CZiGrelR6)k1HaFLn}BWQwv4>4n=9MUVq};Upj4Z{$+> z#t_ZFo5FE-NdJm9Ghubpe@E>HbVm;gA=!ni%Ix(bp;A zhP+yqyrx30fAY?#WEJ+w-Y$W@;GK-B35R4t_iP-B#1P7iZJHvm?zkx}3z$}7)VwIvG4!B zE?HuS*ASP>pi{Wf(}-72W*cV;$NFPalQ9Ci5&{^^qCXU?fKJaKA??mB|~|^T9X6D)Xm9>l$rDiCKj) z`*Dm$Dp>%BJoG}3UHo{FArOwKiCl*1Y@4?OJ5>D3W%spgjHTc;ZFhV!ahEf zsU@G3k@&?cN?P-isY({K>We?TLVu4LBwj>xXiJ1jYeQdRLAsZa}e|!(AZHTKCZSdzc<&Y@Bo&LGWZFdUj2l z^XL9VrNQ(S^;%KuSH}l9mk(B2JKsyh^ZnA;Ge}5OPP16{T^0#Nex*YN$l?UduFO6C^@Fa z49S$NTWuZh$?B_zca?F;j>1jBe^RAD7I#|mZ@l%v_aC|i#2-?IrcmZ<0>>s>@UEgp zT3W$#1Y)w*JTnr$uf^x9^nnK8sg%Jz^7<3Qpi) zJOJ^|2nu;D2My6n42v4gFi?2?=}^=pEDqg;+3J9`Z-xjrw74Cm)3E zM{Gap7>2wH)7-ckZxh~HsRnA-twL$2W!cYKr=AcFWwpaw!#kM^cab9%v%oXdnkk zkd+km-t5@Q_u_B;%p+nO)BumDCed)y*YF7%tlH=#Ap`47j|l-(!ViMqlfPJUZ(gFgHPv#8uWB| z{!o6!us+}&LW3=($jRW@j%yuMk`G z`>wKGoB}?$BmC1zS*w0*FNwxYd*FnT zL$yACBAbvSy+?w;y#8z?qxOb$gEcMyqpO+t6-}+<@`&OOkQ>2u95|3^LN1YNza3zL z^IO$QJc&q%8TOq=`=?PMc&^WrKh3YBuQ$zYX593FOIH~hb|tg#TqxaPMTc20{c*jB zEUo)|dAp{%EZlo`Z?r%>Q!eS%_$SA0$sTxqcY>eG9xI0)^cKFq15-vl0zZS_u~h3Q zYVpiUKzNq4{lw?7w8`o}q8^gTJ=cXD2>~*@|00i^5qNB@pUGI{Ws`)B0B8(-lFTBr zvTM__E|9>T{Ba6DqRI?n%N|uW3RM1375cbx za)WY;qqO@AGeS{-ZTnuZglT!L8R-xWF8?EcwEh?1z~`i$TMz$2mKajq?T?j+?lH7V zw1wnO_ha$k9U*vsu1I31D{)Ji{l$1BbY7aBokGE=Lnsf0mkovm;;+OuYB_54)v9R; zgR2_Ddk3?0F93t!yjXUjjM+v_CqK!f0*Ua>pDa_7$uHtDw?PK;;9X$TOp4fYhmh6k zqj|l2jPY-;U2eM58b*mkCGYw?<)Jy+R|2vK8PiVFAzXAAXJ}*r$ijDxp5M!L96KEr zNp&rw|DxV$i?^)QI`wx#~{Z3*pQFqOs7lzjUVd^F2FH6`6L30qu{mIYTU=5`~q3L>oM)wL$1w_4& zKL%UVZQ0r_nUBC9A+C$ zLdI4oXo3%VY{o-AS2IQ=;ALw6rJ9^Ryg0o5Qlu z_4~1YKVa7yj2@#^eSN;ckSq^7Ir{qa=W!>$@?+!;eHyw^Hh8GMM?5Ma1g_r_-rrwU 
z?*lwaRT-8`%Sh8wbZ)?Uyikzrwo@F8>S92eRytI?BIXL9#R{F98jQ>!N>GYH%#%or zRU^%<#GPbG_Naq@)(_rrw?d+#m@YrTaK?ozoU9OQ1lHxt0n0;0I3VN_+^~JY;jEui z&;ZBM>({ZUvcxtR2ZSA6+>}nLpng{s&Tuvnl_?_B0#OVf%YZVMankC*7nLlymgeje9dYujR*$?2VV99Iini>dNg9`>Eoh zU=%xifUFjQLA{uI>c`y>?b*Y=)Sr0pC`=fw{VMiOv~M%nR^vTOPO@*_tzBNYC5dSV zd|%TLLNI$W6jc*~8n)8;$AH6=a*la zBH*cWQ3J{?ZBQ_GXN{d*Px^@Vy~YvBe590(oKo>FB<&%q5S{-p>;@@)ry4JEEjGh} zBK62?DUxE-hz}WLk@_B%>Yfr3B%*>fv!!n3;UwT4yIXLB)|LbEVuKqWn5g#b$Qb2N0(qBV$~`?ZVA_#}oEP1{+%z1!)giSW1WWD1VTykaEeCaYi@mL{1v=hzFH4n3pVhXGh4fd zFYxtIa^N7>zz+pwrdFap=1>B%n_hN#89W%0>CYFk=PpDt_)7I{W=EI2=A*`H4!p95 zpKb0A48;OkkD|NwJ?PE-}RX5GmY17q=vP8&BR`aPf~cKA1n>;D;F%LR{jC_Dv+q%%n88B_ z$jsB2<7HVyMYYFTB5Wim4&S0o-$IpQMp3vDT@iAp{woBaf0o5YM_#9x4ZLFQ&F3~T zo=WG6ppa%Ma;VaRH=AjJE1F2yN)Rvj+e(nH?cHm>e?w>aKaEfC z-mA7w{`tFUNcjvfwBl1J!bn}kfO(m_%r&UktEV(I4~tRyW+aBC76HCL4u{6xwj_zj zn%jY0l(xLdCfPk>SmFM)rP|ugr23w|l0!=t<1rmpyazNJz-*3 z)IhuCaj2Z2r4pxn*ij!(MoLEfKXOyoUWhlB8o~`Nx|&HoGCuIWLkNWd^DFB~Zys_1 z#r<*(xu#46+9p4P!l~jhJllat3_)Ax{5;|}Da4^?0_NtFysjWoc5ak6k_1t?LGUnc z$Hi(M@Hw@X@#k6xuLNZPre%$F0V2xME-eyzRLextX(Y0Ec4&o4c}6$n)9e@~&?lwD zdnU(!B0hNa{j0*Or<0Ds$F<5s_3I4Dph6e$w5m3PSUSYbfqw;hbA9>Ya1SO`k}GrH zG-gY7DdiLgfRw6FaKX?g@(jeflxk>{SQ0?m>@w8UC&FjM1I@wx;C9wg^XhqV%*y8T zztRX>_TUxQr92ovhv@NBI^}s&?L8y=Nv8WDh%i@(7%*?_i&J!hHgE1zRX0UG+umxU z)gRlO170r+HKb4%6pj>1HKTXEw^cRfh;=@wpqWRFrFnnl; z6daTe6f1&S&9f#H*lx1kO@&*JK7fA~E4^Ogj;MsRwk!OL)vj!wq8mD&>(5x4(GV|a zm{u(msFVKImj9N<{SgcE-uOoG1fbw>;H5e{ijmB4B1rhhd&2&vc8QgRT?Jg%^6`1V zYhjXm<=LO3q837O`8l?!l;_XdVNI4U&Su3IlbB@#V|}Q;&e*lPI<$4?vay5@c=`ba zQ8a5;7r(5JK1Iet7a?t&$DAViq*R{6qurTE&A&n++vg4tYK; zd}vl59c==LrCVSP4}&KqC8Z+lS{462@GP;X?hpz4rWrRCAb@XSz5_kDQMft~mE^I= zf&ne8lwn2YicJ-EsPI5*$~uCW!xdW%1quW7u~oBRQnBbXaI?fS9H(l{`+N2p`{0iY zJo44;C7k5|-W{a{>1-Hyq^)k({tzNEr*iR0ZTIY#D6Q!0luHWgRi>wtetab@d03-S zo3!f3xxEnWj=4?C;2i_{Yz^3Xta=Pwy zI0MVqA5rtO;K$r7qvx}tvyXlB7QqHQKMMSNZKWEcw1u*wZ^m zciqakLO;$4w0e_unCB!7vC3OBk68euO5D|F^Unf>sm-@*ar5}#K}}NCL64DcbiU|+ 
zO?7->5LMCWT%20ru1Ln?qrpPfC;mxxssx8BI}w_XyiZIdioi0iTHQ7*{4B~*WFlAZ z)ts6$m4OM$fm&W=^FWk5_)N{F5&&uI_rjH$VDDa%xl=fAI2W?JNDRr1QBY=|^%WP| zA5DjXi58glx&$RMO)GTZZfW4;HxiU9>o)zb)XD_Zj~bBwX)^CRs@@E z!tSnn{%|@mon58}@$k)vrQ!lNI>QpEP6BKX-Gr2`-W%zi_0TKBjbiX+79IR>xPGPB zxq^>%i2NvRXx}P}M)L9A?Um%6SKLvsWslL_@pQVp(J$bxw>7vzUbmJzlyDQ~c_m-L z7ieUb6lwWWAmDB{$G@xidmPMy2MOxZEdu$Jaez;j#O2` z)Ld$a=4oFFeL(A=+;a0R`U9dpFsO~pS$mCP@OU4IEoNmHh3x}p@wVEmG$6RH*%ie>x*1~n5v{8Y-a{TMTC-+r* z^Ww`MDTI>D$hShb?>1w-Zbe7nn>Gf>W;jw<#?Pa%4l{oXOo=*ITNYov@VaZAL#X$b94t zd;L*qx2IjVQ?28+oDTRNJqOW&UjyNYq`XruUWVnmpMQ@zS0Wpv{SY@GeBDt5jPXh4 zJrqwCFN_izkV3A{Z=)b4^El(_<_0aM2P47D2dgFS+cnM{5Hn^MSqUR8br47p(Y?M` zdLiLA2}(1rl7&ih)fa9?!<&$iY44deA^@m*M$cyAV;V&C=I-qO@QTId<(Ain;iQ8e zzNVyn1&^4wb?8e|&TWuKHue_GWBfMP_3hL*wGSqAlGt42=fpNR;I3Jos3R3h$6jl2 zgdY?Jke~Xs5aYb2SurZnKWmQ2j!YzL+|6wb#nE~mr)Pjq5iPV!hLJRllhr&Hp)2M% zG5)S)A2_|SM4lv{3z2=fydNt5&mhq`IOjQxkh6IV;~sFUg8enijVXn2|N1YvE2=jz zKjni;i-yl5)Kw#@9r%%ATD&-30#hzFT!VRD+VXlgqGT2MpLjgxbiY9DZQcTX{epZt zA*_d+3xZ4|=P4E{K-;n!=b3GiN=PpV7FMkd3XSSDzYOujdAtjT%~Jw=Qfx_X2_wzy z$0EWJ>8y2MXNHWS^PSit{U6WCHq&=ul0}`5f>0%~ho+xm;5`*&pE9JDQn#M6qzphx>!M%TwIpB6POqDdsT?@n}{~U0BD&{%3NV~)kp;r9x zTOm2n-u7oo8eqS#NQ`a&S=+XFPOq`5nC|aApd`h3r15hq(;=NL<3ueDZK!QG-BK$v zqoO1#^2-i*{c&{Yztf&h2mZke!u(Up=ei4L3XOCFu59Fu;>Vw(;=%08=J&FabO5=^ z!t1U3V*_9ywT~*I*_5lIdC`53pcOr;ZDhH4V(`bzpxOBbAAFES=9koQZ62{hm*Hq} zuPoNTl_aG5LE347WvXU-T)v#44NtN{yNZ~6GihGh!`+bffR#>50C{Vx zCiPor?g?+-3nHk{P9JzmEG82`fEBJ~BF6b2W!J!#+4r?iwrzW=$*#$+$>!vmG}*T8 znr#2Fo9vp*$=2kidf$(5{-0o7*K_u>_geS5@djbCMiY;8vd*?PWZ~&<`RD?^^+_Xr z8^DX$k$yeiXo7yhOnK9-)A7_K8cgnr*rSpZW;UG`*|ycQkBL(~pghlW+6E9ErL6TR zGH|mGMjQJ7)2v32m7s6EOQrga%FX1q0G!lw|xwfqs;MOKntnEH0z`#Y$zOQz&6GN!gTPT!2{4Llb zF@qAkTUC8qzky}3c)|~y4&jvYl*H}cS>4?n_fs-X7hVX~`qLCF< zEJ1+AQ^A78(`(76;O%7LFJ68~RsOSGmgHAAVVQe`2=E*8_=O0P%9Gi6J4;f1smNV# zcd_Gx@Yq3X{=q8ME$zSEY!2SzJG|+{Cb#SrizKbNfW;*t%C}Xa4dSE1Q-MSE7*l%(CzZO#Tk|4%&<1rx7lu%3pU* zOLv`il45Zdn6d!5z80GNeZ2(YV$pQ*lwn;n1Fz(Y&{cx91wP%$|B#y+In`}~>aTTd 
z#(MzoGC0x^T30HfyHS-VDX&j1tXOK`(&1dltH2*s3Zp}lSPX?959d8oe-4SS(wPXl4tKqG)T)4L~rQ|z88>v2d*Yrw+rc|bpR|gM8 z7-WgrP>tsu1a;vKKvi%tPa1_vph~6Z9;5!}%q!v9lD?6qP`4`So6mXK>fOo+oSL_# zQv7)PL~9{y2rSMS4;0UoQtOX`(R(gPM=*#;4{h}OR3y-9owER|UKO9=J@<#Y> zLYt}8WBV$C2UEU3e7K@Fr^&5Kru*rP9{@!EQ^K#(_uGec(+OZ?g#0Fqu^QLoIu%&P z?{E^%44!~e{EaHJzZ`3ytP#fWHd)FzgwQ zI^hfh2%NyS&>IyRK}F~r2odsRN)VbGjW&;#Qee7pYuyHKlU16-bhqk!I@SJ--!mzB z`F?%0r8-EQpEt#Ms!@$$ex|S$!9BKji)h7%9pZ@_-XDmJe4Q=7kCZA2zxzg9CXy=DaM`Dpf{caGh6lUahq_$fuZj0xY*aLl@Q#F-n?88QJk z@ZRFhKj;qz|N_Xc{S9Zek zI%5?&+Cug>@N|rUnjT2V=m_+zS;cm2P0&8Qa8la7u5{xK6O0$}Nu_y3X^IgCPeyO}Ww8+C# zTx7WCAU<1SoE}Xn_6@)lay#v++Pw6kTH)`k&B5!AQL4QC7wRmtzJ!#_x&i)FdnK-E z1Q1S`aL!uRm{$}l)gY-YgcG%UOPzCL=h7TuE>ofK8moJHnj};$%yG5?C+o0ncWJF6 z2wzgINYkfy%LmsYExFKsz?~)obDs0y(cAVG3=W8HrZ3q}bw8`}*ydS7o+h9=){5c( zEI6`r0V$YDYC=aArp%tM1=25DcEEnZSpE|-LdMGVw8ELDm}=fu;_EBK!fYOuvs3El zC-7b7m3i|BSt0W5MVN^A!?5b_K_goCF7n{}ih!Vy0FRz5I*Z+(i^p!!yPo{D~2 zY=mf2Q|J%F_~)`{(_0Mx?%+CKBxNffCFggbWEyrPI-^cdy#t_D<@C6Cstm---+m!) zo;IH;D-q?aAG8}#0k>^sJ>Wr9&Ls05j#b1u6>eq-{e2&!caD?89sJSZy=3lHcAu#^ zA)2G4Gmd8Al`LFVn}IiACwV=)W}X)siQAhwkO=h7wSs*eOdF0J4efpJig>YIsjKL(VCC)2_Ete$6fB33|OOuMPt0){lwt-g0GYI_ags69@Zm;a3 zdtQ(Li)!J zL(eMos+Qfz=j%yL%(G?{=M9i-d~TXS|uQ zhfedaq7l+VF>Pm!AXjwsIshn)mh@x)Cz}oXaJVo$D@d%}O)kRJk^1FEWEmPB5C*@- z)lH}HuZ?;8RRG%}aYBUPdm9J2#!B$$hksZ9t8zdsuAGmcjPmlJ?}JU3q)u=IU}nIU zac1tVOi~mJJu~KmeDl5*58T{Z&&RUH!eRiw#!V+kFB#~ZCXK@(U-KDN)LBaB`EelX z8WI+2r3ugUTChF%bg30%T`bDvQu5*x5Ce3aC)^ykD6;T~QM9flW_n}-B@oN?yH2wpnfj3AX0s# zWF?;l0<2P!cBfGXwHFoD9n_)qk?!UG$*DV|_E z*qLJ!0oaszn)sOML-({J2ieWsBH4xke(q=Sfb{-DQA5Dq`H+R}JuPFaglzDNXNND! zU1yg3zizCjPNQUTUm3sx~zK?;oF{H@&Dh z`%zpPWMmN<_i9n40c~NPgas8Ta~dM%PV3fkzF7|;l!3_vPrE=uwuXP;EtFcNB)pMP zoXnriEt#DL@o)QK71g|+VILo}{rLDwWC9vJ|^ z7!@+sAe&7Q{vI-jzZ+WAZCjwiVmy;h#U#eg5WIV2s;gsKKl{-`U)pjzzb*<*%HknH z`;DMnet2AJQRE9D1`)nN|69CoMk~+8(|nyV;BDT%ZBA`rVU(RW$nK#t@lP`c-d+2m zZk@IC?u{5c+c@;Gme9+`Q}9dp*UfWgf9>|re}dcjGqw~-U0MiCQFKJCEz`5#vMWD! 
zdIrgbHKPH=ycSWMAa_fQ9KHjWKxIk1lyJwy2N%({dL6qoOz=suKMi}bTDDA!=RQ8S z{?o30#XePEeo+zZs+X+$?6^+Zn?su6UucJ{&Sjg&`yxt93!v=ls_;v|^Wcxsh&nJr z{dgWt4q;U=@DGAn!g;xL0soCPefAv}fE2*zLGDLwE1qOJXqqr=iVF!Ky*o z^11U<$;HSE3C~nUra}OVlb)do2Hn@9P{3n?DcH66>)8xx1lP(@b0X$A9C#mr5Gh?+ z`MwR7D2KF$g|tAHE3PSOx1HLxP@frGf*_SaTsUXli9~YVt&u2h>xNA(K&mk7h^$_x zaHcjFf;Qq-RD^{Ix@4!zF3Qo=3L**o2$O{AtD&ZMpU(uS5C=WVy(g&V%Jp`P+pDP< zeETeNz$8z}D^gEeSo9Q^Hs4KUWCK)2{$$1BbmIL`2qKz+yLDrP$J7p|^o_5VVE-bW z3*K3EvpWeWTbhg1 zk>$&L59jv0k${>d56Yf8TWlm(2mZPz10A$Rtqsr%9l~s&nsz*~28qgLmfNYwuoEng zH)ZprnNzpj06twIrPFE@A39B%(p1uDsfr^kJrJJeMJec?r3nAP+RS|30be!Jd-@sQ z?<URIO#9I)Q9{V1oq?$FF$QF;roZ^ufIeXtvH-*a5JPGbh-lSH&yj(FkRZt*f& zc_GbHjc)La37z0g##FwAoq&%aj{K8tp?~yX;t^6;7pfG|>HU>-YA&_J;*)0pvoo36 zm-F&p!J;)Jb{_WxPb|6_+yL?8ACL8^+<^4ahnC}nCiDb|xRWr-`W;&2@% zhF(S_#DTx7qaBDA4&#ToyKnBg)7GIFQqJ6taGZ8mbRPvGLp<78vzpm|Fdx6kQJEkU#56h6y_OI7o@>~=Xn$AN>z-6xFg^4~ z2+_+Y*FM)jzH@g1tWeg}`9l0sA1W}UF-L2v(P$O1xPWTS(uP!xHUy^%VaF0&?Et3% zb2Z`e0Y`?T^K-{O?Z|%E}!hK&robe#BQ(5i8`KM46$zccc2pEL+D3rOQd|RM}?m z=xsHFM*r_%^*_%>MBF=}?WY+G$6Lyb!qoW1P!pJ8yE@qgh_*q-xxXVXiX16oXB2?J zxYymn3ENI>2F2_x>yYC)=1Lw)#kuOAMLU~6X2GW`G-gyLxnqsmZ0~Fl#yWfnZp9z% zp~LxJ@rF^ga?w;Gmt^xhJb<(*=U?)-f}$Q237~}hO|x;bo`TiyE6hQY9R1tOT=2&% zz_7z45rTZ-3cky1L-Fr!SijNqef_+0jD09{=op+8?{LTYj)#q_lYbRV6qg=rmJv!6 z6}ZzC`x*+MExIL(A4~fNg?Seyd~PYi>fNH1x|Lk9QW4Kaz?5B31|l#0sg*TGeH1%Q9fVg!%M9+pqqkL$EPeDSn(3rE*#bw2Mn;qh$Wyz&zEJsDwyQ^8kKxEo zL$|sKHPTD-;5-DVT{{7;2U}iWOZU|8FX@WPWegu5nEU$mAS3m3&GK;nK7s$nfdX~g zHf9ySP#;|mN5>bL2!MD@xPPME=ruZ{j&J4gNtv54{=;jWfpK13{}p{Z0bE7=m9IG% z%TnEjb>{U8#~Min~o;;~4<18mg?X=_3;;W0r7RcX3pHU^6nHBzxKF262Z zN`C6)*cKYKP_9*R=GON8{53Vgb`J@p4vCx8JzQoiPB(-V%O+M^TRlv6FCe^i1*i|1 zGNORTGMhPEST}|yMi%W|Ha=PXnfO{dE09Q~0YMe#2zNwA z&vy}_c>ZWC+XGH!*mn-Ar4*%v6lwQTj&y?fe)R%dCr}hY;?5N4W5T*r*{jwtnQ^Jz z!8`S?^VCJ~cCz3(0zJN`JI?t+^4)yR-yx&2NQ0-tcs{rL>4-5kevOSVqe@sJ8i5lUHP5suNRUQ9 zK5kUqVe&FR%)e@hrN_sb6o~w|gAgt3mU=rVlrQz}B`%khz!GcSKO*hi}Q96C*9cX|^DKK3x; 
zhexV+R*E?Ny&kzgr*te^X_`$h=L_>fsr$yXVoG|L5}_>aUj+ROg%y7eyQcyG4b2BA zvBKii@fiqu(}uNI9#Tr0tyZx3ck0>b{|sL5)QA@fM50&71}I`C9;xxDnvt;e)C&HG zA7B>oM4~+yW%zEmswOv_bQr@%vHQI03q;!tAQDETxulMWs%`1|v7v9?18IE!xUpW8bx)9=`!+qzVU@BYC z`rQDKexlRq`z5JG&JrCN)5gtytA>elr-EJ|f`8|lbOfIH5^`Z~tC~2`7$Q6w>@;yB z;i*AmtkZK>=EQMRN?{y)Me^KDm8*tPf`4 zDNJTdO?{ahQW5|>^9;*CGFeL;Tz&oZ9-bz*PH{zpGpu}5&0i^@XlU@ukVdSVmCdO< zF)cKcdmmJtsc)Vxs9&j)wD^ozk6y-t#=>wzcU~{;T)QrGOIu`Q0nfQ-1Zs$bEB^DZ z@#@6>+#E;WM`*7ska$@6t6_}5i*RC}nzT)JP=kVO`T1b;@!Z2BQi-`{m-d+84>ou2?ycobV1)!zj*P9&#W2Q#?RyYpXfy7Ic#jb$JCgur`6 zr2M7p(z+l%j@kbzFM~cAb>B@z7m7OL_m8d405Y zo#=fCMcx3!@UKNF^s`UUE#3_9h!ZjA zLr3Z&UBJbZi9G$fNYAqOO|2bL@Q3{-tdpZOzKL$fysgBIEqHpQPpih|8f9@jv_}UD z{*;pUe#z=Q>UJX(N{^alQQqqKRaRhHHs@@{xkvydHl71O`8al5*m|>=Cm+C2xwGWjEsz~nuqfOIhm}Bpeth81TJh3}9Xakr= z!qbOUc`B}84x!}#)IRZ4dLQ%5T4nEnohwM0AZI6yaYgq2=jcjKMzkMTa@MJ2r-Ypu zATCH-ZwUGt8eoU<*;Pm@JU40Coe>B8&^9KKQE3%5hiRka+bulDRIET3Eoo3i3W=cK zmZVT`GlXtqYpp|#3}d)~_Y1k<3K+(ekz6ZD{4d!0#%`$8^s5O|$Qy?rsu#jlam}_8 z_-Mq!w`2uZF%;a>`f8dE+Bb^Ao4KN0f2apQUb zaPSLdm%qJ-?FplGty}RtUGr};{$qu+_>YbI6IwS7cqoFOIlUIv8FKUsnW;@vax|6- zI=Y@yc1qi#yygi`nEA=~fV8XkY@DHDWJ`!PK~cc{c`0dskxyRM^{=|vcO-7djGmQe z-(QcA*c;!~(!ujGQ!WkDkN@*WtMR^hab2lV)72t!v^Hz#r8D_WMqT8SlS?)*lJUJ( zbm@-)oxRDIR^Xc^Ri^mRpBoBjB^cdto|QLU7a^D#KLiDOW|=Vo@UHC9BJrd1=U%Zb z=QV9T3HSO$`{6`e=x6x-?n1u|rlFaWJ4*Fje_6#fW9r#@nhz`h-QslyY;TKifhVHm zQy^T???WEe6XyifD> z%N3ff9@btfGkFBEC-yN7cgXPB_U{hAC5X0a~chKAt5p@YS!)Vuef2;H$Uuw!X+SF z<>NXDmKlzk zYtv?4{*V?j*uv$$%fNifPCG^e@1Oe=8oPr7csB@Z%8jwYDJQ-`@w;aDJtlKg}V! 
z&n^SOWrj?Q=BZYVT>F7?>c^IfY1S&druMs6#A&)Om$~4Zz2n<Od)#0Lw6a9AFGpS*$kC)EabmayNVCAcT9x>DOXNL95S`KHrS14*GdXUht;F2PSsqbx6)fAgwu%>5rA|lihKBA`Zm%pET3IwbhxqFJ4(5+o~0|5erMgFBYepw8r zasJaX8x33^zh+D`>P@}vI^t@;I~f&6qqB{(>grA?cF&#^HlNfiPX0r}Tb0456O6qf z4N^&d+2U40JV(FzNL=)VDq#is>dNZsSEVD~^%PJA=!+oy(^F05GyY-PLXHt5&4O3| zlzw9Iv8IMPcKK-!zi$KUN3f|$kUS3e;W<5_HlpSxAS}D!{4FM}uY8Q6x4<(B8Cb9W zG58`;roZpz4FCBChVfMVCzAR=IOZ4mfigk`@Q`9j7I}%ITlOqQsVv@z7nZ!23n4j+ z2s|c-`rN>Fxt1@`m^6y+mhHxST*Ie|q$+*_UHvbObMo4LkJu6J$fYX-+s2 zy5AHZg$m7z&Y$hu32>QyH#ir%fG*yhL^u?Xhp6rOtn_6WST?S66-;{Ya~lsUY}AJV zwwbHP_i0PwxN16o`b znO`q%WT*rAFnHalIW>#>Sx9VO)OVaq+Cj zCGu}TBAbP`=%4^)K$^cauh-@h}oio7rT0}_6V`RGXLI~e}&M)Xlttb$va977y$8tXD2)^~nL@lh( z*=(E-%_3Ichicn6$Ng$#f>$@1>Q(n+HrglbiSl|K%KMvQ8um;=Y4$()|9>J+g5*xU zr3dkihl5IpdCry2(U=)?IJ=`+97eIhBLWSO=&p3uD9O|(Hx!ji^<+595MMq7)Q_Vq z#p@DBe87r+UjV@RT+>F)x&2OQRwCs6jpy0REBC)P+SEi(`6ewAHD*-A{3M zQ-{!JPcj#tntr`Jo1acdEdD#UhJIZPOBT3H@o)}oj_Bat_m+ETLc?edA#!*OAyoX6 z@m)Hiq#xJf*ak24&LCp;(fr`Onf+wo}G?2h1}R=2m?+ z*KZX;vKK9(FhJSmY=_onB(x140#k9@fl#8PMobdE*L_~u>`k&5e4`=(x ziL8@xbo;-_<{-L!VbV6VjF>m@P((;XJj+|87vnheo+OOBvV)>&0i*7Th|?D%W&!qZ z7R^u*>XPwG8bX+B-(?a#;&gzEqI8~!-UeV3Av0*4-P;D&aay0#F(-fCM*bLHG5U$KFr_3O(i%+x?bFY20$6XR%`y6N)|%r+HuWN}wst%(5%g3K3}T z3=oRv?8mL68A_6^a)4)|HpFkQ(4FuHvgkGPeP?+t`={#A&+{2QKjz_29r}#^qXDfs z`;YvS`|>EIT#D&(I$&T|4;}4*vKJ%%->$t8-XRak2KmZ4x~MB9PJ%rm`1sQLFDBh_ z8Sp5mSiG+#gUJ7!?Pz0l=t3E>Efsy}GezGQgLSFd1cMrK2e_kSC3I+k+Rw5UuTd%# zDc3%$O0MGK*_zl3*=>16UT`mtyT#yH5p8d`L|Su;ms81)c05!lJlW>VQwg*`wQ&eH zctM0o}TG?k`*jc=eH z1aEg&lTL#n4}rt_fu%qgGZBkN6f=h#dS!*v>L9`F>WhU`m3KE5VrX$!3CVNGq^eI2 z+>UWv6?8^Wr$GPrQ!XYDK6h;b|Gb5QXyL?>Y4|^7@T6a3*YvzPUWbPX$(%Qt_c^E9 zKqkb&rq}_+mGK1qYGPxgBfQ@QTQ7*A%3o&cVmN?r{J2%**)^N&i&Mtk2FTnu^^>s3}>FZJ+Xf>)pPyKg&g3vMj!c+du zJ$`oQ7q?2EYwH&2eQjP`alm((p>lOa47-h*92#b%9jUifB~3yS{0$Cw zK0*4&yNW>s-&bwC3lDa}##qVB?zRnQ0-IU*C`EE!Pm>M;%Uq7KTAy2c*WxggqhAk> zPkupwKhRbD&>_HqoXG3dhi ze;xv#e4=6lZpWr@+$jE=)V}yfCAlT`ie1DM_3|$ZGPM_U;bfnQrzwIq##>iC8O@o= 
zqUFRr{yjgLzzml1Tmg!Cn2ImVuFBQlpq#}IGi4Mh9)KI(6Zk@P&*jaX`0mLxcNJUd zNe-cRpW3%nX75k0;FECL4k&FFR5a;ot-eqh8F~9nsUKgXdk#|1(VmC0T9}`;I$1T{ zHvNYaup{U20J+!%h%OPCSvEG3=L_eRUpo!e@{3B+k~UftxIx@xWU+&{kq((Z142D& zyB#2Cz8z-@J4g4_;79jKlOxP*fR^Z|9ijk)T<>u$bZ#DFO}gJ$V@1obqJ4 zuDdrDn17I`(ZgroYte}T7~-5^c+iCdf~Yf}Z3trknQJoM+<%oNE3`Qr%$;BT_Z{ev zW)>fO)Sl?x!kj5|n7fa>ZF#By7wUg)m_}-4kAxBX-3`MMPD~SS>jv4=Hk;GZtR3Lh zxPd#FEI!2Q1H{$rd$tLUhNODVm18-nf`W<=dDBaZer$GK@L$N~66^&{x2Tb?BvbpD`TU2J@;NjRd7zH0P=3q7b`kEl@&~~6^%-drLy<(C-d>5 zh6E?mF64eu&S6LiBCVrU3GJ^AU)X>yds5k!H97dg0YwGO4!1_=s5YJHWlM8>b#edE zYLA|$YsGV%-ax{uC;@(doFe5cw!Xy3Z-%G&t>u7_12tCnvSdZS`HJHX^A{O)`&~Bd z{V6fr_urGf=!GwGtw7C~iJ(wIE=<&kUJ7C-lRTfCqQJaK#JJCPbwld_`26EcQnZn` zO;k3!yWU-Bj~is1w$2v(!yeTV1Vc%m66Us=KKMIdDe9IPWnVj{l)^lq;X5%sOS#m% z89D8ZgW!Y|QY_pr(Jy;d2_@2!@UBhZ;U?ojzOO|Z7#WfeLRU>10e+h9BHtX^xF20i zG+t&M_d@=mC$=5*Y3)yptR=agKLX4|XS#DI3fXJpzwjfem5t0DtDpBG<;1h}dr=~t z!T+5?cOiJ_e+r5klPG5>LzDlcUjKq4m(h9rwvl=46w&;Xj+vYr8n$A+Hcz&9@1~y^ z7$MO}$DA&ji{t`o0}UR?TyIQ$DU#83Hv9|5*a^TdHCwm4%SG$56Qac7h$s-XpX)8! 
zo`n$y_zB!cDVANC3Hgw9xPK2)bFDFv*XuI|tO3=wZQo5|=kdSExi zb{HNt2lB-&f}bR+kKPLPSzG(V(TxRh3qH`&I8^7f8N~{b^EIJ{9b^oicsb1n_1TVj zvK1Q7ahiVtyF!r#K~LmW6G+y7wz;B^$J_)J@*3-f7=~)@u+qRIdnr8Aq1dL73xNiY ze?@-=S%#T!x5}ph-^Z4!`%Tbik>ntqyd4?cG%O_D(LZ#Z1Aw%8-R?KB8(CqtY-isS z-PRAm6BN969ZXnA!e0hd;K$TDYmAGfk($rXFa<@AVVnUG>~Cjcu=TLXGL%a{x~5n5 zJlvB^ntru9H_1mhW%z3W8UWmitl{v3dk9rg`T` zx_c}*CE_zBL;k9#FtRd_V%(Rd>Bz>gT(%z*HtKFXB*v+0vsNDz?hFrzcL2Bi^(?wk z!3G?+b^%G)JPx6t#n0OJX5xoP^-gjR@HFRe6^{SJrjlF7^{H&Iw}x_ASZk&pEkzLi zby5bMe{I7?-=Y>{aKwR_FXDxKRB6@*oD8JWr->{vCTwiU0}XKNZR9aZw(~C#uzd1< zDO%ltM{fnPAMCJww>PV&zm49on(k}z@(Mf?ecgS(O~9H~FM5j+o*ZjSBS{=*An(va zCIwRd{C0(H$WnXYPc=*$q&HdcZ|^goA^)**eab)n6@0jMO>D}(T^}Pc;=B{g@q_a@ zP8)-fp^ zv&NygO=m=;m7l5>cp}-1mhni=zhD#+y`^otH%lGcE9zaA3)Sfz1qQ_5_%+K)?rDZ4 zF46vIO-lQB?Cii_PIY;8ouceU3ZTiE){>YMJrC+zEY?*f4ohtR2KYxJaQ`^;5)w)* zK(l!Ut4f)lqWKST&$w=(X*Y!l#go#pPO$F1j>a#}kP6P( zopc9jvq{3@>M8&ofm%Fg%NKp{+}mOq6=U^1e2f2-2~^q(L=e?qu~vB-VD=thhxP!6bzg)M|JU(No}GmzcFJ+IWd;j)REp%0GUEkzh*#rMqv|K1WBc>a6r^`^rE|5L|1Y_W7MTYa*IG`5~O;V%&doURQ{ zCYRxmW7soG)w21TJmVn>nok-yV9>k&BRK+~W;0is=Jgnk(6_uoCeiKIPm{uoMM>?u z6m6r38^HI~KR+u=J>Q&KPMh!_ORiQ$>y@_{=44qiN~0ToDw<78`jU9~dr%vOYmI&Y zojWt23LrTT+@)_7*WVYd+{@p#j(PCY5`RV)R^C1d&1-v^23-sP}^k^8u%lEZz^+X}H z{CXmWMkH|JRZ!~0O9GyW8Ys0Vj`b-Wv12sfDBy^pKE(7f>ue0!)k%~F@E{quf0Uzs zOt~375NCf?Lznop4#=~#G-5Ns*mr7%<460?y1@@Cfe^J;GE&ZA7q{W{b!RPBD*f}H~ib28qg)YZUu>ofkv-1@NVwqjPF0v4U$f2v!}Hrcl+{<@+k*T^ zOZqXOPVKq6Jt{X{A7S3aqh}CR%K6l?!yjdkH2B5B{Pag7=DO>I?C~w2eDjWbPN}v>|FZTPhAgEAUF_Z$PYr=ecL6 z9rWlC@0$~7d|4geC5*)fp4Zj00#A~NFQSL(W`CQBG)Te+iEp8M#LqQ_!6mnWUtGiR z5Ueq|Laa5DW7J~H3&uKk4fO9C*~^UYBk z&8PcHP4?xE@uPaUbI6HD(h-ldZOqY4b0o8`A(M1~gE$|7280m_10*w1O z3kt_#0YXRfS&Fm|m!5rji7-0dqz5{* zdu;G}r|5o@d+~E!rQ6xOiLs?Z+a>A(hjDZI4wQFb$6vO;3^72Mj}0~APQh{V;XCgaqjI$GVWY5p`KHXS(YgcIkKi*yBDbaBnPSJ% z2lqrOlv~c;22Qd8ez-TKKR|w^p)M83j0)1Cp`&npj9yGoP?F z$I|Z60#_BTO*OThe%*1=zz|@7A7Sc8;MjOf5ACdoP~V}(F^ba@@dQZ?#2UEgFAL3+{Uzjwf4zZj^F5JroXV#lPX-|2_ 
zu756c>jtldX&wGv9NZ8O>(;Jq!2z1BJX=67VtT4hcGgXtSOufO-Levo(Ra#3H2 z?cRHW3otsJc)e1=V0h+0xM+P(H4?1*elKu-P}vsQ&-1KlBrwnx8`#L48KaD)~;xe${jG`Va2rq-+B!j zk2-{SJo)#)A1Wo2BKy;NfW#iW9-k|WiEAFsIEHv~MOu<2|FbDtz zPB#j+5PPnm+cL+s2wCR+9#u67dZQw$!4Lq-S)=J&Ikl?eA4KI*sTs~ zj1Xb^fKZ^{H3(K6rbKb~_S5Mh?=|gV8%3M7x7%8NHtj2^CV1!lD8K^^h#JV2728^J zrtZ;@otl=Ol^z&3)2=h)V81e?v$3AyZ5u)hw;bN}HA*l5X4E}5FxR!kXOzy?5hm;p zW!;j8vmo;KPnv#;R+56xxK91_;K78raI8tgM?7>F_S5#ihNn~Vt$!_{-pjLA8}p7t zBNzGt?)+hi0jtog?o0se@{HO2tDO?vP2VQkc&h9CwsnG(Fn*~SWmO=;H}DkLdT&&6 z+f>pvf2opeIuy~ zR|p~G(M!($<1{_Jwh&3a$Gx2Ir(H%5(1lxu$le)8o-cd?JssY|3>=(i)kPe4BP^}AqfsXM&ISEx78A`eNYkGKKOMk~Poab^3E8A|3M8xrR|h8~uo48srske$QR@Uclm zYy`jl341J(wpoYAA7**Spsq>y={{l_r8doE(B@{cT?i2?sEcn}9@E}U?mskMpQx`3 zsOc^8AQaB!2KJ&DgvxXO*z0^$_{NyT>2vpAU2Fh&WG_5>qNj>VZ=F~{Z;0&j<22$ItqtJp7&|pU4Ixk z_49A1lc6+PoYxWeK%Q{FK`~ zT2>E^liAt7fizs@bf$y5i=W^8#(&>Me(rQ{yA1omQaJ0Ils)(eKCrdODYQH7rG$2S zVYBWjo3t_KOftQ9WKXL1d8vs~FQ?q6YiB*>T2*|6ilxTL0`3@KF01p<<^Q9zlqy1J zj5?WMau|S?@jv!f{rto{FIi+}KDD~dC-7Ki$|ZHO^37L60GRW zE5rx*T$m&yf!3VJGM$N=m+neM4(4|vW^B7`57zyPC! 
zhrh?G{jKeM9t*!&!{>Gw3JwvrIrctj&;3SzTf0>ifG-O}G{yyWQLk^XkNwjxQj98= zD-o9ZtY~`u$p<;`(y~gKx-wtgz^OEEdVPTFM0EVS2WRr15EP#-G-9Pk`dg@-UAIg18sjCS!A}}GPOB^xh$B|Q}8B^L2FOH@%2XO zZz>(H(0Zv1p6(>S&8au8=suU|xL=s$Pn1Q^_9~n!X=+#(qe{2k1CAxcTavUne<&oY zWCok!#kSI_?9#g!qa1Q=L(ayL!Qa*9)L*(G*vzTkucn)gabi|GXk%B_kBtlH$ROOP z&|usxb^JurBlIfZ*kTGHJ759hbII3L;>FI$h4(mX4BG=^1?_#ig~4%;0|slfWblgX zP_pr@o*nDY_zqkQBA0Jpme(%Mvr`V=bu88FKA|cM$7?U#zm7PNy0;^z2EZw203NNU z5KWY|K3n^J+Qdzd1R`VvR)zISN8I$ab1F_g?`Oo^dBI=?9e_Zj4c(uOb->5ZQeZ`0^u)=~)%%Zdwh(kV8nO{OBwD^ej%`Zi9BBp!;kOb!i-0LS&h|xX(qPpz zI7s^jyO7CLQG@>q7s21v%?u$#q>V8N!JBn3y!uT488O+OH&vzM3ZNi9jvagKLt;R< ztNO>->oBUGbTxS{0fIlOP|Qm#Vf1j=S_ryP(D^e(NIv&2Gnnj|ID6u|t5Eb2c_ZM< zZ05;D1guO^$?zoOdK&URx66(KPz+wlrg~LQqm1Q9E=t*17$(hr$JdjELJazAV@eC) znW#7zO8p?qtR&(eQm4YQE@b9TrE3_+?B;*}A7$6Tmf7ER zPqwY8CcDYDZU3{)iIZ(>GAG+juE};2CtFjK-{&Ko_Y>^v?&|E{+H0@9mQOVE@c;!d z{QY7z3wSX7_+^GIA${p*+~P5KbGqO<9k*NaFa-W{O0PZWi7Wo4fpt_Bk;F&qw4c#7 z>gb!wPrA$;KbPcg*YW9K&@iYO<0wSuvLg4T0O)EJ>Y5Qr4w+GGV=zHnts@q zs%6U*f|s!XFBXg)o%cS6pDisa$esft*HIiAUa#&tgcb(xyhp5OCvg6 zN_#Pt8p-mIwW~I1rdI|8O0MpL#LKOT7v3-x4PSXc37W_^R zw`#J3D_K2LANlxHqb3^uQ#=FHQ$x25Et<4zQD`T0e|b;%b;qF+KWFYx1GtJ|OKym` zbO?ioMcr8O$H~6>Qa`2FYgoJ9$dVtk4IXo$0f-&5hi}hbNAUHlbRQ(k<)p~jx2Sa6 z{vXN8YK~0V(Ddg}BY*4a6(~H5@Xvs8hfCzzT-kM{FwYt>ybR7UTLZfHmO~)$db1S= z;SoIf;*NWoZ5NbhRx9SLz2uG;`O^%do(Ufa`BiE88Tfm4fn5@QRcki56{O9wvqql- zs9KCREmYc(Orj`xOb=-WiX7Rq09O`_xw;Nr2czcTNmPn#R7{lxO%uafVetyy{|xBF zD~kg@7dAQ0%5`eUqqPgPJg2nTTlEVHyB!=$&Hx5vYO}CxG{JJVaYeAV+8$cH3`5tA z5{{+Z)52dd!GpcPpY06abog)fan;l9kcyk%`;N9*uH%IZoYr9F$Dz!rh2RWvg53$# z#D5-zj}0;dRz;{yd!3JcF2)0YQ%%>=t=%u&xO){0vmLGpn)AW0af687%cuw+B^R0R zW?qXRROQBIU9}I7>*kfoI^tPodPpRt)M3m1LfkQx4feFK^Z;BJ;aZE|agnWuNmyvU zMfXR)4zo#gWoZdo6`d#DgZDNL<}AKW^xEW@X6g4Eq1rK_i@<8Y+AL(^low?9fr>FC z-ms}VcBV(C%9offS(~v4K=dODoBjC(x4*Lg;jjF5dsJmgh2^@`KVV&+Fp>s7u(j=b za@0njTgPP;u0a@hcTp2JDL_=~n zmt!GK7Fn=JZ$R+L3x~DpeD@2Favsi7!##Mu8X_-E#XQPX3?cO74NBK1OzxOc>sxl& 
z)PMDaQn<~#(ypwxb6oUHtp1c##h2}wz#xoPmk(8C!-HMkZMca$0oTUPTCw^Za%;a& zs7M($_+Fg_yZd(xztvStvm51OihuC~H5v;|3C18gzJ#xuw*HE1@^cJ98geo-)T2ST=w2BP-K+vVTB7esef+2DkM_8}89pq_qVVo@!D$mHu{S+<|BWnS4~ z2(h~fZsE~?TU!L+PrV}zI6UwTeZu^_^0A{Np5Hd~FoMkH$&bIeP;WxjBrG|7KS5S^ z{HfMgUQFb;kEjetx|2sKr8hF{Dovb`x>i9O#`~hIOQp!f?pbv$7+HenK>Uqlh*F|Q zOM==wCUtt~VT~na|BRx)N^^9!8D|}g;9XM+C}1PEZ8~t3{r()@3%GAM?0=%y&r|hf zrXnrq{^Q%hZp*JjntfX%rIKd~zJaumB4U@B46p}gIJx)cpKkFk@SAM7^&^m%a$l4F zbo~v%^YG6o!(bkD=2HQCS!MyyK9u-XDdsmLyy~+{iBkR6@F`%KuzPV*)4Gyt-f#_m zw;23PYlUct{X0#bsl9qn<(J?iVSO5_{OTp`y=E2lq{crQj~`#e;xSMN`RE@Jx&Som z&ec0EPdG_erj4(4{#V4xuKYzCrASGJE z>CF>S;D!dz-j>Xq1UQ=PYkFSTAD(?9$sW&RldxTt7O1UPGCSQI6esP`n_?*qUb{j-bwvo{^ z%WGQNl0m>@fFCr1jK6$Y(uj_i^Kobpn~sQDZ#Mri0rE@Z^b(}ndD}aZdjZPR_S^LZ6Y!b#+}UJCFe_0 zVdzq2gz{*k!D%fMfWwNCnnZyH#AUU2S?5Y#`!FFNcabV%Ma*Zl(O8~x@L&)AB7(cA zg2U++W$Xs?YO+5?n%QZ~wg`W^7Gp26YkttE#DFBGk;le1{j1+{j1=&1cJu+a*dX$B zS?Zi{D$F>Shv_EBC|1Xko$loK9eCC!OiKkaY*MU9w|74nb|75y%_;#* ztUvQ_`2U!FqXZxXmOC&Y4m<~r0a>g5{!m#fB=A0ab)7UB*4Iz8?dIlsCHqMyW3s)1 zRoYiYN~eVt<%>(D0-*o_r-=2Tf<@8a$nOaPz_e^neA+TW`rf4;57ctCoV9Y94kGJl zI0e>dmL@JO_)E=@#qj@r_hmh{X5z5Ib+Cl-2i||@KT_a)6AKg`pL(`x?zI=?K|I{TDy)~SY6^IU=eEG!%Xv|O{4#kST z9_a;TSptOyZIOGeh0?n%V{fh;*aQkMvR!uZgFY#05zj_O8EZ>EiOE9iS5t^ zF;OGc4XQVN5mlJI&7_+0K^Ly&s>0|=|2O*j=E1|zcJJzep=;L{OBJN}`L;bZjWTLB zB)wVZF;dY^0CB@6%+>+W4`-_>NsNSscnMt0#p;Am#{B&e&4&}UlUA(|y#c;~L}1H) zoj}ptvV6dFHE@Wk;w=5l90Rb^wPzl&nda*|e=h$lkTZ53j3S7me$+_q5BT=>@$w!L zz1swc>2jQV{#-&*+MA`{{zo&ZC#!Y`o>I(Yal!Uv@lkTGC?t>$9Sb>WZyp)Y7i-3Y zK}t%+MFvcBrs|Gr;fe_~@H=e|;&$7q3}o zt`P+<2=#oe&QQPV^vTWp_n|ZvnO)Mc&tDjJux+&iWtGg$UR=VhZSgs(K0{$ah9cTVn6*p$ zF5LXQ!^^Yu;BgjTg}10z1IQuM8}z1%^xB!&s|lS&#x|m=vgpxVTL@r$Mk90suWl-k zn9}2S{nkIB8(}Cy8hTUrQzx>9 zWy-S7Z>**l)&-v(FKtw-Ld_HH)jBn`>H+Cb%ecb9{`6bl#>+D_q9=V_bf>-m=D2&< zi;cti;5%3A$$NhB;wbxIPi=sA0_(NEz_ycpeEVJ-a*b+}&#gx8*zM6%0D+QD#^TmS zqMRg9>h=&p`X#UCn(J-dk)Q>3I*z@X)Ei+pz)rkh(Fr`DFC?uz%VTuxEPAo5DTG9E zp=&ezp&CUyG`;U&`eh(MwfeJLPuAqN`?2mqF?o)(2k?Y!W;z?y!FAKqYYwju2XPd> 
zy_WZtbC-A&;{?+H0A08|@5b@a0(+q3P70=sa-$hV!ik#uNxY6LfFTcmQaA>L&f?F)c#O7|S@zC~ibGdKXJonDOO^z(nmn~~h$HbAp?(JGa<-0y4tc#$5_q{Wd{P%ZIk6k-3QGkgfCOZq^Lwp5Y3x&^ zAH0^=Bw*VzJZrL3tzs=|bMnpt{ySbEA|ps(bF7w$k9^fna-ngM<*4u07KqVoteQ6q zT%vQ(?jIXe!#Fs1YpEJ8Teh<7Nfxpx& zpR=;YJtp+{Y_)&@p}6bGpiP9HLkIEOFBAP<1Mj&oHYnWyLw-N%0xw>pnk%9Z$Re#4 z6i$|D@>SVd$k^?nS!H%)Hip%oL#JN6?+bVa|BID6vqf)Ny^`wisV-EA+-VZ5POs#L z{b~)6rN|fwo>8*@TkXK2dgxazFqbyHq61>FJa@b#=BCe}nl~|8tkXlly^ zyGuOO43FQ-TY-;f@Ie-3_mtZO5JNbe%UR!(qM&e@oQ&25rUY2a{nQ-n zG;twJ^^s8S%uKl?=OA)?70EaOf#VzCYDM8^MN34{(g7cW0@|CQ`1y5-?w5PrBPsAk zBuzhAF1m|%@bwpyk@`Ala?4ksKJjkThL?=lG{!x-1l4el%U3aY-z)kKRW~tUR{=8x z-(ZmXmn8jhil_#-C~D@s*pXP~mVbm>hR?i})PrB+o&hi`{+Otb<~LtutwIDkO>Ucz zQ1dTHgpm_lYAhnn?Sj;Q-Yi?eRWs|p$x*uijkczoD{1UJPkt|^u~V_bA=(HNGFfeC zp@#zwPhPyB3&$BE#cq+BPA1^2{Z0QvvXk?fq=E>YT|fB>Lo<-sol$MtRN{`Nve6*PWt(BcB2fFG4`=(Z5l*@5? zz`@3pXvb;IeoY}7?WG!cnR6s(E8i!RYMI^^1+SaA3T~oQ=Ax`}*dihN`hb~rNllPUJBT59nRcG?w$-mfvdORH_?YqTN^%^RU03h$E_KC=N z9j}ILXgw|RTUbD^9Z}V{on%_?C+~9u@V$Cu#`rktL3uE=aY0LUGBm@fB4(qAp1|%A zC9N#Xqg)bSV|Wl*DwXDeEAvcS3Ja)>3D0}6Mhy&*^kS>hnC<7$Sy$9w-}InYNb5i% zItM==3x-qd%SsoHcRF2|N2dyXJ{wpa`HAoL9j0qI2R=HV=-j77V~?ndktRP>i@1vg zNG;VR^iFUictdfplZZ>S23VMt&??!FZt}i3O56*ZS4>If zo=-&}t|*w0K~?RQp)Mj7QRZ&&vH$bG*MxJIasuF>x(`dk%r7+dSIvH)2K1SpF0}$p z-ZsAdf#nUA1o-*5h~tY+J|7hvB7WRuP4))rBE@n&;;q%)4b{{lFEI{H;&+4RNGz10 zR&SiY06Fb5@XR-F<-!ua_6@pL%j&9tMTDHiP&+DvM0Eb5%4nGnJjy(z@eQ9+3#H4s z2>-@im5G2jB$v8%Wt~}DGv|3I1tWos(_x$gcSL%ikk*RtRu3GT_ar;u4GYB0(o<=c zDCqY1ArnWaSwUp{(~<0wIR)Q9cshn7gi4Nj>TNC4B9_;KGjWmXpMSj}my!`ir6fb} zW#?@^tjCMmsZICVhfNIwDPeM+?7tTU|0qiv5L<@BNuSXrx66c*>gCPfC#bA}|Bi)u z>i8K+R346oaY}b6tyioQ9}aCEtLL|)xO7aPEh3!Lw(238_rha_TT*#98i3!IYm1m1 zp@fM<-d(g9IZ|r$3Z6fOp zPrk^wB#T`%ivx~m{{h8KOMV1vFh?Yypij+NY?WC2_Km-XKUDSib2sUpGw`Eg`xiar z)e$v#jetI_j@ctCABCt}y<>c~VOp2U!B0jxaBllhk8Vfi< zapgY3_v}Hq*1(3%M$L;o!1|K#X2C7by3x(vl1pkMuIiXZP8MSfJ|M2vu!y$5F0IvM^G*{yNM_qb0c^Tq@sbW{F`(CL?2!>=9-UJ<0ew(td3H=7gCBBFx?QF 
zcv^V3LPdZDL9FBIChfh+WTHX&wx*SZe5n4U+HMZ{OEc4IZ1mah(Lsy^jF#Q z=aPWjsFAaQq^!uj6MEnlbsC*JSpbc z?I@zg2FZVLzD4Vo(3Ej*d3Oj> z8vor@)49%#Q~fm!NMG8=>h8_g9k+_Oj;+_fiW0cBv3Meq;P;nc{N@91F&zJWvQ75T zsYja5>A!RKpWz2|5vrzUlxUpO+2l;!k61FTh!9R8X!KJsGIXXQoMM0!eQ7cZ8tFd| zLN8)6S^m@1CJlS8=MK7!jXzX0$KbPn3VS*p+(hz5ydY6ae)m}5`4q;}u}yc(Gzm|3 z$Y;k+SSq`if759iZT#8w(G9uu2x!Y~OQ;OhiU0kiWKoCtQm!LtlpcEmUy?5-WR%xp z3w}(kG%R8BRg@Yw#qj+ezA%z@CYkUkUz-5E)9}dKAn8(2;igo?8dOgo%>$qGA?pQf z+OA>jE;~zC78tfPp7=!F=CVubKgpxxmd&?0`$bLIqbvDv|t}1_o zWqZmB-W{>?cZVu@L0`KvYK-u;Tz7rbbXxwX2go}r3ZLEl-=WM-cj7OKobytQ_BBMC z&Vmq}Gxi|CA6gZCKDYC*gJJ)DbOJRXXVbJ8S3h?mpdk7qSc)i$w_`HVTw8KAzaWB} z_aEjaKVuWXm+*GBn2)7ssXBe4_0>oeq1T4f_hxmI-}EV?<`jJ44mKP?+8-}%xkXDi z@-5M`RC75p;VnVRE5k#!M0smehN|&mZlXbPG66aTa zs{-Ae+7qYq^RR*c68kN|=Q}l)sr_|Y&fgu>88mJkTkG;lMA9eOX?!i6dw+F=p+j0{ zejky;NZ|6nc}``R)ms4eE2HQeQa4s^IBMMwQd57?lL~LQ;_hrAsn4`eJb(|RsZHCQ z5o}MeL;p7OUvvdrBa|Ve%Hy&*J(=vx4___D?Yerl@Rr)*P%No!9 zR&s*)ELGK|Qy|@Em1w?nT3L6^bR2X5^g-W}`427rfvwi7wr*IVLH$Hd>C(EXX?MKB zk97>*G^*P2t{fUCyc*~0AI{8c0`gslth!z?*+EV;)Lje3lb8h`lL7HFTl@;K4YfS3 zIurn-C^@>otwbS*%=MI;MlE{itWP#m@Ob5mQ%s&%5cvD!7Ue4;t3=%yo^tY5;-4gx z$@P}5>!|VlSS+DN^y5t)y^NiPdCJQn!X}#G=L5uk0HN6iTWwK0zWl^Q3ocW5(3Ou5 z-|7a5LQd?9GSeb>32clYnXY$<$6Dd^7rokAM2qHiI&RP3^ZNZGJn#;mrq_g#Zm{Zx ziY$3B2w|G;dzgWBa()zAtbZs>fwxDzC2k4B4$V8%eMa8a#_bPFx!@J$9oGya0Vjg) zg|?F^RG3!iGg$slqnL(MA(}fb6!yd=Q->Q*MsD(`^QUG!a^2KYz@qu6+$$>DZJj*xN-TR&wNOsid!Uz99#05r;rjJg zPJW$ts(B{b9{w+scjQu0F~}X5BFRr8z`o)a`Sr3g^U-rZiN|1M52cLWuOCw~f5Qb{ z-L&Q4BliN6sW^=zr0LuM%^qyAZ;$|?b-!pJnIK&=&r#yNLkzjlALNIJq;Jsx0C1P~ z^`u^Et#RQaNAkOK>;tvwoWIsu1ZNan?K1y2xP$)1Z)FT#+S<()hj;0ZQtnF~dhlaB zGd4<=Js00tzW6BQy4klV`h6xb`QIPG{C9B`Kqk#uc>guKD}7Su>sGk?RP(g!QZ;2H zx^@PS=zc2rG~kBf!_EuhLEmhvyKtHNcdq{^UNXsOs>C1%d6_hHSF+U12Tr=;g9(K^ zj<6D2fBOMV62?SZ=ZUb`BsEMoNa`0_Bg+nZtPG$_FSZkfNMv3;U;X`0v+1C;pOQ-@bEfT<{5g@4n#vrL}bwR2SA z{;VvjV<(1ctrq)fsy;X^;DuyVx{#svO$N?Rj*&JMm{c@%Gtn^#MnST^`Yt0C_Qs!F z!m59TOYN0MDq_c9{=fm)xZltk9!XAwEcY*L)ju!28X8(9GFLj7kqS*hq=3KF47m6xXeGcF 
zHVWRvF=8MypWFA32T5@^0Uo1n#5^&A!QiUur7GEjk3MhsLPWieb({vfLm2u{j;-a2 z24HjnorNR?(o1D)l(Q9SsHi`w(#jz*<$X}@*KS8^elmbfC_$|(qJyX0%gJX;pu<3){DpRsCr); zRQ~o5;rTygDglgBhhIx|0AE0$zd9UepsCPnyUG40aeua4uv``B!H{nmmj_S2WYMO( z_726d^ez#y&8|Gn3V(37Q|Ig(bnc!MBwg}`cq+mOy~o$GFbRLII8&$8258e4>AUr# z6RwfG&J+7X%T1%+n>ZireT43?zC6Hy2Lh$BeDXgWQ0#(6MTy$@)MAwR2evxklG4@v@|u((%$w8M5zI`?J}4*(#p$LW3r@Ji>k!qLJ1$g z0EJKD|Bw>K%Y-JDT#wif|GaotX8n`-BZ{EIu1Tg2{&fm*d-T9~fTllHrh3e2dqOH$ z#}c#3^z(5cNx7{*oOvNzR58@9ZiXN`x<)BBT>JqzE;Hw_Z=2dn)R#OC*KsSBD~6oR zHyH{~O60C&`~dGd7u$e*U~SAv4KcfCI}wpD-O)`t7%A#D)U6Oe@k@GG^y7J-W7w%@ z!PqD}HX;bc3(!8{Jel5@!uV~%d8!_3-e&O)1x`Dwdh#&HXh;(x75p*;BlnL~0b_)_ zaHSmCOXAV#5Omo;!jYh*6g=X_93$_=rK+z)mgg>uKb*vOnsE&PSbv=jd@40rYaMUJ zy`6)-87~Xp6c&t5S~D`yBj+ganCtAz!9)Rpkb8PyR`o$(+^eLYlZY}6+HYCeR+P%G zaGm%VRaHa{`IGC*Br@MEwSnr(pHNCOY9voO0RR4D^kJ_x^zxxC6Uzp?F~LBb+y7$= zBjqHH8=*iBSFpvi{`_n)tpfEw04Wra5v0ZgflYmX@(^|Xw{0Dep;p;eqcO&Ofkw!vvdg_(5pPLv1alGAF(A0IxZoqI9Mfcwl%fcgs&3}rt+k`+T zgttySfhSSXR5#y(6%OUu?e}a_D06aQs0FXQ=!2z>{RcUr84!c!JMeo~n^Us45@JRV$vCGvGA>@y8K@M^m(=@RBHiCCm{t z4De^7e^$zu>ad)ZhPY%JD` zOGU27Y3H{8CKKCUI9%s7L_zJjD9a|NhcTn|!h7?Icn4Ivvmy5#@D}|!Ag}?I$u+wF zA^s0t1fs86G=v8$ZI~i^!R91*qC6{J>G3RSfJAN}l12WfgNE{H;u_3+xOM4U=T8d7 z^d7m+DEy~ya-X6IS%2*B=qUrpjIb!Hh=pqeYek3?@(*$*yeG|^UeZnXpG6OR!@#F2 z6wrn&QSGTZ$4t-IKR?a}4Da?NzZ5C__y==h_wAiW?_Tz=^|#&MR&ZsE6Y%{@iGUKp z8BxOX>dMwy)Ects&sC?0`P{9R8|v024>2MX@P}5lOkFf8YyPR{Xk+WdBI#7QQDRAC z`|IKd<;qR6(q8x4V&=3*d@ag(c@@0>-WG;|>V*#>o`yZqa{7)kk{xsEzOF5jbMAKX zK;&6t7^ZLFpTz>-TLN+BoWQ}b^f6B3=A3VP5qF3H-WP#D?8X)v*nM|4uEfN#XQp6Cg*T(dd46}6bYeY=yYnzHS28xwuh(kCHW#8oz=->7E%{2Mg@!YfS?e~cqt`{-$7tQ+{3 z;Y5LQwbTEQ^;jTki=X}_}leJ5@8k5Fql0!M( zmDVVq79HoAOqTtiO@LeLzhCE^;jIz>(V$nPR!g0A=Oq;ksl7Fhz{{~Nf?w6A(r9OS z9AnVJIA>%qT&(9qij29kn-NkBBkvZ#_029t#hsy4&nV&GZ+BF62KBlCzm~9Da~R*g z%$jNc?sLk>;Oi*;<5LiNDbw_9q7noi?1k)^H!Lig(o$Dt&N|U{{e*&07}gW5yt^$% zJy#Kj_grS69a~=P$(#t;-9zKY+5kvoRmy_Z-(6MyX`*b<-OLY1tKG^}$K}v}=T<+q z0nb-=%*%?T_&TZ-di&Mnl*Y+3Fhxal#rJ8Av+(*bY_!#R^Y-0$Z|m_pjiOmREpbi* 
z%GJ5fqhrxyB{S+|yWdc|4?k&9vQ}(%xQ%=1&2fPD^-1?8VhFTs(A+}OF4}L$?*4A) zr$^E>FRe!&QlFnYmE%Ms#DJVkPcb(Goo#;zSqx-tnh_}6QpLb>6fHO}?@azV^edEU zV(|BPcVgmy1+Uqwl#H$}J8$N9lst8vr4BwQ{~b3#6Kt6cOS@|p@0OK9aE%cttTZ|U zM`_i|C_6$26big+*bY834I1tkwO87qM^T4ii>vU5DVE11|1JZ6W1b)}#2;8Zd>@Gs zsry{7;coD*4^J6PAkoe;6M?qwkRp>;F)l_npxAg{gD?V*+y+oRs3>O-uz9I$69i=B zii=PIp)Ez(84N6QKcPHZ!P8A4vTRfd!WoAU?@3y!5;@SO?w;B0#u~*1A_MiXqjB_J z6;qmwh;Dk4tctjlvOcDOzcS#4y44xk7n$LdrP6*r(y&(8h`4?vh4bf6Sp;~qnJ6jZ z#`SI0*;46exMvorj>7;3^LqQ6X z21*<{sLlqTkZ*{F%m@$B@7tYs6f*k_4>Hosu{emVtnj%%bXnp6?($paM~VjHO!+PR zvRkviVo|qEHSujaeSO#o9i738j0$J)G)1{0tXmD$goa^q$Xn+L@n?w`;T*WoJ*`HI zqk52fREO0K9Hv{o*8Dtmfdd}JCC<%_kc{UzDkmg-VwM)j*md($aDs#u+9bZ(VStz1 zSQGXWYfyjnVIlqGEkXeEO48raB-Hlh(ISD? z{!W#~*Cn}^AWnup59trl0{c#j4+G>e`#CTOiFrzHx1q>yxhyoN%1eC2@oq>GT@9t} zy7jsD2z=*iMY+}U>94`AeLN1#k$)vXaxqH1k%!@!WER2suxFzUnfR@au=AhQ?~-nF zVjEXI;3~Vd>{2sRur;nBWXO67U3x0VT!@efDUW7tcX|l?s=nFf7sGr8mim3Om`wZO zj5=-*t;vtrnE|xf)oeouXxhPu&oL}c*!?Wte+XZ5;nV@4bRS6ONs{+%sy9IC%P0a} zQ+c{ZMWFCvwW2q$2tJ}V%E;xq+e~ZKye9dFPSk%{rh|oOWAxVcP&{gla;$?gTM8a8 zwS2bH_d9wP6I=daK>D4txZvBZN%m9l*^P!F`he84FtlIyV^y^FnE4ub4SnFgBB$pK zV^!@6obqLhi|I#TD{1)T@jVF%rb@S@igSIwW5w)%SzB|kG=vPA0yY4r9eEJ1XUv0! zu~+EKSvDY%h#tMk4{KOMFSMAA4F29JNaG)g{&ig5o~Vrn6AUsOVn&NVB}qCZP~uNSO52|jKJUY*3SJQQW^4pIs|rU1srGK zt-{MTy>KgR?`$*xtP(9(N{-SBd@D|K)od!h1IxVPy34-^d-&zp5CU4j8RY!_(6&t!_2H;JjnyTLusLoW?+XD-P;J%^-6eFFN zc!Y8WHAh7Ei89VZ1>xQ5$@HroE~EB4%74n11Vj=R$GI}a{n}A+bP{| z#o9IeTacaM_T;7~Jnd1Fi+=v!;lrMoquSJUXZ~3pp1OcMEMPXbl$$p0Z zu4cTDA4S7o&icLPgvV(OJR^{d-a`lRb>+Ae_>bo3B3Q+$VZ^J!y*nuIiNOG>b#N}2 zLhGV4^xk;^XH3w7<~Ip0}+chJ1=y6o9Kmk@f?#^uN^NPU{+m#|Ih5T zc9$Ja=aAdyu!pzh(d1EgWc}REz9a>FkVP?j)!j^4vqjb(`eQBogx!MIN69W}OK&dY zZ<>fnj>DbE9dtHg0)c(=G$1dsEfBC04b0Xucua9b7@I zitR{hpz!I|1@hO{KN+~E_oKw!4o;hxY)!I%Z?g8d`?PI#&@v|po+xh!Ud~9SRPA|< zP~+Z^3|UrLA9z*4LCtY9jTOFE~Jyb@hf9Do3-DFi1C4pSoZqRjTc zJnj;wBE;~eD0S? 
z?A7_NuS{w=f@V@+d%hJv0}!5Y)4rn3dM?f)wRA7Ed3brw9^Bor)XXTul}I52f9f5J zC?N5lxL=|E&Yyyi$K=tJ-IyGKN-I$$Wb{-Y4kLvN8-%Yc7x^a-6~dt9AX$ zsl#wP>RRw)W=7#|uK9rqZ+YS8G2xdt$c-$}g{vUS8wAq7lC8xZakYO?tZJr5i?k- zkoFe$gA)8d7fP{JnK4dUcIQL{j(pu(^pEeaYcb;IuOmL6sPmSfJ-@ciIO7a$y`i-L zOT>iG0J#t{`?pE8aIwinCrPy~+H;;9&xI`PDt?VxtowTKk$B}jlD;why<0LC#rnSZ z)EMy_MOgLM3p4*kxLQr(=#KZ8Ouu|$g%GRo8c6>NW#SE(z_t=^<64KQBNnqV1(m#I z&yO_ZCT*mRbs6yOT7#z|qWmvnEnJwte_AEy>k2EQVje_n_Fq@xs%4JR7& z*6wD*!)A*$ucOhB2b%5caF4vEd+a~uri*#YVpA+!If&Y6H>H#GWOK6agP)H{T)XGd z+dBoGSm~ts1~RTY$1W~VB823#5;HhBp0KDw%N*AqR8>E7zTkUsBbNdy)Ymi41kDJO zu!ls%*Sk>POaFU^?v`jQx5cbHF^>QbZ(XDsA?wZ|*s1~NKA0Ixk)UH^_WT4z$!Z}p zlyw*_1g=9VDIn2@d@MsBie~I8_sjktQ^T5F!~ME9$>FL;LPc zLc_P>H}hHmet-NYGwo7-Cgd9K*YU71tD>fr!uA#JpBFvWM3c>oJ73Io`Wbx|n7-M7 zrBs@4vL8TD71Ao=vD^>Iw`Dd7UPh-&>8ge3)2R&_Top{2Gw{%-&ea84IEGsm&r2sd z7C|H%i@NHfWYV=;*F{;A(NVhE2-k)lIY*;}$+-8_ozEc;80epN&edRx>GNSG++;{n zTN|C1kIjrV$UCE9EGz|&t1Hp@*uLB3ib6igpc5ZF@3~To=6kGK&U3CuY29F19g(#3 z6?$))0sc$E{^#s`m4NDJ22l&M`m0$DXh|paQ+wXyH3kM#>tJ40WgVKZaPYXg($&#` zb7$%6uaCvb>10Bp>1TBrUT^2^vs<@N9pS#pw2%o%W%(~ui&?%Bt@?Gqtw0YqU2bEo zKGJt55>^_at-~6M(Pg_X$sRU~mwp)b2^9Q-U}NR~EDE=G4D?y8Si) zp1p0wIAFSpEBe73^|zgu^vc9jJ`?$vdG8%@>Q+hVCFmY0C%X%g4E|-1@b;3U5H(u=$t%7ZCYQCpJ?@-{CKXyB=P!a?l>|+^^=*~(bS))?9(ETO5c#?3}Cd=JPcDGQvIvkg5$7L zVcGuFn#12ALmEoErpez2yq99kIHz%{Nz{I(D0Ou+x%%q*Y%6L&xN3~%Y5wDA*4Q~_ zFuqDEzwOpXPl{Rh{yZK~h{?L0_2aMu;zI5stRRMew|o1qM=r4xzM5OPf(`hESHrLq zWWty0kMN&=wIiMR6$8JY*9j@+a^w?PBErqz7-IUj(WBDq(TIKF4lCXgF9rN|QaXtQ z6TcnUvy(dhU3}*_lwKvg1SHEw)@zxks=%*t7xARNC5LN!V>Nbq3yKdGkWoZFrr`VM z@l2l_3)Pa^{(8bnSQ~4A+5M%3IG?!!)T7t1cxNc`2G|Rr5@Tn#nj6hs@R3bJZ|*2q z3F?9u8O6&u8Y};W(l3?@H6MFijd@TAziQ)MvH3E0PzdPLM&|n^Kbv6_GieA2C9#(4 zRRA5Ayk@wAOIp001?qPx2BWI3JT&llYOwIH(jse(tDp;~(WjZvpHGXE&-eRO&A(71 z5jT_keRR?fZnpo2zfy7&Z%8LG^?RNcF-rjP{vXFX5O5TMi=1s+%XmF{PDmi#wE%Cv z_mJKndrGII1N9xe9HW%fOXsFMgu$H;LzL$1lK5ly>saLC1cl80e@fkI&BB4=jILit zO8@o#$rfjlz`O)*R9pw!F5%!jH^)BPkR22wh5t|tUapnlC>j?Y*9r!I8KN-pK*Sg3 
zDN%_d^6Fs5_Y&PS#sv3z`GklvtTaW=E{oh*9m&>ce|z7dB~8lp4p6{FuXz-=GTAXS z<0%2?f9W&TaUjY2USdVguoq>c?{rJweq3dDO z4j)%82@2hpjxl_t%4LE@6xp&bBBqqcG}^tukUEzkjelFhXTEUFTt;nEci$Z}UjKaA zN0?xTrcCfHGsVfDY|Ji0BjL)@r{X!!{%zRnF4wj`YsbK9h2VEsKW_nb8rdBOwo48$8EnlO1)Z!Omr<_ zBsNYg}G0MhPEiv%~@K z(`}*_8vz1XlU8@Kmg0 zQscAGEOqK6AHV-V6BSuyl9g0pPlHdwsa~AhB@Ma{VOOuum2f+c!D;{avT>J}VWF(f zQ)Dkf#_FI7kw7Y+qx9B6mn;(P_&?~w%Tx*3ySg!>4tFKY7Ks2yYsQ6L;UR-;gLCxx z|KUjH)M3J0Sk4-ogn9Sk*xd8QWnhp+KwI0vTFO(kIpA?ySD^={W^EuFt^vTBCf5U0 zFTbT<5y^$r!mGr}^!aO=*fSyYTGBo2Ha3S((Sn!27UW~hcN+t9^kOZNn*%unZmDv- zUVcV2V-Cu;1#6?vInw#e3p6haP+zx&OWWpu01}-l)pEirOzIbRZBUq!FF$H8HFfps zmN1eQX^AWTAE#y0BZg|$L9xMoB?WZ#EO8WDnXM75C6XBVmsSw?XB?1z6k`$3V=7Pe zIJjTfqLqL<_|~>2!H}>LME_bd_@hBl7mMWqI}}dswK~TNWEVb}c4@@x ztV?@GM+7DW8*BAfhJ`@mfob_|Qf050(xd??gDzU^P|FN_C3LsvVo)-9Ao%$>p_@iQ zC)m=VIM4rq>qpL=6;&{D^t$6YVn<6Ix3QYMTM?cBts{h8v#|&_clZ*Vz)SLbj*1%Fs@=*r%5ns8uwSr*nos#qC zI`CVV<{Zhjb-7CObU)ggH_x_5gulgKS|TaVaIwOiR-dRjv2=q%5NZA6eij$(yvh*k z1K8I3HRh|ETm=b>c`doW1l^AfIpIljPQwty$;Lgw+s}p|3(QbPavNMc_+!HdP<~*x zEsB-QxZZD)NogH;&aI-wwmw1BNA@%lEWz;6ViW)^yT_hhWt7pc*_NX>@Mj8n)4JbX zVfy+cx%~I2wf-Mq&#rer`Mj(BcVyJU?-eK?oeg9CkuGm6Lf49p{?29ABO^mYWTGLs zBVj?T23)s%;9%kJLGpx<>#s#!%wQj#SM+g7*J15l9ZRKp`lL(n5j;trPka?AtbTaW{Sq77?`?q(4v>$nnf+On<(vTU$(v^}Q6JQdpDagc zss=>vKoIPos_5tM#xw^3x!`lmix+6_{?bk|=H1C+9%j|SKG|x(@)S0lLFY6WbmftG%hB8^`)KX{kXtrfIuigA;7JbX zt&NIe)53#mUZ}<8zv!txRc(7}LaO@O9R2~nTQq<@u(-4uub-IgV6fDBYZPf_{pwir zXQaNMp+g`Pl|dOY972+09LvCqjaP~%5O{&!RNk%iU^kBta95%+wlsh@`gNF?vTm=l z^W}ONycJrti-E78w~TQaNsjM4Wl@8iF~viAT``no<@_|FW7$T(*qz+X$I)Ym2CZsQ zeEc*}3?X9MI?kg2+vo#ZB+a8#JM<$?CS~Hd{Guh*5(&HnHWqfpI}Sd1(O(2riX1O) zoui})^17PBe$`p-(`aO?F+r9}JWj5QGxl4rU9=@C5)kFo6ROcYTYG}l*R;$xw;#_l zLMCFjW(RkUj?pFw9tb4GV#8;|>K`ozM>t4VWgJ0Mc67}9Fs5DOtx9(V;4n4%nkA=U zTL{tim@5b*R}=%yRruP?4(MJf;h8%IF+B9m}P87)NNt7qIF`AqP1)AyV{-Z^P2 zpNqMDpEU^KNt6Oy5$6v#n1*=m@L$h;(pc7JK71Q(34Ay1rTx0}K*@3JM$G zZ%56FZ@HDrN>oGCsWYTQB$+4RpT!uv;TED&qOz$ds;_p*mc#i5gtt3%?*^)C?2JEm 
zXL+-q(|yKO37%ieC@#>X)HQ(FpJ8S`?~fb6K>ny|-dOz+^({B)(CxjfVCKzy4XzEl)pSJw-m@c4u4&pf3XuFa zG0SjoILgxi{B%(l$Ty5wFq@^4s1MV%t>$ zVF?Rt%YNt9^YM)TqwE^|@_PUNsg{jp+iumemut1X3LX0XWAqMqZx^u5z;&E!$~78yR0Uddp~x z4DytQe(sY3uV5=`f-mZ18l-0MeoT$J4SC+L91fkiYe^DN*0H!lsC$a z+}I&ZD4GPgfM3(^$a-khtv@6Qw(XFlGF#k3yX*ZkGwfE+?!Zf6)7~c@qs*eWLhFOF zf8xw#w>)qc&A|S4KQa)UJ%J^nbvUG`cKL6}X*CdP=7wfJ5`a%&qTNUw?Vq2AJv|&# z@pIu=UL{hs)G-Rd7BRO6pK=pDsFtGKhy<0G^EG#)V0voK=z63Fxjo}OWUm2nMmk@4 zGQh8GFvyecNk@IYJirE6QwRSq!0*Sk*+?qt8pDmwuih-i;*{|^pGw{*q4c`m zAzelk(a9$XhWhR|v3B^I!7km=AI(yLL9ZO&FXWnhY%7^T3_qN=%=tM+VGPpTRl@Jy zAQ4pH2YLctl+uj;vB)2qDhVieJRCjExcuz=Z?C)vU;h2Kz?DT!*P=2xM)0-vGvY)1 z>3;yP#mQrV(UM8x$D*V_8V+kW)7t?XL5P*GTYFW^g_UH#|+6m3Ki)Y^1+5 zy_$Ky3t9coHHIxc_%_IQrN3>6?PKEE^ih*r&H<0;z{{7vwaUc)Iu?xbULW4)BtAPa zN1C%y?FM+#3iW78fLE|}PgITP1tRjQyO(JsA4DvxYR#sKuOaw3kw;Td2e2et))|q6 z`L%Z~3*zcm#Qg%cpaa6=pZ|VR@}+r_^6()qiO;=+dP8SyApUcEBZdop5XhX`o^KYp zI!6@$5gH#xL(cwg6*}{|fq6e5OA@1?*4VV4UYeuT&U2TYUz~h)1<-JMIQ<idf+9Uv({3=%leUZ<)|9?LSvj7TM5~3@)say!BoH+Nc=spheX8oB{`$!$0hif ztg<)bHQY@rxdJsB3)q;SSV6ey*rKw1-wWrVO}0(>IpRz&NC};V(P4fCtGIQH>Fw!f0E8~UDERb-F7!5g0gB#VcFO33{4tnzez`F1a|iH2@D z)}$Ed-McHBK{T;>63vWNSbRpBuo7|CC=Id!+5>-m9CtTY0vL9o*Vyd4Q{6^l zoGg@$0(kKEaYm%6Pj1Ho?( z7qw=0-;#E_f`pS0Ak5a@JQ_w>jJ7S1Uafj&THr9ctRVylV6_`R)ToM%I|(F4q?zSk zTZa<=S+Y?dgnWJ(J81`xW&R1)Zj5*!>DgEz?Cj4Qh~rMqhn8sTN6wzz%#B2+Vpn{W z`(-u@>3yWtW+9}xbrjDb>QT5kR66AyP=b@7`d$_M&=UVnXu5AE%&;Jb074uX*Q z2`di`x2FD;lpDgO&L{plYBKpUTpSPW`}E*OYhjM(O2r zb^NHDoP1-RJT59(eb3ymgnfx|@W4VILxRq%^mtM)F#1(7Xu?s4N{`Ko@+se#R|k*dITg^FU;ZP;t1t0JLTTmDmCF)15QD6oS zXFXD)#0v$LicF5tfWTTOBDqZOCT>o;K` zmR!t$j%-V_|G11uH3%=@z}p9F+;CfO?rn!-g(D9)^g86B{nVyS>3>-KB>GtYNPY$* z+kU2fx8JT9s?NZL$`!N<#9G*C@Tpb41mYiW>_*n0H7c40qcUm{FV)~c;rN0F*pjZe zQqSZ_Tn8-{MO}Sr+Hvvny0M>sj zN;R&pxf*|s1+CT2EP9sUYwH!+u%~+qz&pHth0pw#_l0OI>fE3&{67(kB*=L?-N1R} zk)Su2W`;=(reuB&2`KG*Nne2tc^q{s$ zAM64?z@QS^aL1EoVxm=Y(W7!3@xaP zmJNV4hHj*oTDjjpMJHYybYGa$nsd-4g6aTsY9gB?2ku)ySh!{TXR~x znY1MwglS6WZnQ0S(7pRq6P)oJf5h& 
zIg)`=*sMD5lBdQ3@H5xAEWlv0UKt;tOL^@u4iEQ4h4>+3@p&70n-eyvjliI}eJho! zPR1s`1D|kF85QWfw7mByHp1#*ZaYCjvi{A32X%qTcFuk@CUTty0srNSvHs7%T12K! zJPsIu+x`Y0JVi~x+CFl`Cx?thTcLuL4I?&NfSQ54ltr0td2FT)Fp5q0od4Ljb_=dx z)^_2{^_z;g9tcIV1tvF_|1E$o4+|!7Z_ph4?wp#aBD(z#uc4LMo**%#(ujyLrCgL` zooV>WugjnqsDXM=6lhf3+XhV8Or$8b)a1EQpD_`?oz9P)3?FBoOfrcsS7boQz-JpL ziS`UAKezTr;X4#z>3+-bsAQsxbGtKY8r8~25rBwWRUuZ1k`2 zLqRqU@ZsnsVrK=i17=&4k@Q^gUdw1_;zA4m-TAqFVB5CW8}!mK=(}!Xj!GRtvyHS^ zXpOJ|8eMLF^E5MImSxF!k}Wg@PAzWTR3rzv$vjr4#B+lm=qt=g+hk->B;H^T7(JXN z>j@vTDq3Lq1p{4T%TZn*kuPOq=(I^|NKAYn^@&1mXTZ;y2t&hlEEecS*m}%w)%1=^ zTQY@*iVAR}kJi2z;1PiW^$4~b0eVr%P_^PwtUROF_>R zY&R7?u-fzgn7Wk(d{=ie4;jkf8JSA0WOoXdfz(%YnVrKk$j2T?3L*GnsRs0#@PFjo zDdn^wFr9%K_HKngqecg@bQJ9E^t~E|QEDX|&n3^V5~t#5y!Rdl;6?Khl?B*8kl-sN6Xll*^~D;vlubAM#TxpqCBy}u zxGtZT;u3WjA|7qfNJ(Fuie95o>nJAszF+rGA6qx*kAsRJC*Ak9@!tzm^w$@!fpkFL z>hkr%%eKuYM}@*bt%_*LZidMcqr|vceGV(g9r!r-7N1mK1Zav#a_flXX-W8&RyyVY z7LzGTI;gGBJg#y<|K;I!cV=0p8GN`HF6p^|2R!t#H${kV=Z#Mn@A6)VvkC)+C=IkL z1k?UPMnwLCf2-?iRhZ>zY64^^q3L$=47}E2B`axdnCSR&OWh5X*0xv`y7ueHcTZl& zk2Wu4^yz?v;#}4bjFQ-oJOLkF>I<6y8FR+zRukd;kgLBN-@y;`5ZS2iYM#H^{E#+l zMHALKhHd@`+DQq)j=WbsoJvv+NqGJ<&fnXL0^u;nCvnUFznFWzQ_d$MJ@YYgz7*;V zAFN(hGL$N#nNdzv9~EzcmpK=4M=qdXb@ZY;(uDr_}N@;yCLw zgHAR`fo+;f4Q)W-O|`>>ag)XHcLbIt)5zOe3wj1k@Gn=&D24=ajNDy0Q+Qrx!>UK(u(r_Oq??`v zz5AK<9}4+c@fgo}k>iKS4#*eB@XJ2|j{E3?BUh6_W7zCQyTv)Z54NSjn@Yan%G;E3 z>iIU{Eh)5xWZ!fd%wRThsdwF|IWmg55|qDW$dgx$<6dX8&&;;p63_IoGWn~tCFDc0~NOH!tA3vNslCx0^;1Li0 zM=|Pr*$snHacqo z@;09{G}30uWeDW_>l>{Zjclil@f{G9(H4_SsR{ad z&{YBfAbBcBzLf4+Mc5ZR%ZkO#ftw+(J)%Ooao^-jSg--!FqyGc*IU_%<}P&iE7qdt zBzu+`5#|lnWN_*a-BtewyaN@RTqeBFayGf-N4!sb-ugglnd6Hce)OdsRkOXe5r$@C zL9V&YhEWGpyzorG4tPYM!HaclYhdN~#2Dr?sTxL+FIAb?EOS)GFRBb;=U1qZIrgGt zmuQolrlMZ&DY!&5K&bo^;e<%{KWH<3Xp(lE>US*hyE{ZEF&7oLOo6Zm&<97ZyTlk2 z_cI@iSdpCHCfA~BR{g%)389|JRuM@_C;HSnJHhh&2UTm<-Qw_@v+yLqW0vAZl}#Fe z80p`3DG*&&wVaTvi7LUusDoar=ZFaU;B=1G(NCDlwt?7qs9U5zzbs+$yM#raaJ*(I z=8Nbr7#oWNf%FajZz_v5|Kf9-ag&I&z_+DT++hgtt 
z99>;{Q%4+6>5_<6&SEJro;NNOv+!k^MHKz2L|L>4zqHlm*)DkGTzgjhFhv;m6Dm#h zTI5DIMqUfrR_{w&?)r?AjuQ3{V`6&6JQ{@^?HYi`htQ9UrR4l4WTv`5cVVLu=gMt8 zDfoiTIz{NZ1Rg{!#mnJ}JIp^YOEQ6&?EZ`aCTz6Ikl-@k43u$HQEx6!QQ!kj)eipt zlb(-rs*e{2hNhF7#K;3`PfbQjRrhRg6n=)=6chjsr@Wf_QhVTy&-vt{QZl06M~n2| zJ&=g9@2qtO11RI^{o}JX?sUi%5JUuEBz^4SfB&2pOZ#Sa+5;pJ$*6`q01P?kHLX6p zySiqd{g3OTRP;%#>`Zq|!K1eUjyEXK=;gau9f{)KL zsH!?EtMxLU5K%Ogh@4=6;+F`fiNZ7;sN?uafzMT$D>`HkoC=Kp7*F7Xs~5r7v!pJz zHDYq)su5LK-RojsRn=`;i-&qBKf>Px9cQ2pSoGPd*$=G?Ww4yfLwh7B*#jAcWN6WQ+k6%lq=%XAZ|R)?$`%hjvXXu;zYB#CmnEm{@lrEXGa9oRN%7$SMzRa^Ey;hv=MC-Hte^zAnwxl(? zp>4+^VF#?9X*VXnooH}%MchsqAVWqMNta9&r)vKJ;cg<_hrdFmpS1TeZX+er!yxke zH=IcGis=q!=s3gR=jy?>ACAYJi3cQq+IiqN@2Gx@d`_k#B1^~G9yCK!$eUL}U8%mM z9<)7=@$yMKD*}3rCdtyd4v}Gc{bwXC&bz3r{14~qrGKGNp^PIj&Vm1_(M((`pPAy7 zeAPO=rr?oS4t~DF(D2njL?C##d+ZBe0u?T1W_EPz>hMeqM?vUUAi;5`P7lZY91RH6>wSCrkrL}$uucZkm%`@#@>tPiB6c@SFW;)+r0G4zxnX zkw0ge*FWAZJnBt3zU8t)r>9CEXN9xy_voaYe8+5tI8a*mnt`v^wH_^z-N~VQ&C*Dh0ipru!H(zI7^pjPIT{ z(WW8uJYujyjGQ2)5&>VcR1a0FZci|@9=@Gzp>VIiOx4xMPdEERwM$DqZQF(H9491| z%;aQYE`TJLHJ4Pq3OL8niq{nkFhedUTGG?MH~#N?Br%s$%BAUTMuRa8cvVDkKTS(l zvhBIB?Ja@N398E%5ACE}O%rVLK)R*YMn{r2Q~axhk1Q<30*h_15%ynzgNM&W^}@~P z$s$wMNBCj_D89xb9C)3wnaYZ5!O1h=9bVewLPF0Z$O%eXufk`|HIsTH`{-b>yLkVOUm05&$ZpLtw-Lfo>M&P}+q>|C0M8!Z*uFSRwM<n4#znh6jf zSiL))i1iD4u&=D<7zbxD6~lOekf_?ua9T|B+C6wvY`S1`$G@P?6HYtJ+@RNd(!!Kz zME__9!OFD!Gj~620}-9}1j{0K&Mg-$$ViRMJaCk(|Et`}HFt+sKsM?y{&0GW-K2VL zCGz{3p9lIm_$jj$Ed*C9T5EITT4dczeD%JVIe3R?8ItaQS6TfpRz41Kl8BI+YD$&j zE`<{62Jiqdac6n@sL)%0HpS2~)lF%A%8jM@3cRML%B+ozs8sN8bw0_U@9#K-Jo?Kc z@4~+kA9-+F78)Jt?%40eLvsgNR^Jpy1z(P6h6a&XSm3zMfPU_9RVM4-S>m^Q-PU34 zjC(O}x!C&50909B1Sv5&_!TBPcMp~8$%-nxZrg;h1iCWgF_*$Jm~xi zW6n(Doq4PRL^*gP6#)Ipqv(kvaaZ$-qMB)Uzh;e^_YrCmFqg*+yu$`@Esn6v13GVZ^<`|*QNx5TAzu3oF^U=N8MRPq`av$imrsr)&?+`}fc z-DqNe^p||l@o!n-&j|R~_GH+xp8(u=?&Ha`FWJvlc;4|EA_nZU6gfL~4Li-t1|Z<^ z{0|`&tA;2vlnd(aG^fcy@56nUE zQUOX;d^yu$z}K_Hd=(Q(CmO~S_nK8xq&daG6hiA2~0QB;Z+v>L~u`;DL(ya`7{=z{c47t8(w 
znkIh1^qF{2XY|BVYvDqv!xNvAyPHge$fNz4G>0VQ+W;ffcO_ueCK69*{ETIPzLXrlsWit6oe3^dHQ6r z5_bx2=#ZIRmu#bxKj?y*z{@&q6>WY^@X3GV#%U0D#i+L^t*wT6fjLU2J@WN&@V-T_ z-m?N|xPVPjHCTq+3_0jd$ByCDFG>`ArKI_JJ*c##6+|%&q z`*&c9Lm#r80$h4z~b+;O6M^4|V;^zM@p&7XJBFz@S z%LC+BVs&xSaF+GB=hRs;(4Y&*4y_g7rXGkUn%tIDGQP&=3O!YW>VXb$N?sCcg@dab5O&U-SLD& z6+5iyvTOocZ>r!$zhT9s0Osb0a3HIEY#8%p3o0xw`&v_}2wp2qyo2)(hNf-2=-U3N zKN6PSp|XFk3}DPyV97!G_i?;*OV@uC1EpH4M7$8$dsm!`cx1oif2bjDwNu76Ta$d# zf?QfaMP~F1Ks3W5qy<)^pxl48iLy6Ew|W-C9Z6Euq}tPxmWKkwMP;z0QLX&((wnI) zRWUS<(NHG@Yl}|E%(lh}|AXHt882^=tVHrXGR>{1_w#7aU#;shjByvMti1*)bvvld zrkF62wZS7yk~JW3GJ1ezG;I5)-YC;3p7# z(!>NKHW3l0Bp*Kd>C*x;;$lh~)R%5z{;5I%s%{;-H1rx#6q}zP$Hqy%^H~52MTmp& zhNR>lz7ipz&Q#N_y>>b_7TJ z!s^{8?x)za;Mf-f=%K2<-5x?a+hUoB72dGnn*%wq64$D0#BzvC#DFm6{D#MuvY`m7wnWwJ87n6+UMyXCk4Z%Ja=V64%40#O z!g*EIqyXxoBXm6{!Gr+FX!15%HA65?`A>`~qPK|_^2hT(`mb_-ncg-JWl6v<=Kg7Q z{0L?&)SBeCwNk77!>0c-VU@DYu&EN2Ms1%R1wPe|gjFfNou?(?2cTJaeE~Wh@~J#t zcDaudBwrYYb~@!wk2fTQ)%A*PyNF|G!S|S_u9W1~&>8li>31%}*Ksdxt(WZ?jg#04 z$MsMhiDgFbb{tGmwH-7)qBhC-Gp3~gex_(l&TQ_b<0i76g|d_mcaTZGv_~?}uo7M?QyM_s63r5}5;D<5x z(6SyQ8y($mUU0HGr+r)>4P(CK4_WeHK{jwcduGD}@&5NhY(H%9Y8fkxe^4@V(+kB% zZN4o;J!f=(<_ZLlfwW2~1@oC;i?+N*4(WtucB}i_hY9!ItiGz#6tuhj7wjmmE^35l z+H&SiFa@oQe(jX)D^U$MDf08`Jd~;L%uC7d)PX0v>r60$Cyer29h6r z{i0!y85aA(8n&{dJQ(nR*#GfM=2Q}xp1HAjsA2t0hxiv zT@n}+=_v&`u$V|*4Mc?%ztCgTCt+kL1G%2SM;fL!dej{r73^gDMAq_xI;I?N3|cwT$*TqPIx4nrm;yRzE@I=Pgs;5!RD{zn#Js>yf)zrj$+Zb`gGR zs9>2A;Q@ETa zW~(+G=nQoJ#TQeY2i8Y7^%{>)H_>8ZCB@o(f^|YkY&9r;hG;87l|)i`fq%IcH$%DT zdwhD2zJH_-UK{tJ80G35Bh_v9Gl7CyW{e1mm1|AA9f#;hkdNt33zq^Dt*=VOH`dpy zX5)TZ0vvYz{Zs@O)8(QQTL!vZlStsTKC-NIJAOR9K~|U6mnpNNtqUhC%};Xtc(d+l z{^(A?{rd2jmW_>|!{ePpvv!spAeRWOpW=CwxroHjlB5)|5GK0YId`8UvvY|<*T4oo zLMigcj#-^>MS~NZmo zr3~dM2zzGCNjMqpjZDGdz`y6l*|QqBHz3us4xYG%q|u+aC2xT|k#UmpP z`-|=}2>@|A)zE&%8f<6d4@HcvEKxE~R@J>IvQzvFFj)*!;NhlZQdWT?9Ef89vB)WN z#Y6b}C!rE5w2{%zBkUrkj0ESS*@MaAK2o+Qj3)|Cup-XD!E$x-T3ZzxS<;Fq>i6;Y)e 
zdBQJ$&F7oTEfaw;W`O``t)J1lB<(c_n2fMty_!(l=M9Ez&D$%s8`%>8Y&vlcP9Qhg zr#L;t26c%8oAN8JLjD8QmPuzLw=M7y!)*q=7yMebmwSVsL^L{NLhn0bUbuBhb#V3) zy$=EnwH#+x$uh$m`0^?pJR2t1q=64iGm*?i1(iD~^BIOu>+tlR@Vg?oD8ECe&6vKM zf&W%#%+|E*>NvP4i9b&zC~nywa^`wE?}RPM#cbxVvuiyfkJasm-Xvw45~36%21;uH z?Mt-oNwqqU?S%9VkURZ2P4^#krzE9q$?toG)p_7+5A(;`Bq4m92Kq1Ny4hHUd5wax zG$*Kh%Q$EpOYpGHb@~5gHPN^o+_3S^GO&Cq`2q}Vhua~EVATgm&zG5Fr8Ua;G>PaAetp*Ja1#0Zr7OaJ8>O*fZZDZns&-}A)$JLH zYqa(~GBk+=_#R`SZhV+OZiImBXv*}tw>|ad_GVS_Bg}M!c`Ts;_$Q7?i0CLm2=}pR zVbDa=K$|=rcZ3hs?_%G2G_NTyRB0cCA3*KiCGvZi66drmdjs&B0-E3#f}7VIy!+vK zzRnZA(dvb#evWd(Ev{ckh@x-(z(Wjt&Rzgd zzA<~?fwW5eauTvTjOYoKS*I>1DD~h?@7~Zg+k!zQzLYSdJ7fGt$6>mi>Y~V^AkC>Y8w{MCZ0JauG1>An~$`-A$0h8oD)D)aa0c=TLjk+HJ z=VmW>m13eBksrs^h-O3GedA{Wd7AR>VG9fb>2p&|Y{9p1(aGq} z5=+6tQAbbB?l?6s;PL7{Y~J&PX|ya`!;ZDB{Cwl?OQJ!?N$3rdfT2zD6b@ZclujG2 zc|rHnsCMgZb$%fIf`GNIiJ2gYwK?VG^3{0C<}2qH8$0L>?dhAd5%7$88gvlcCkS2Y zpit*==7YWI=w~U0#1A)Q^Z!aAx35rYks1O<1Y~{%DtB=3bAkG(0Qxb3L#R+wej!7v5ztO0L#DcN~b&o6ktkPoT{Jz(Z9u?L# zg^ip|7yV@ADAo)ArbojZfYt>(KfM@3zd{uf>zXz7apG-Td7bYxuXY5Kj~HOcCxfp` zYlQGJ*`#mFtE)}HKa?>y(Zj_$tf-ItGvT;9w!fKl3ZMD$j1bm2@a@s%rt$UZ3V4-l z`t$EVJTdAm6~8Hyb;5;81}@)DH;qn99Om5_e04{0rq(*OeL87&ta?Q;efkK_Z;zjU zA}mW*&3_(kjWVdbQx<%ZNNV!DJxrat?_8+^=IT_?MqU3#K(=KYrI+=A9&L-_y+%rD zaoJ7Uuqg0BB#lQMa}`mEha#tNFR6iBv$oVU@KNFM5F+|~N9$<8*vRJv zPV(#OLmHlD?7!AP6o!ec6%Rd|{QP;Igm&-e1<0wc_eB}NDu_kGZn))jGb7;h-}+A; z!I5a~SfoXcbsM_k{z&j^Y5fPOO#V)wuLBazrZ5U^+e(v09+w>8%`d_>GG=f9e&%Wo^8yOx z(O>JkM5ZI7Aatvjtm&)YI3^7L^^+}T&smxzOsPW4W%H5Gz?J$`UdaPNIYW%cYwS)J zAID13E9&EMzmN!`VX;TF>NHCorh~to!YFTPy^;+asrt5~Y$zzDYycwio zkGJ{QLKnr=ahSk%OmhZ#{pWS0xEBFcI*K~ZvMNALLJmD^;e-$?+Q!W2H zW}mbH!e~tW!CPDK0e)!%?#Av@4Ele2 znL6~hBtk&3v5{7*xsO^G|JP50CDhjV^ogB2Pc1$$>}czRRy5#euHV(E6q{s1kSP`k z{RrX#jcf(fAInAGbt^*$K28@BX08T(%R6pOtG0u^E>F+=4P?B=ZlL}?=-{j-kb>;O z(PaFMz)PSepW@;&7mw8iuTpG;yS_TtRTl6*sk*Cn-1mg;B~L(!Vt=-&xx4I#%+u?7 zHl?ZJ{8P(_(pWi6UF1yza*2sY07T=6d zynx22A}bRsT70jgjXHBrCj#fHg3s!jYI}!7`ifW5?8^?}a*KuR3d1Po01~R9mG*&Z 
z;I#Czu$-G%y7q28xkk)r4Y8vXEfVK1@TiZGqy022^BqjuJ)D9pwL7T>Tp4ByWi1{K zVG-VCuC0Dla6PT2NyC&@mIgr!r8w~R$J8}tkBuvMaqFMR-LDHv56)4UX2Z(LR;Mr7{*zPukKZ-w$=Vew}%(zdP+uLc8}MoFEj$G$mSj^!g! zOIkl~Q4SAWXU`N6+w)`lW!S;fKhw+tb@EeQ97tnomBg!7Js;%69lc_;vnpzRx@hz7 z!=Oy3_emm3{=zl_9Zi*_KvR5!wQ~?p00RB#YNKM_>yjX=AlnEl?M7F2Y^+in=z~MA zXoPALf_M*h=}%y7nLT-ohMOi&+9fQtq0f%Dv3{r_I?J{kfg( z$Jh8ahu@A+q^ERuE8j>p*)#arxvMmAihRI7an^`>Y@yV2%J<7lk@j>5jeVGGo_guf zW$&OsUgICOlZOBV^Z!sEamr$MfqtnPfan$8p=kT-K1pqT*Jxb}SqQ(PdtZiPA&-bX z{?v>Vc%b|XLgAb)yEL3?KaiiL@)~rysu)($_kGsp(z(UH%jIZUl6pd?RY(N4o@m|c zbRL)v)HIXTO&Fe15_{cF(wjON@++rM*Y^_dwr(9N0pFXNL2QPmC}Z`T5yE;iRPSb4 z5e#j2u3L)qkkv62+ldr+ffvk8#csFZ(I{cYu%t@^6bGaZY@ubean2j~ILy;}?f5R> zSu#V(OV$XyZ5D&ykHdeo(=4f>v}$U(u0nk#X3;@MPlfv-`8Fs##~ofRx$ikl{cf35 z5Z%1P>!0Hpat!RjjmuM}pYb^0=<-YMAK7PFJu$qNBW>bv$i~aBd4N7RA|D}($`p$4 zRU#Dmd*S0s6g8#25qpM~gMzn?nj(esXX{YtA9TM`(w8{Lex6oG0aH%(Kx=xaEXB!6 zcRU{7p>q=FZc|~ltgxx*HzE%3pcktd<9=t z5|MsL_l`_HM7OOENB!0Hc4UL&=7{&)tPyX)_MzZ-e{6&#_1Uz`^V9haJR*|;0AyVW zt)LF3+4Coklt-d z^Rl#OUB|cTfKbzgSA==8Lnih_vde~+Vh$8wvpr|75&5RsFm?99Y%vf~WDt&oeMP%Y zBmtUc1_8VvG~JXU3aviiQ06EJR{i_j>`;2o6cpd)y8fnt!Glfz61C~=F$y{-mNbm5 z-|8^j9569U<@*KZ;PCf}8$PLcmmQx3lNtsKXD)YNLvx2fCU`a#-TKh8f%KP3G4hdt z@5zHly?~3EMmAI>?fFw*O2!M5q-_z>;%nkCM&6Wp61xbXSvPjwkfJak{_9O!S61%| z^Vl0^QFP!r_~litLX!l%=yx#pDUVgtn@jU-94#VBJ1Kzio>S~_X zNU}_mvPx^`X~Eq`?z;l7vj3rOT592yLihYD&ax@lm=_$8l?GZnPb(%s_6!*R(}anZ zuBCzZK+hl-OCybgY2<`sG-I^GZaYj^$*&C7}Up>v7Tt@gQUVK<( zQY7lsn_wbf@wIAaCgg`1By-fv3-G5eI9vY6-tc%u>t}Z)nDD?J(q;4Wxw|`aX7I{h z68ulyG~v@8Zgn3yzM(Dg<*Ph)3M6L;GnK>EH8acU_bd8=j)NVoL{fps6Xw;N-hnPq zvPw(M>?6%np{q@$>D+WYw4YVBiG=-|kkqRQmkj*hsZo(71w+*asYAIBikkRphf)A9 zuLUktHI=^qC)&Mn02PB_rzZqmi}fn${i0Jc6gW#u<`UYc{@z+}$WmGiQxmRTZ{2e9 zk;n+4*>e~Rd?soTreFUd++O0lj8?xxLuX+nC0VcJrSE6usYU8{boo>gTiQ79yy?6v zUxuOyyZtu+In;hX)UDlu55LTni zBg-}M6U3&P@vh51)bdW#us{1kf(uRtuAG+^cwMRJ;IAgN#sOEFO?T}EKYY)YVpM2p z)5eGlP>RWnJE}epvwKj$SI0N6th7Hh*ve4{CE{w%OgIzT`T<|V5a@qanL2~_rj|oP 
zt7oon8yi(qGr)f=hRao0|D++=dVls?_ZK!_NGlGLUi*DoVBxA5y3mTxUkQLUAfqs! zL&aCq54U#Oq@0v`H;z3;!H81&8%g}~0r)IMNOusP6Mo>x=~)Qd48H|7g8yXJGnojB zUt)&ayjEbL8OE>C z;W&2$)`a2-_jY+7Jn+tt*be8*r{LLXdIT;wBij!F4t%gy6>@>^nWK32Z!foag2=F^ zpVscxsG~SV4wb(H2{(KfcTn=qEDHZ9pi)Q>t-o)D?p1ot5H1|6x8#Da6E9Eo>?NHc z!}&TS9lk?cuXC-@byS;0y7;5D&vjY9T_W3Sv88W?`s2M3n@A^I%qTE{H|t;FHQJ4P zAWzDQ?c_YbP<7D|RdEYFE~5+7m7HoO@4`%+lyjfuxV|Pobi7GbTZ?|O49q2 z*P2g3!Fwk*A*?>%zSjmdZ2g{>04R<7MBzF;tUuY&o$;v#S@v?W+#1?kz&jERuTuUG zJ#DEMANE&V<$YZk7?(b+zN>tas-tjKc8JCvMU4?|jq=rCw<2tR-!0&q4o50ID1+&nS5@Pj~IqW+;^BK?g2-hCw$9uS#u8WR?0*#k3+8YX;aAfY;88Sc&!u{gQe z2=6gRin80r@Nx1FcD!vGu4Doq9AM$TJVo$6 z9pu<*@)ebP?ZcOvO{e>umneRC#PfpIAK>|8l}3#L`M0;<%WJ9hi+Hk=)i9?3OS#ZqhSZAfMGENn?^5H zF*EQ>ThJ#L#n@X&hj8w~a;q@H`1`9{i$vts=4WTk2FB^$?J2-Y+87f_FbgNg)jo?K_%G44HdMr)0e}e+L!;xkq zWG~F2$we#HFj*C~dmS9g6F%SYGHz6<=2K=L{jDVc^hj0#c-wzblfkM>`cCQ&6gAEq z$6IG!;?dW5L80JFhknIbe9!(5m|@Nf`G+h&UhyB?*S41fz7FkBLq?>{rZjzlbbbRy znQ-ImAIvv|!O4K6z+{~AbKrwh?nPw`*&}>zJUzh`x`B}xJ=U?tHh3r^{OXznh3Otg z4c=!Ol_MdE)Vc1%rS4b7Vn3xyr0`S&&BlrYsy=uT*MCBSW>(~xfTmyp!%3F}!dFc! 
zA2L1#rYB@dTA98Ny>z%$Tth?cd-9IC3{ zczKIDR(vD9;wNXud1~*LLbd>_g!I~)S~Ged!s$CSc63chv0@RWX1f2quFxyAHTX=_ z_!r$u%0jl*l4gSIv#FFYfm4^orMbTdXkP`|8tYAz)|^FG{JTiYray`Ltf15dLS- zdRK2BnO$*A(gIbaa-3TWH3+q)@zqr?`?>~Lz~2(0tco>9XDFfeX)`*i$~0O6c1hVI zEV72r^T8inJ@!+K!<~~J4jOQ>*KFaryBd^o9A}}quSd|drn;5?;%3|T81chd)2H{v zYY?{(IM5u=X=MFah3lF}t*b^+sh1xY0Qn()#3c4yv*wUG=c zF^jo?`z#Rr__x>Va|(FgX-6l!X6P|ZXW&yNwcgzm*)1MQqX`f%-5M${P3kmB@#45s zOCA7Rq|D5;e2#Mf$fhj=cPeJ8e3s-1<=HGGB2vmuIyCA+mb{((;f-9tE6NKHS%ckk z=F`v-*UvPDt`CR%Vb$A$jY#t?$8u~+aAPL>XT`Pw0?A4Pd2KfLB_JKnUs$L_sz2pd zJ%vt@z!*OH6b zLuRi(Q=7oZiL?w5PHzExX|h(%qw{yfO0M4EZb`w94R zhHjObDi}>c7q_-Azi9jvFm~7TQPjP(mty^QO$2_tb-j=getFJkDBu1V{1nPdI`RLj z-@DbV?{3i%@XvO`m4g3 z?sXXZYhLdA%}Aoube;+Ll$$aiHtd@+>XyBM>=)y4@8oJL%&MK?))Ntx#5S(yC_|0n z`KxRy1TxtL-BAO*1{(kxA8s`H7K1Qzzcjt27<+|EM0jdN8i&D3jVB>3>RU>zxr ztrLW%AQOY{&*>9ellx~FsLim{&B_*g>1KGm30IiBj>?K7FQHg}|0Z)91HV+S3YoX2 zblQ&}8juJHP^dtd}LxWyovQ2xcT@kwNpSR^BEn3+|p8cUoF4o6ELsEpyqn zZJW!kWxHi-wY*w3mbLsmFX8^bfZuhl->vUCAD!}BnA?sHUp;Gu01bE|&bP)HFNzL8 zY>JjzY*r5t5Nq$Lp7qK>Zb`{c%|M(em=>^`v9zu(rZpQV=mhUEZ;7y&$fIEs52&<_ z27a|TRVE+a4_FiOW$r(6y0C0?-CtD}Sd}Y-~^+3>9BpLGnTRGmZQrI&gBX*R9L< zxCaFtlJki+xCLhfnlJF%pG#Z8p-(jqnEye+b_9cemY~-=(xhotIqb2XqP((Hjo%>t ztvdrA1*W6K#fMao-Gx(xOYA=A4w?^$Ld#F$EVNqGFdU!QmiG=NOVw_T^K*uJ;&5W8 zwgHd&gdjUg*&3$&?ie9mzm$yjOiZPSnm80^hQ>0^Z82q5H*^8GB4Fr@fw3v%MX+q&klNmHvubG`bm8DH(dd~~xTzWc6yz#p-sux{ zALXt4-FN(lcSbXmh5uCMj4%r%TEkZ9#pBjo>qP@hQkv0}=zFn*GJR+gvObGXF5=1s zqGshTQw@MGbc%DNXa#6>0-3k^ zg=mLsGF>dy8|P1#Do6=)xxdApKFPFAdO=iUab~RziwMt8 zv)N2!PQlyh_T?UiAQe;l`HIVWcZP*4V2jN_kL_5+GdMN*9#%~`Dng`tlH4~92;y@3 zOPvUQ(PP~jJl<#V@Wb9}tUPNU1t5+A8bboEnCmsA^>#k&TnH)Ihgg?jISX^^)zEWK zZvoJqrSVpmcq8NE%qp8`+UB@SGx|{0+}(n?tFAu|Zn^lm3BmUT;fsJeK()!&83z#V(FR znST^kG(=Y+n*sdF__`!NID1A`zqmZE_b^8a&gBLJaWRyI$`D$k-%6u{m$@uWcdxXt zf&ojC5L7x2699oG|I2Rj=(*#nm{W>9@X zC%H@1WUsio+JFJIE;OvmD-yh!uBaKM)!|a9N_0}=AC#MiIW+_{zuy!8ymwX^UGp1H zh>|b%?X$nj)W(distq1}8^B!$6~?-(^9kjXM01!-!+D1Si)eIMgZS5h42Onu@PWH+ 
zSCYk=TeK!Wy(Xl3Q!-<4`M0sfCDVk^p4^DT)@aqn&wxI>6gZ_Mm8Q-)LT6?`ec@EI z+AbiQm$~x0X1B`BgqF#_jME^4)Xz_M-#&sz=*!vy?_dbb+7viRAR z7jMB7@cS?HA{9fY#8IKGkx>ZW(~$IAeAM<2NLzdgE%*oUmXxH_E{SbIXZJILyqg+r zwxuE$=R1uslZp5p^qoV<+;d^smg>&Gwf7ll=^66|zte#FGUx%J+SkHvTy&`Oq$T#i zj0@v(>9qdLLUV?sNbs7yFm+Lm>8ck6F}j98`8tVOsSd=rryBu;Fb&nzbs{u|;fqan zsmSNep~f4@UJJdqm9IdGM4p{36UrKa{Sd0$$?jJ9kTw8?MwaZ{I!DLi z;g+$-y>i(yv&YSsSFV)1ptx~@0e*pc>^~8@g&5L$xJwcrVlm_un-OJtF0{?(o=Nk) zlwHn@KIKMlnvCJ@^{AymmtPDYfYkNM(2BJ!*OsbEX46_guDAf}g z@UPQ}J`(@Sr6weFJr|+XCSLp1>(G99nZ7?34^*7UPW18dG+r=Ls< z{b))waOnE9BA}Esq`ImZ0z7f8dHlU-byT#<9D+dg$^ES5ba3E#rHoRYrg+~lMwh%< zn&}K)lj6AjZLyps2fGAl$Wx*z*RiLZK1Xo8CeR$KWGQ)DYSJBf?ZzZwod&PVZ0syM zE%{chaF6i&f~vwIAPqJcDXnnWdH%C8fjlH-laXZaV8sNM#4K5LcdFj*EwG!~eKB77 zCHAhxNJ4SPmv?bc(<)Y-61u$#o9p%-JgHeMw(_=!^)vSgdQOcYWn<~Gm-udw;8iiA z-bj*gXwUXQi`97^ippY*(h*O7fyx`m*DCN1QEc$P?Na6q^zkYGj&&Y8z_CtwuR5PP z_XHkbtArQc+t;%9Dqe9(AFI2WMH_keI(Jb`!)ZOGqigN^_HPjetCu;qye(pbQg3Wy z6<8-uPm2!=v0xd1eJdqMW<*5to{QQ@WniCGm%Y*kKRaaz^{-zAq-%^%PXSHYjWuEv z5v3!ugEZFMeEF(5S#O~uV>sg8jyZ^yHbu7kCxSqipXk^NOh?lyQCVhp^k>V057U;C zGf=c0YW(|p#o$Aus_=Sq?wOY+C;En=HpLBDM5}|Um!v#pw~k)Y1CcMqJiBrONFpA; z*2w)hAJK_>10Il*hV@VU+I7=sBS)iP`w)*Tx35rf3x(NQB476C!GAK_HiS#VJQgDI zxlG@FG2aNVjoABK=D6ywHm1Z9m}1F@vqLk#2{e>iR7TT@g0+y~{%K$E!-(Luli%Dev^_n9uLKs7lZM+}_ zt9#Ic_aA5On0>RcNeBuLKr)nTgM<2fOkr=-PV8TNU10Zfq{g_M?2wz`^THJUJ;F{s zT-`b_iW5>q?z&5w+g8wnpBzogBKDjzmJ;cKKtryR@d-SC+?HIQz=t9|7Af$tSb@+= zKm-SOE%PI)7uf>c)z8EFLtMe+RARpzm5(EjC-o1#cEEaP+GNt=F;4pe-p5#VxN+u3 z+bFqc|3_6Gf;o)n{}58a(>>*Y+dp#H5BNaS!wEyb@pOm^&!QOrtBo2f-<40v9I1?P_!RBqdmQaUtwR!g3rAzP=g7a{P*3^W$M{T{rhL26fj%qj7~VXB*NjEVEgici5bKHy>ci zT{zoKNnC(H^aD2P@zD=pXb_tdw@aSz}-yrtEuFAGOMZa(`VzQIbv>yEzMwG5%7@v{_8%5C? 
z?@p49@YR(U6U&oiD-}y1nAFO{O4Wc&TWdyUT;J;>?YK+!R)_xuzcBfSHFNN}w~6x3 z4F1D-Qr2PQ)GI}jr4xKd-aFJpR9MHE|L8k@FKchW)6@j^hu4%Egk*W%`2r^QE#v~B zr)u-EV~Nai;^Pn_$M`>8(cf#2S9se?!F$?Lq-Q>Li({n9aI_NUWLDIC!N;K}gUPds zrh>1(^j$1vi96 zy_@X~$vklklmOGe|42@=6bo#L&*gGB{>W8PAZu3~4`GM!k_@eUPX}GN_4NV5jD{8e z0rRiqcsScutm-eh0<$T*uSWE?@jQ;zBK3pG%q*q+c#={36S;7*Ko*Ib6HmAhc@4ja zKV+ijt-71%uHtjvp8O15xa13Xb9>^|lB@no(er2NdA*}4X{&@hz;03aKCw@j`(5C3 zcR&u0*L~BY`XrB=XwwCO$~r(UN>?7)d!2IlPNOht8V?_}q@|ucbY3Cf+NyPh83|sT z0IOrZe~mhTkt5NFI!oizMVM!rKFNm3$uC2jrCh6%i$LA`cUgTYOx48-qT2ie5J-?t zr+SpAVRWd!&fNMjpJd%+L#D4i)cVtY5w0J2OG=sb#`?8{JUT(r;?8b~>4hG@@VPZ@ zhrZ(lqs&$^68U3&vO5Lxa$|;a-SeN(NNpf}B-r5w?GwV%=dPK*!^+&e-D+Y_?Nuk| zesZqp1>g$@8K}Au+X?!)4@KRK$I;1<%5YDN4K!o#%=$F2OAdrRxxcbj_~%HNeos)W z{E#&V5Nh+OW_j@bRmd+6BGW-lt+-xn%wix>falh#!LtW%ZV$(ILed|bju1@7QZJf_ zaHZLm7o!xr*Hb!bG|!~z5w4g<9pT_ZSS8-Z+*AG5*aqmOw@ZzQ9>lnzhi>%QMB)$D zp~&rM-Y^BWC|XC)DuSnf>KnG1@rhlbJ+qVWENEF*Y`69qOfOdgXul*2z>#l`RXkmo zbfruJ|56|I#T^R(G`oY>qgc5Rl3>kZD%yvau-s(@9&nq2|BBU)Q<{nwS}AWs{%&|?;s%&WMCWB#uHya6 zt48rV@5phxxv^obAHKJcap*%1gEz&ts5tuft)R(|5(Do0w%@rurTEi0O&Z$M7=z;r zy5S|RZ&At^LL*scLLXe8rk@`8B0dWhO?L*wdpVXt666beCp@DoAR$aBh=uJ z#i%MGT?(@+wMY7F4mB(|)>(HWAv~rCI{PKY6^de^`esC@HgUnqLZ!?qax4BgbO1u8 z94~wNeUKQ}7}-*wvBC7nYY{?~?{;vzGUiVk_)8t^c8cesaSOF@;H~YrkcW&nE5sSy zKtc7dT)`<-(PBXhx;WVnl<_9kKFkX-br{sEA-+ z0Hq{Rwz&+xuwT_`=;%X3P2b(br>RTlyxaiK?f0_b^HsbCFAVzz6C;YEcF#zU)mZYz)uq&g1oO4CvfHkhMwX-B$oF$A~OjWAlq* zRfIIKbl{pp6CPd7B4~%C$SeO{E=X=SMZ5O5Y=VQm%je%sEkRFROCPZC`^ctLcWfT5 zjyrddt7}2NJCy&oHG=Lunr(ofHTc&l&R*a;6}KCyaHCVD|Lt_wLmVYMtU0F)YP-#$ zc?oSdEtk)*{j-w2E<&|Z2@ApStw!Ir={Rbt?1ytdHD6Sh}WI;TS+VeDc z)F*WI+!=|Bd^)z$z`ZJ%p2O^10e!GHE7h|p>!fwPJ;je$X!zyfsdAyv7-LTTBOb8- z?Veh{vk239UELE+>EC+yyVlzW`)pnk?kcwtuRQRDGYQHsC^S$`N9yjE>`TzgEcfQi z+@+LWAzOY*aV!wtWH#AHmmsAiZTVlzb|ZI zd!RVq>){5$PZDvpHN38$_TpY5n_;=y(VeOe+V1$T9hWkB`=^qGrY&X3R(Rh`<-Z<2 zm6^z-t^*Zj-SDKx&Y4^aOL||Rpev#i3BDmm|13IdF!RVV0k2XF-2GU!-P$}TAvb11 zmtA04(Dh0(tz#!X*08gi(-Z2&v|_6~BQcYqJ|?x6Je|q~u(p$FS7^GZw=xJ>>kU3t 
zTvf>XeV;-f$XyLNipr-2UAUA}t8#sqh{TOtZLP~{wVzGX8^WaW-{`v)|Mf^<>^Va;kJj_6Y#c}WBI-*UWDly;HI&E7{c z!OJ5H<>Q1PH#k`ngw(4g*zhagbc&7ee>WLR%3Hb8k$!1|y74jKIm0LWIfHm}npjQ* zgl-~=jP9K=#%STkCy5|U4Kop$ar7vVG9VDTq9B7G+LGbzdoxa}b%WXU1TX)5(#EJG zn?7-&N{kS-p(eC|$PCzKc{3~9^fQ)Id?d{DJ_7zC$!_s6&XtfZ`#K1DRM$Sik)I?Y z8x@rvMiWg#@`IOS+_%;%aE2i<{1{BEFcL;zB};L;f~mi83E~zbM&OOLRFk+L#@VVr zWNIRpUqi|RvhLrXbHwR#Q(P~@X%BxHm|Re?O87KLNQ8J&w|)j6la=JFxJbc0|AlgD zJ;+||^aNhn1Fh0^dL)`rz%9qxs%5GfE^BtHBk~xlaXMc&3J1t<^#~F*73`GHpy&ti zRLg6B)oi6v(D#b8y6ac*n1X+}+P5xXH7NAg15;uZ)U^dNX;goTap7Vvmr*12cZag2 zumvaODpe#@uHd{RhIGLK`3MfT+J$E*yU3-GlAN`x4Fk8qAhWBgdR%*<{u9CZPl0sP`HpvEp1eYhb4SfUv~5 z{F#~lFR{3#S@Qta=9^d5U4R;w%_C#zD(J%Lzub*{FTcw+&0aFkEYmXc*-+3U_Q+|0a1#dJ*?yz5?H1c`wV=193Fx#4ZA%=d1!-T$$S@vT~=C4^L zQSrXd*S3~>r7bq__qduqqo7JC7n-;qbEQYt)}#9~?hm;zf}X7v3UB#+FEnKr9i4<4 z_+dS)Q>1h_vI*c`Yj4le-2~+Cc|B+@H8A2md|O|W zqV*!(;l7)3xqq@{)RdMz|A9{sy!)r#g=xy`HcI>VqV@$Y-w>iHGFG@8Huh zsvz5$;yc`LOi)tegM-OyD$OKi--Qn}k<1r8sT8vLA1_CH-0FRYY;wL(pg0g%0gqpw zis^aAna_#@%E+F7Db3w>7F|N6N}U11NH0|2U#D28XN&b%R4BP${H~?E0_tVjnkpwe zh;xSz!il}i15RrAA>*M7X%U31%=cV;^ik2;}bwf=ZWaX~Qu9)oZj!i2z zGhIBd?>>spFMzLd#4w03HzG`E5r&V4#iUa`X34}2Kgu3=NVi@S{346=lrqIq&+0=R zI1W!LZ@eRSJCV|1xcVz}gATuULO4o*ETxRf24`}pY1jhYa2gT#&a;!?ip+dALFvXw zZ>MoZ!(ahTG*%>g1Go6+5)1r#mIT5?D8^^WtS}tF2q+3>X3dSy+B82#Q;6S$G_n zdtHd7S2&>#Uvf7yBUp6vwvPpco~ID&(;(FT{RZKPf%^}EXBg`yK^TUZM_v*@UoYv~ z;s<5uUsyv*C_g;IXRF?71e$wNlz3ea4fVm(KgAmRZHPmjBNV%|+;jN|M)@19W=eu* zgV3|R>>3PmmKd9Z2cK~^E9q!>Uz@2#k_?^dLvd=C34 zczMqY-Y^+x|70M--b*rxdn>(&d$p`GMqnPci_29RXR!5i z=ha}e1KXbwV|(h5;3Jd@S?=*j^(-^F8#4Wepfo#px@jrQHle4>0zbI!@D*78nx^iY zXOg*rPi4OmmpGLLHVS(esUC1LYp^yJR<`16f5G%+AH>WchVCr>K!68tn5?vY=B2p( zWy`;P^co>!?ONKQT;;6p>RcF9wBtE^96Xn}u2_AGICuN6$Cy%}rXI+ESWa)BO^9S2 z#G-9`LbX|dv&cUe6CTQDWJRm__X)i1XovuQfO^%qRC+Jx3B{cm%X+w9GOO1vx=KAI zes)%LalK0sMeKY<#y$~pVmwm<_+Y&S-`Ij;N#^y}yk>d30w2fJM52xWIgFMf53dXj zbm8b@E!|3fmjkc2Y~Xq%$%VpCzYM})svVTe$TIj`h3e(k-Dgqe` 
z1Dzf$2&6aPwzH&$?$sYZc=AanZLqfD!K`_)0k`E(u}RaE@F}MC#|vAFf`8MGpJAYv zEonWs?O7Faz*7TNCzl4=*8V{%oIWiYZSJ3354fdNbL>2X4`e((TH;xR62%zeiPb7q z7AwPCOh5hm<%Bp}}j2y8i{`XmTJPrk30 z{8T)+WL=LyU%`@yU3WwWj|gPs7fAk+6M0E1kgsa1iZMNseJa;QD9IzR^jvNX5>6Fn zFs>Ed?ByM0v~WC!rz{7U#UJ#bAm*Wh>Kjh?ySe9G1W+@2f^(^fwi9yemBBBv2tT{{ zT4gw$Q5)wCd61I+v2e}QH6d2NeOOLPV8jI;F0vTh-bet84$XjP~Fl| zYf)>F`1hNy4}vPf$D8@Px2G~yLGLk_WblQ18+3!z|9;Eu9@$i2`NVpZjga&qmGGoi zr6_YIOY!?MnIb7N=>%Sb3#59Yl20QbZWz&ydiyVEQ70HLhCg=6H~vp7#J|JGSMH_M zAKT!q-p%m;0zR--9(Tc^OEh>Zm05I(WIR^qa(tG2<~yCsTjbAmz=&mHiDhh{maMNF zngl+}&LENBYYFTwB0 z+LOkRp@;%ju_?I?bNf|g_+#xSv}nf<*D;^3*`DtJuWwsh>+0~G^xp8JX>cjI!y+Tj z^-u!)Y2v{a#yt7pCy88N1=l_8vZwrjSJquoWP1fJR_v2t#Y<X#Z6hP=T*3;2LwIQ;ir=rX6;zelbr&0V@b|cCnLK*SYQDHc z2l;kg&rhwDCUxKQgrs}EEyC=Z1&?aS7af!=d$}||WjFB-OwI&gK1$x=7*b`4HkNO= zqpas$+^JG%@Mog<@rB=dXz70l6qy~_4Me~ujIHbjpB`Bj z@U7PJji;%W)o>&5%Z>CbecBf`l0JVC{4^B#BCX(NjJe>7nBHE2cMtK;;ztoc|08$t zWa13*U)$yDNs40xNSN%aVmE#S3WJ8rNOi(7x z7s&0i$F@X3F`b}&2=DM>PCeQ2t0M-%x+>1mDUR3ItLm6r-LF?CzgB=;%6pz9WyT@)# zIC1WM$n*UjWkc%axIFPfje3fDT?FVD zzA3MJig``Eo3&JU`A44R`$gznmGPImxH6IYe~7y)IDw;TaAKY~q)LvSW0>F#l{@d* zy~1Zy#t#1XW6XoU;*CRcnt#`<8PJ*=esPRH0i)w+m)Rr9be0XK=ixP>vFmymEhkd>-$G^D}1)q))6?e*fw(mHfL}?p@ zPU}gQ7*d%>Rq-L7vbYQ5-PCgMT9iHXqVXSGbEYj;?>RgxfWFmyvh%E+s#SDZPKL>J zVVP;ZT{_92^p=$T>)siBjC6wuld~P|36;*?EbG?K0=S7w{v=cQuT3KqmcM<;YkFEE zd0B&Fo-(WcmeYJ0KAr~n-@ZvU&zJ>|zSP)DO!aHxy{0>u4!AUZdUH(&)^+ievI5C$K!SFb-TatjeGrJW)}yo93?bJ zzLtpMB=5Rm)!;T;VJECW& z5*)y@sR>zUXRR=Q6l8Mwx%uEj8(2wTN2NCk;|8uq1qz`{`+BBrnh8UyHmkqNs0HHN z7z5*nA_inYo+LUoicPqBIt7`~USr|yV`8hamJlO-EO-G*oIBY!<(%Cicy)Zfub5E$ zoBO1t;fwtNGW~N;-zUF5DQ`c!UIWrTClQ59H?AJQcejXc)wz!$)w5|Cp@Co74(RTS z=tntcE0NvVa~34QYXs^g4yEb5dn}NgPno>mL}6Qs7a}sm!{{VP4;#Fv=eQq;|q_H3QJY)u}E+nrdIN&#HzW=LK2j6q0c@ekPhl+{o zL}(^D@Y3OhVpKrb7zBPD_Hd(fB(&>)%l4G3CNA~HkZh}YMIEyQcq8uO(=r$0g0UGa zAXeg(e*EFQh`4R@idsfI3WRw852AX7EFup{wdxz6al^>R2a68k3qyS}0Wzi$6jD^} zDzI~ec(-)c!W1T0h4z`k1>oFBxJ3IXG$BA*~r-$wh%b`%;tnq$|WQj+&>fl@f{M|f1 
zge&8i?R1Ysm5wL{!o0@g4|uYDNVQ_TA9}#2&XpZ`=tGk#@MUDYcNpu9b*URs2#ZW)cRyFFIA) znOn2l4Zgh%_0ckRV79+r4%>ahH^Y34%XfzIKluQ?!We}IiHpJXS6Y>Fl_D8>uD)% zJ3`$CPZ%qS2~W~sfK^^@T+624i9uL02be{L_wmO=4`ksVL^I|7+T#m;GhJkJHY)Su zCZ%w|&3!eBYL>M{>-v<-$#uX^VM1$GQQf5$o(}TGw~qDsNq77|y~@C_>~EV7PWTW^ zg`0Uh8LTB)^ws_V9>ia2l6a8jq z+W;o!=e2&omitNtGmhQ=Aa0+x#7>05NGajO!+)f~<%BFeLkzqUHJ+B5C{>o)La4pj zyOKJ@ZTgkhW(Qee~g8%n*r- zn2f{QS4pcM)|=xEq}Hw4Y~VW(yOqW;)4>%Duj&FpcGhajorr(P5wIA;sWvdGG5pPZ zojzgxHq$L)$U$udU+@Wmxh0?IC2FNn+{36c+HID?6=38!t2^p3 z@G2yVaf)spcvVx9f*#fENW?Qv>Z5i5-Xa>Qdhx(iGTMAr<}V(P))jwfpj&^V<4oXx zg!AQ+xsXT@(zE2yhSe17)l~bN#0msZ@_C31fJipl%|J@mPDNVcmX;^b9<6M6Anos) zJp`X*R8*y5$o7HGH0ab{uFLcck{tHVx_o^NujbIYRF-*~RRf0pI2ota-q`E`jtkuz z1<)WCLcINPz0edWWM*O)F7#IkK`NnM@rJsWx0;ZO3;PBHH^`P{cJCT zbeI>zKgsz8cS%Wfc#EB{U{#zMLCsYaVvVu0E^#Lwrxu7HdpLRx1Q{Y{-~mP0bI|$l zuWGZcv(GdH^BAj;L(p4Zh$Y~+q=ZjjD%o*VM?bm2Yv5HNc14*B1w3`PG4qW; zMK1mV^gZRnYO#HR(BCQeB5Nl7C5M{9gxr#opMOI4@5ey!zQyuuOStA5%_|tIRVdIZ|k3#!DC_nyiCqZ;^pEn%FLdPtrm+rg8FWN%UH~ z>UEzDqsE6JIGsA-@xOrgv~f{xH4ISsq_^`2;xb~idU^Dcwx0gn=`Myes>DgBAeh_n zn3*(6Q4Sz#(3pD*2U5N&{&EXEyV3vKL_C+m@yPj`L^zhODGcTUcIl29{OQy>F(naO zT5u`NIc$o=KKikWeNs1KlUg$iNuS>NtP7qRqq`@OO6qiXnlmsiLB1N$#<;We`N5Pm zm9)?H9Gkor{dI+&0an4ms(nb@jbIHt++-tV?E7GsrZP$v6xVWk?!m4`&e`(m4YR4j zKHl3#?XlpS-eD-rwmoxF%9RvK9Iz+%V34h@GQFM8goI5yr@Hl#+xx!uf)($4>nBO< z2k`H4Q^`p-@dhw;n5(@P$RS`jX}7j(GR*O#OHibweK&uG#`9DRNUFt05k(Ti<#7TU z@`%Wupp}u>|NjGzO-}OO0BDHoQ1<-7EWd864DiaAgv3KywSy>mWfY?{C*wm^2sku$ z>sR^2nX^{Uj_kt989JL^d)acxmh$Crw9w(W0Ic6{gabuis53oGF>Ic6IaI;;Xic-> zw>J(6UEKpf2}c?xa&GFsjPw7H9VE+-=y(zw`$hmH&RsGSVmEZyMc7!G5p)T4?YA@wsp zY7=SNoot4O-8pgYe*mxzie^)y##9$se@GPX(8)&RR}oJl0loAHV^YW5&FXI`ANz0< z*lfRPaZBm8kY&%F-!O=SR~yIrPM%2JJlkT1^M{USC8id9qSN9eK|Xc)^G|W?FwDei z&AUE7SGiU+F~8zVl=cfyO^{!CL6qAU*Xz0UjAY8xZWRxamyfV3h>xg-*#};L5@d2- z7+>9Ie2CxiAZ)93x;-sg-odE>>$e;<;K9h{VX%W7r;~xS$h5i=<9vna1PpVQ8VR~U zcv2OPtfR;bsJw@t?kz~(MzP8B(6~i_zsJpmvtgr-XCjbz^)lX!GtP?L&=#R0G10RB 
zmekg>Q3h_<1RWIfYb~smBi_461Vy6-*pIoHMx!#!)lo`4>BdQyBZV% z#=!@JwS;`@I-IBbK8o;X-bv&4*KO7Q6b8DFz7evh!JQ!(JV!_lBg?;|Bam^0l(jED zn*wkdd*K$ZNc=bXwJ@j{<0#B>2cGbs>&5U4Z*>}6!JFGt_Lg1Yq$bX*zsDmcWCe9d zsd1o08Y$Te@c$HWvWJuz>phuWrR&MVs;B8Q6EW9XyHQVio_#G& zz=s*iI+&w#9l-X+IgwX*p7A!XIci6f8M(l=%fuI-8U1PM>GE791bpjbtYIaX^NY|T zOfM@$k*EC7I6;*N-9{O`?PmuOc8l6e&$SUQO`8;op;=hN=GPXWt{Em`l;*EPM@_ld z_$S1hfQn18lG16(bGGe1PgL-5QxwWAVQU&a#b&RKjZTqD(MskpCfB?+Qh1wXOpE-a z1L~=pw%#FS_ZF|M&T7NYn$cVey>XqZJrtIttUi2FT zG)E73s(BQWm3{MhIf<#h_{xJ7Sb}Or9S9!{=Ry4FX*1uxSV2`**pKk?^1EHaV7Sx#C0NMB9A-l)qoxa z$BfKSnhjeO(t3sGTATh)a5rrzj0JRp=f&1z!wiTW&g6G}up-KZ5pg7mA1%#MrEJt4tRb+#4L$lpI9`Pux~cZvho zw{G?T>D+X9_D}@#&LabB(e|TZ(+7bYG@(m}t2@Y2Yl21a$0D!;MUaGEL+jR`;oNd@ z5$3L=qUgGj(Rq8ZSEg&TZ*si-?K5{mPu81Or6G0)GXTZE-J0}G$eduqT_}+kRUA)N zo)_j__h>w{cr9E4Je!)q5Lz6E-C?ImBVlX!VE>X;`DyCjE^{$8n;5=4-0vKv%JCVW zC7`4%6DB!u#-7-KQ5Wvfz5J5yrviu!9w^&tvrC&iv zc3|jYJr=+za!1C4I$`@=3#P_0QOv$e15;fz6}Hm`pJ(c(0=q!qHz4^HDKi5ry>HM( zp1=_YUCkomFShJ_VBczui2uJn+Dd{U1S7l<}FXaB- z=7ws&a04LwXKCGs1oy|NLVfeRuBhdW$ugs^AocS)v;XcaOO`T`J7l+Zy&gq&JhTQ3-2);K zcQs0Px|*`*E#5TEZ2tSxhpW6{Z2J}?nsVu|;E}z$-Bwn?Z5x!(ROsu;_`iMj)0e{& zFgOP0!tgxOO=f5-QTA?H8wIHX{*31dX0Hu^#0fep5z&*~*IVNE--Izk(AP%7VPSgq zzXP<;xPhO`p@{beRqa9M&Fk_qy=wc4I@?QA*Hlc}`sMxM;76G61T78HlL0qUZW}f6 zPkw647^uM&AZ*8OT*}$?F{qwvRHkqdahmb~FqRhHmBKS+~xEJn6 z#ga&9$V0tM*V81&2*p!!H}m1J%*hlMTNGcRfM|@v_Y$m);TKMk?}BRp%|2=3Z*S>bb(W= z!p;3PcT&L|I{q~uo~I|S)hz}$2Dtguky8 z;9H+kKfQg8F!s=f-;r>0Dyd3jF+&zyc4wh8%gu$I6bW`>Eos3dkH*73P`q6igvwgL zd}j6rzIJqPSr=jCi0vC!7%W0j6@-gthhrbVVnGUlhCWTA|yR8 zF1fg@YdL9*M_X~1q!e@75}zYYHK9U?O2;Y^J%GW2%Ib&T=PpC_6*5>4XxlWDniRnm zb81|6B*)7O@XI{ITy=hbNnU?Z-$Z>w|MGy$&yG+y`f~{<8j3mgrd;^;>Ko0Y3nHT_ zrKn8QI~0Hfz`(Z8C67;Ii)&GN)rtD-Qh8Q43#w_t-mfiB?r+(HCqw3U3hc3_=L}Pu zGzzU1TPqh4R;tJ@0%f&SiUluw9~}4%dVNkmkhu>WSWSKn_X57-m##6L2$b><%oiHI zt>;?e*U?r_BUUEsf5z60s1iGq$V6CrHR0X4VF_w56)SU=iXf^>STg0iLi zj~kMtXN-AhS9C2#e?Zaz(KZu~aU4_h6SZ4vgqfFIaE*S0@DF^2v=8Z@FlaALIxaxpgA8k#ii?fbNc(I)(aa^l3f>=ScbzEJ!` 
zX1ly^4)YV1ARz~SKBk$)=vLjl-C>zTvQ%%_M05sX@DD?@;SiEe!9{?-$9T*OJR6UB z7Pj#I26D7TGL`Z;3YF7AL0ML@O@3^5MZ!^RnONrtldAd1#wm6oK(9FZ(fM>G>2Tk7 z8Xca1-IPOzRVx&}oSfvlis>-$xju0o2Rh#=K5fUUO*{VTlyEAWKZuUON>u+z5--l) zq%E4ht>&TYa=6*4OkDYgB4ZeUVYI646^Qv-36bWGmuOAr`Ezx`be`hLW~23w%LVwg z4jt!wK52g-KA){1niB5z1@xMfJbXGgaE2l1p%`a~6c@wzC7q@Jvmwwx5hGuc@c?*l zT3NL?5}JyF6aff)ES{RfT zu#G|yIEjDOZ1wOK{N_6=6CrvE8k1IT4|s+q46;tMF1z1ERQ8|*dc&jb}2 z*Ts}=p9?F&CkqykA#R5Jl+1CKhL!uPs^1wLIJ2SU=W8+#i;?$@vddkxxpOM};`;Z| z{RvN8U(F%#UUQ74T>W~JhN|A87Q;xeC9zdzKeysYV=}d{OBoN|C$as8dK1T^R(2a3 zWBHzDU(oum_yFpaA~yBIUph;J+hCDDDw^1N1tjJCENkDm)c|2!y>~r&WDVGA`XRv) zNyH%=7m2k-6v)#>2FT(l@aY)wy8b{-A_iF~VNd?h%OR*1`W-vL z4l7i#Z(JscH^Ilk(No(17XfD%#7R?e1S~hgM$bLhoz8R|wT#PRerK%JEgqxbV!H*;Qopu#BDv~(v$0G@dHYQmrcF8 zLFCY|wAs5ywgbP+f4Jjl4z)$FIv&Hsnt*QvY&US46g?cNevQasjep5aNTq<5nO8lQ zabBQLU6)VNE^Hm;)gmm)u!q0lto!ByFlu{${EoqqFKA%Ld9ToL$Ngq<5Z`LNr1lXi zk2;YQymaoIhHyXAnRO&K-=`%3-7nKU~W zLkgH!&%EwkMSfm2t`9D1E7RkHfoFJ=Ss-6?_KLz*s{&6Al&#~{1?f`ZnPfvNyua$) zxfRj>ORKpH63}jF_RPvLR@qwE^^ z@_OI+$y&B;Z(-TBYS~zJ%Pre2+qUgmcFVT4xXk^3FX8-O!Tove=bY<&uj`AeMT<$l zS!rP+y~b?eD;u~sqoZkn`lm7qfuWo+TB_BdpFa%OZhMnLx+cwG(^?M5lm(CBw? 
zkNKAMgG$X%B0||G^FAO9(kC7KQ=KTx470Y)@B?PmL05-HEX;2@WDgU0()+ts*+gEV z&~c==D-J%J;4m(it1SOu-94cFSxF<8rHO<+&j`&*%Z5G+vruJH4ENg--&*~VBqn%j z;QVXIzkbYwJN}+9SaH*Yo)l1cr?1dZ(^n_^=rR7r#OBTV(2y9Ek^aI;YW;Q-%Z z5?4=)>x`-C9jLkB%OschBLvbTOm))FP`ZF(9QCkBD(!5t+mepgo*th3t>Q`VY#ZK7 zpPvwmJox!Jw}mqlIw$dYLfNi_3&aZ5+=o7-!TMI(Gq2WeM7y3G7B ze{#Djr_Jd7j?jhOk_MBZZojQ9?|wu3Ga^0t)-G8-I*`XdQ4vt@p zFhx7@u6+BZXWypKWS8irZK{Qj%K`~ zGl<(yZg&MNZL(hYdZ4dL-Bz<@o`O#@D(y9IW20$sSOwX{HcF&_R5=HP1B6x^v6PvZvE z-gR=PyA|}qoMQ1cWV|YCN4e{v6s7QJ4~myP80Eq9PNyV4o7Qr_d)?f?V9c%&ruQ36 zr5mR&b#BI!Loh0sR<(X)?5n0Hw8kHdt{MLL5%3SelOKn=P@?kPx2=|*)|nFhopvp< zmI#die{diNej*T9_S=Z?Vr-=S&%>62&yMG93t}bvmcMQPWz*Cp^I%VRQl>KP7O&53 zU+c_Hvt}tkC!T@?%dUx@W(}7peKvuA}U=UT==MO~S0ew@Igq1)}V1&zN z&;Vll`h2(AH$>R?N!4RzE|unJqyI2f;y0ALuhl$<@x%)xRsa#w@t<@ApM;FM%!>jq zM+Z-8F92x_0}X$5cj@*V{0g2ZI$Ps=89}PP{k}HBnwFsw;>6GS|CZxYe0$gy$+E%JK2dPOiN3dVux-}u3s=?XD_ zy`4RYlNKLtu>ZV`bGpusNNQc7`?E74IX2-|$L;YPW)9D{?u3F<;? z#almVCA8W=&wUHgYa5AKkX+}T{4Y$lu zho%c&ZQKY0t@$&0Z+n%{pa8nnbkY^yW&w7OS%rn-bhPq~8EOvA`e;!{H$f@RUz?2( zl@@TgZ=B&-ri8@yHBK1W#paBatcWMEjM(b3!KS8JO9MZzM%xU~l~g zXN#NEfj*=bL{kO7F70PpGd|xe^+{XZn_amV z);RlDnpe%c9+T&li}Tuc1x!F&bf1`K6!-vx@^`EaJ_}R? 
z12ne)nq?9GV!HS9D);C2NGBDHi|G{8io9~;3k?^3Yh(B_PX=;k3wS|j zB4G!o=@mpWe)XFgffT{|-=FXqe;vE)N93j0nGtA&cuiK>eG-v_J%-8SoIcRY0x9hZ z#-&(4YwM_9gC*XZ<9B6>TDuJMDhRSN@6{T?BalQ|$Clv%Z+R_^iR;B{3BW!9@n*N> zv;uj_GQm;&+toxI%6F94JBTpvnW~D_3rb*&fdbnqny`q%z3>^YiM4LpeYotA%bxR| z{#Lx%;RXDFO=;D^n?a%?`FUZ5-KW(2?5mXs!TocW6Q2 zSu`}Fn`Lu2Jwtm~z$^F(JySw}XJlvBo<{E1)8ikiYgu%Hcbso6orS6fr|LdiUW5TF z-@WhM59J{aAhsJ70gJ826he3e!5>2O$cmv$=`}gA>{Ln*K>Ze>njr`H;Ov5@^?`uc zFab(-pg9q`wpRB>?(Y8ZK9Co8!jm%JkTJMF>x!SmmVeIgua+>AAsHa;5*Az4i)1_O z_}zTdp%!~AKGVl)VeDS;&o%EF5qQyWat(~rj?EO3jhpYShIF$AcTFE((m%!q^THtE zr*oA;dCKz{!4&w)p*QZe=#eBr;BED(Bv#^^mB^s@bsrOwjShA9g4LJwDCX%k``K&o z@v~JuwiLWnI>&Z!#X7-GgP|0eX)(%QTigkwk6JSzF3S|9vJFfEZNJ`I8$`8T+06h` zMbOK}{y^f>*t+9Y-y?+07;+6>oSo%|`f^^uNOka-%eF~X5SlM+*>@GsSlm92_#Nd# zMMdb>f=D!kLpd=>n`G{}tlzGl!;c6ZuY%-g0Bi84oR>Qk?z;m1l>nn%L&NN!Ml6PB z`>3=Bh%C$C0ey6g@mY)lsqcj0V?wio!v}ucr`g%>y59>?%n9e~l{-K5n(CSR&2CK5 zV=v!RwiN<%szBB+3+*$e!zG3cFWXu*3e|*#_Sje&jD$C5eiVOKpm>V zOvfho;e~eiM{?-HVTz{_8!fHVQk$amN}5U}5X1ff<~;EyVElKdh%1QmmF>j|rR+VW zhI$`cF1a`xv5F)29AXoEB3bc%AocTFgRXv=H)l`lN=hX=yWta^Uj zwXa}j*~0=fhW39|BJ;robatj4;z1Vmuv4?bkn?AsNa|VOtk|>2kdG%h06fR!qz%US zg`1p3|IFC&*s-_SV$2QDUod#ccyN}$C-1ju$gd@tU-S+2>eh8Ot=xbPn_6QHwC+g8T_di~7JT^=QFxiQ~DJ1D)@73jAw)dN@|L}3sJ!D2v z$RI}CbJrYaSp;}RdAxbgNw3SrbF-7t#_K#~L8`S1#Ar$HNY84U&>HrE(VSRGS(pss zCpZ5Hg1mhM0-!TS8_%=h2VKILd&h50e<@^t{=o6PgX_q4=74qRckuKNv(S&p-&7V_ zi3?XUG!Yc1>XE%-t>|!cl+;bso{$`L^OF*8Ulp5k0XseWj=vhfZv6gI&q)dS0Oe^% zAS|;T+u5`9M`ue!$yKhU{bcaNm%5tj*PPmYu9267cFXrBI+h%BCZq9&vRCy3Zuj7m z<(bx8iv%k)xXU7z$6sobnSgWCnatHO`y|b?lwf~2E}Z>mA&l<{0XdQ6tse&Jz%Kzz zqCb^U`Y1ntwygGiDi&l#Ay7Krn0OHU_0k@tV@l2iJ@XO=pT*is>5-u&!QqE3Fo;SU zog#u*G5z|ougUDh(zto3m4rMmLviz?e8~VjsreHk{t^Ldspyk2Z~K-;C)9|VTIv4ROgRm zlV&t6S1a%cq)MlHv$7iFI&)~&!Zp*Kcx|mZIEy?Cb}Gy>?wId_!_gTDjw{s5f3P<> z`)ukA2uitwG!x%i39Ymi_;!f+fz->P`89ZZF~ujjLw?rD5;bjXPq3B@0M&?c&&@8JCSb_naL%xv|m{ zNuPE?qY`T=fz-$R03PhsiecORhHUxvEAfMiQ0Z~W@cq(PVwI#d-2?F&K7*9gW}J{D 
z7|v3(@}#iLbBqT$pe$Hh#wLHjVeoAlE$J4WW@mcFV~*|X#-gowICl~icyqOh6gW?EbqQ}jI71a(&&-%CGX85=#=hXk{?P%dFgJ> zYKH%2H8ije<5M%`sf;9UoFRSy6jppIR(7*b;$t-XMb=TX#Mfz~Ve)dp(%i#svc3yG z?yija**!G1Jv;s>%=*1~QLB!49fPiazjeb;_Fr!J;uleH_TV)?Tn#c{_NcD?fV$nW|xa`0xRdRnG?p!=hrYn6w)%;F+zD&3$ z7wnpQZ?^D=zL{#@M!6>$juJ+3!-f6UFS{OI1L z_$vc@s$YPyaNkns2RmbV-nDh)%+B-Ap2F?kBUw`%6%x@-00=hl4UnHWVicSxry(JZU}u(H6|y*Yu-;o-K!@K=9r_=``*E^IG`#&yQ! z=B3mxL~n1bkhTu(W$A2SW;VJdkV_yZ$*0cdX-86k1Ibw=Z%%zi_^b)_(f|hhgdTC8 z{QeEbEVS`iFKz3|b2?18Wndec^G{YO3bHpGfw%|7<3aD>32&xjwoa%v3E<53i<0=4 ztCH7i;N<-vV`2Za|LLakG(_A>+680*_+=%D^Lcv(vp*AKux1pU9PJ5)Uc+RhVT84f z%>;4b*)@LB;ycLS*~KH3f5>-N4G`o3zPWg49B#ls6GZ>lrzVp%pGDk`^V#(>(U3Uk z)s}%TApYxh=2Ysfwui?trA-CT<0psN}- zDuKqX+RnC!l@=efg!DO7RROPIhq+BP6B-wpjNo7|@EU=FArl@)IVW$jV&TqyDiwU2TJV-lsAD+0-M08$@?xqthFjuBcU3D*_Q^rKtHR=A??VQf#q6b^_u4E+-vBcB&4#ZoiVJd__P^2ag1y9 z%@huJxh&)p!YefK4}>tqUjetcRrw%@@KdC@gVUSFAEYL%R`mS8cc^6@`NEsoz5E&fL7uY)2x_37)ifO6ow7+wDR(iJ%ZzFX{%w5Zu?Zik46hRS4ZOy`ue*Ss^l zr+E%#m$TPj#4c$|Hg$nv+-a}`=;j88I{_L7Uz{ma7sC31nh;2g++u?@B77T_N0aB% zQl)rR@Cc+@TnmYX2HT~Yza z5MgK;>+U}KH=yzX*Q2TK=JPu#!|y1h3t5>e!)Xq99^mRuY&Tqha#}b^Hg{b?3y;*- z_>f-95;ZO`)D68t4ZK~Uw+`-N7Nz(rn~;is`uX0gz@y$28Un6ZYUwX5kv(Tqmy`sn z>$dwtHFsw>HLp`3pv~SFDd(d_3_BDJTE*9qUlxZbD~4{{IP>LIGVUAT$4v^hDoqyZ zk1-)|bpwZ^2Qp{agB{@{o)y4u9)Ed5-J){F)e`g9lN7Ah@rV>50)UZH`}DIjVVksH zUGe?+1ZAJx`ahvoZx7LIMI!P_@Iau+(dr^0>*ocsVmG$Be%j&mz?M@O9G?Woo&TL< zm0CEzn%QYx$u4v2`=6h&xH{f|&qGMW#F@g49|9!XDGR3%+@(PbEEjJFfhSwd@6t!` zzD0=`8Y{ zk^Ad6ivx=2e<e@AGnuT|ujkh~damFd*KJnl(2-srT@e;o91ri$kz710V#!UWAHXHa_EfUF23CxQTFZDw|*BNI;Khe$?nBti*X-HWneIox2gR{N5#Iv|N zF^ByRLxWZu7@x&-^m(sA-BrIXlg1`a6NMOA*_Y(p>^vEVbQuFbV9Ql@8bBQY@H=OP z%qR?hxZB@g$hww2dwuqH9{c#V2CMq{sZsr~fsGn>Ntz=0d=ij(E>y@DX#10f+P`ML z!K+sMhL|Ou?S2*0<^NkK4}9iJsmOcIUz>V@P0JXY?smv;vDG>LR}4?v@Xv^%8EHbb zao zGW0XW;HD4`@|~Jx8nv;HvZ0`3OY?euo`VN_(f{4#P(>$U9E+@OBH3{bfu_M9bNIx$ zN@|n?Un=sy`=@y&kKe9to*HDRaN?5Q0cG?{lSPv{W8>QeMx-%5+w*CTkcI8WgtM~V 
ztGDGz@FlZV+lfe;E!{lRWliamLaDdfh2|$5k2&(2MColv29`WPgf8*mSj&En$XeL? z`42Fw+H>KjBBdt({;;z(LI0gV;@r?iNbmQi&fV=AO)nslbQ|DYCrXis_?D%3{5di?)oCnty=mhvV1tGD`BH zM>}mRs;!SsnluCRluDuDy0EDYON?}XB4%*1?{U_YpjNqkGc?%KNUOlNskZEXt9xnl z7-D}pB@PZ}@PZUFqSuNb3lxQBa9EaUo?Z>%!}At{=bW4SNJ3z}fho6n9W^s)fAV^S z66KhcpMQhzzo+CrRaLG*=0vT5SEAPU#?5`rLbz-sH~Gc+8~U$}`!{OM+Hld3eYMBq z+7xd%tb>s`Eu(-!j-tyVs+d7w4IaOCEmUstE0kMwTa{g7Y$iaru5BCJt!>-3wY6=x zTifl{wr$(?*0$TXdi(8nlbhVXcTO^AlF4L#%uF(K<~+}%o)gdx=PoQZDM$gENbY@) z%XvpHXLtO%vGBYR;FA!eVnEQ)$93dVY0lv2{Clc%0Y7rwH zD6;vW{9~2&$qreWl)x}T1?V1g_Z~y{_UAU4GMr*h*?hzeO10`r>@0>zB7tC9@`f*$ zoOYI(?gANGZ5^9}qhgyHJf?}_-LWA6Z;)d-vIT^=)F*R{(Pjh;o+eirmj&V3OHOWb z0Ozn=>utGB2YPSUgw^?6zm$JcNV-takmW$@Eq3Az(zXqu5XuEF7Xh-zfHUAXb?_;EvQ&NajS4&8D zW;T7@e7IKVD~cb7bnVKBWLF71qhNqFv@3fnS0a5PkNC74{RxH@EK1J)yA8XqV==SX zb{k_fjfE?|G-nBYtJ`>OKd=lcvzW3DmRN$vfp;F+fA=`NZ_+>(Vx0n8rq|a^&H_#@ zJ^I}?h5L`YfpbRHTmV@m_}zy)OccMOqZD9V)jhO!+WShWa4+=jEtxu`qu}zl2O?XE z#7)zRoHJChh{Kv*b0Sh_#dz%}cgmpMkKEkr9h08IDoJ>WYLCB}A>L&G%)j72d22-u zv`+e{ZbK%cwY10&u!-{3M3zvWU6RQdzIj3Cq(khn_tM5RZxP%&)c;19l`0~>soJZS zFqV`s6-5zz0=2vw#l!G0A`jFsM>-d@!E7;JFKU`{n33k%FGONKDL7En34vZjihBya zdPbKBh`GRhU`K9A?#mazmflAh_I_9FRJRcvyo7mT^`MWPZi*ycq=MaM)aQT(b!&DY zEd%XWnG|5Kc~$}OZTzL-0lcmHd9Mqj?G4;vNe%K?8P=0Tso9l|{XFw-1M5{^TQI+S zh)^4|Ex__NH-}y4JMNhKw`lqkaA++C+JyJZMiIFmr9M6~R&;Q6UCN`F{+2AKtB$(p z{#aAIEv@xFT;|gn^nH&{QKfYjV zT$RMj7qEieDS+A0Xcy}8h-JM-D(*&6&Y^*EM)&T@*2m_s(wX;;j>4r7*qh!GsZiE6 zMH>Ik<9sTL$?%GQ!N;l8exXSSQ22rCo$8w|C=bP~=40fu>c>TNU0|xp$$jmW-)S-t zRt9--aJ4mfY?-UzO7+LU>R8*$LHz<`vZavY%yt}Msw_kf{fP*G1v8yr z04JqS?HUgTPNkKXWmH>D_I!;QFj_;^@gTtJt(7JT+~(>Id-f((V1$i zC;np|)@T)H`LS!f&%pKonxgZ~O@2cwmRpWtJ~sO+q~b}nXvz)O$KTvXWOnn~WK-gu zHt{W z%eOJzBNC|W7x#I1H!th*SZ;nA^vG;(ez`uDmFh(+W`pO;j?P`@bfIDk0_D@Ifi2hK z8$79uU83%-LdfVG@zes-2SJP(?;cqqsc18k4=gXtZMPB=2pf4JsfrY24gLOB#;3TK zs8D?m=8bej`zkSdq~`}C;I<)BbHs^g+T52%r)RhtmZLKC{!ygm&ABetL55YLMwN>f{uRKG1N8SzSTFlAw+6Rz)1|qy>Z{%v>#>IJBb=c#EtVhW 
z^t~H(rDlCwNZ#;rC`AwZHvq)j8@m4SIaZh;$0_&g9zl}GJa1)vqD;)J!G>`+n@I5& zOgA-0{Ovntd})T|gY%C>sgLOMqpio+cJL_0yE%=B5lOf1d8?7%y1ULmKQCT>tk3Qp z#3!SQSC)4)4Kp&ZX*3RtJ0Cc$2dk1qMd(SR3vLMNM~4xW)%UdEhOg6 z1(T)X`N#iWh~;yew6_7WJ3%drI9o6jDVqEF0}2oryE=R60ZL8uH-&Mrm=a6Hv`aGV zc`StlR*0b`|71eh6SgMA8MXI{frKsa)W$>Qi)JHE%lT#Q+N_}76haxZvkPu@)PcbI zPYW^5sT-MW=DpzP-Q#lpZzJhX)NsLmtwSjfmZnVAm|_8M?1b43O!dMQ!Hs+`jx1inck3nev-aYasaC-bs99w5FUj712PM=qauQkCW`ak^eK~h1~_ges4hCxhD4e3vuB`SUa=aiRer^W48FD{^~A%u$$!u)zMdPr&8O(W?T^X8@Rid0b{F1p)uXi_<)GCWDfElk2(Pz3o_1| z1DX=MOVQ>7Z^xada{Shg*Ts1XZKj_eHZU0dUmDb0^8ci^_v{AOB+W;6Zv}0A{!##0*Xx_mgF}WSWmf5iVY!N3_D>UvP`8#O#SIMtfQQZF2F+uIWBlxLZQov>tBw4@3j&7>kiw5NdTT2`4mz8qmBjWENd2hT-OS;T9Q4~mBqrJkb+`xH+2BWB6*ryEllYrC^!#de*l4OkDA zh7{>R{%ZQxt1V3d=lSZMZ2gPQioJkD6zOs98%MuO=lD+g7$hQ<4r_HM-B+Pu!`0<= zFs|K~b-Syr&g}OR;eI@;2(`ILfhz8j@?HhiGh7jh%qS3zTP#S!!~-gp_g*2N11qo* z*J1AGy#K9NpJIOC-rVP5NeADOzYAvIJoF-_7@(L5^sgM)cDqq_0mrs9@UI(sI=n~= zo8#a>OuRS9Tz#$rGv$J6Y}QF3bVTcu)6HdCxRTbX2o<@m!Aw*~-wWL>z#39-t{ zjnqTqG27qh-8Oej<*IQ$=Mc~|8!9$OunVMV;6Agb48?On93G{wO@dd*~jgGkC{?uNMV^;r~SLstAv9Y0io!-hWygq&1JG zmzuaA&K^V%@alW(&pjkisS}<1OW3nu62$iX+VmHwm22JtbxjapoL2^Hx?kmm1mfRN z&vX_Xe;_#~evj}UQujT{SX3>$VHDC+oV!n z4mp16mVM--xRV~%EWV}dpAzizm}0()diK7)09V}a@Wf(`XU&D@^LEuv!q0WCmc;qb z!cRH=jxJ_kquP^fz&KSbv|IO=VYPj@5+ngvcWVJ6nBIqaFr>fc{klB7P?hGUe(!tH z*_X}0kjrmTlAZxc>;1z-<_Sk5)^y4{pJ{yN(HKV<3=P@fn?D9}ln`mGzuOh>y;pLphWo3+yIR`E8TsT$TanB zo&C_k;%kk6pjo#)OoaMfQLl_^sMu^;8|6)7$`7(|))`gtxE9(`Ns zAjRecXghhTO2=KRvf`=H$~=T*LWXC$RE{&;eDjn+eHRIOmqS)>{%r75s`5=bbD0&J zp*&x$7P=AUaYU;GepmXN-|q`DoB>D~FOg~kd{{y#@HT>dh7tW`VZdZ>i~c4|J>Xkb z7>It$g{MF=hi8kEspRfc!yO!TavLELe%G!UL}-#$%3sLO_w^tS{+YciycpCr1#0#~ zPjjZ~jdppmyG%)-Fh|PuxwM4@%jHTW%BsHiP~}WeqlSvW&A}un--&mAD037^&qd?M zO*%GShRNzv%FN)?QPGE!=eX2jW}c2&qvG)5K&I^$L@kHEJn=k_;U|Hx%S2o}`~>xa zZuk}HZ~cyUNO!$w@)#a=Bi!z?Ydlche@utc6Cg&Om3T}3E|r1hLJn-$C$^9crVK|1 zGWSj3X^myC6NWd^x%arBf#z7MSStB18K#j=2G&ZgSAo32YR?4ffYtepqZGduaLtEq!$s5p&`06K7H0vvhO>d 
z5=8EIPm1Mxn9F%s?110kN3yi`WY9ypDfIBO_nw?|Qd*M-lswI$CR0^RxTaxH$OcKz zBmqKud&#fy{MakC)h2&~wV;WzxVVnai&pEyrfQzTIq$p~`mPCjvzF%HEYB!{a0D9C zz(I#}BgYa8!PLhzATLL|aQ>zWX~=&g-D_&uik1WmlYl-#JJi$~$7{71c&A=Bxb6oJ zB@~+A4E1C6s)*!M0`}pF1F&YlP|~sN!(8+`I4awhm41DkmT9`Cokq?uW<%S_Rloe0 z1MM@pghsP0*rEv zL|DDZpKQ?`N}xN|K){4tJk?quMgL;hE)dYY~%bXC9XN@Ae0=X;O zGN4oY9zOLd@I5XQF0ZtFj2Yl;V#7Br2$`Ygfdpc6&N|#`n*8Bf&$)Yp{YZAc&=pZH z3qKR_N=d|_d8?$bImv{sQUn2`hEkP9_}MCyNWt6PD`Gj$!;mgkWn6j8VCuPcY9+GQ z0mt1i@kR`2xS#bpeb^O_ojv!u;1mEI{k%mfN7HD=Y`TL|wlf3z*hv{$9vI*o;K(lV z$&aj}gUTSC%x@}oP0{W?XIMr9*v)XNBBgRp*}g4~ z`=gxpaD-iZS7dgNcuM@KzbeW&%3JL87h3F;m*l2X z`@PJ_n>6afywI?w^I(|}>(y8LdpfFC>ZTBr+J6_!zwG13B-n_Jg|lPPqQ9g$ovw4x zlKSyow#cGFf&bn$+=wPTlP}}nOhK|=-+E$DsejLZK8Oxkd1TXqrS3XugdB&cULmM5 zz-(}v1ds?iC4)CsHm83K8&slroC}0YX{H|NWS-Y<65xeZ&tZFkUhgSWpu^rQOy2c@ zn;`j`Jwv7b($=YbDOekUgQDMt;#U71e64bLM76M~=Mk=%8Bd%V2q9^sSv-Lgf~e?f z*aJrSG|l@d$mA{&`oY?0K9l1251+st3k5vHE=H?$1I5Heb&Mm!WAj*{_-jajqtla~ zUtAWNdLzp1(3v3Ms}wmBat2wiK*#S7!WcYcW?F+m*7)gCnzwVe(L1+mk1lwA)fpTv zu+!JfPGk-5*4-!~C_enxC#M)tkD$Fn-Pk_+X$&vx;c-8gf`e4!;qX@q>-caK3N1KE z^v^r`t$xmNbSqpvjpFIw*$}9(Uio!=u6W7T-+4i2w)jI?Xl$nlQ%c}|4w#$S)H~4% zk>@=0Q+X(vU7-{J@PF`R(zW`PT|R#&Lu&9=oGLVdyqOmbVyxzA3upGVfP(FZE3ze$ zBO)5Ms^y?Dy*aQ(s4-X9=CjTyw^-aKNF;d#v!a^#Pk1&|noOPeRtTu$tkpITx)hb# zKDY!8&_UKs!&NQ65u8QCHY<1UN_b80`1M*}cF8e#>)7G}Ry!-vr=mVYPxN7%V=8lA zpWfXIH~bm^g`~a(4geq36*&v7$HDv0Q`#fkut2Gc_N1(&wpSwv^E(V^ zWc8$2RM%J7K)vFF6JhnrVl0uHGxPU+X6ixVME5C;g` zCdO0)`W+h9pYNRmE2QtlB3=k`)O01G{eE!2VpxI4lD}_Nctt=E*n#;!aJherpbO$J z&?xVM`_U%#w2p2@FY2J)l}{DB;u?uB)pn}jr-jC9elRIbb^AqH!_4r2_GKlBy~w)` zm_5kiNJ~e8dHT7bL%Q{M;JWJ8uV9zQh%+OpZCu(fqy900-MPXtIl3*=nrs@-d*N`0 zwBQP3Xtxprfm&`hRjG)RA&Tz;In<^~F{@y!dVEX^leL*+JLCf#g{ZKa2rPtKB6n_z-nPz)y%q&QI{%VN>`RWdo%?FOBap37r`*Z1pcuGC8l&b@%>V#Sej zj)f6Vo>AjP!Ew7^9&jBZ0>F|$Hb@^x8ntWZET=fb#C7>nH=RcT30LQk$*vi7)E8nI 
zvQClr>A@#?;n+S23#riga{okT#PW1H2kw<9Lnh9M_2o2C={aj>L{&B}`y4#>h-?ANm1yhUmG!dXTod~?!zMkF6~A5s1@MRkFt-zQneKZp`I2)K1@{p)dx*bJ4m_OwL$?XfA3;4VL;9iXJkXF)t?TiOGk& zV4&~@(-l#;tXSSIV)76eA%@ek{D}E_dGn&bD~BFCKztfdMyB&T$S7F1hZ1`wT!`>C zkGlzi>zcK4u3MQ@c!Nuw|&oV;0Q&iDsE>Rj$3*tKsWHYZ`!s=SB$FGR_+ zg!$)3?dR9pu*mc@J~oi5M+HGBTBoHJbP`1jsX;Xn#ps|>jvxNJ8@xTWqRfQgT)CCQ@;m6yE3)?728B zjRNBwB9<~YslBt}5xAEt=<}lw0k)o=aJ<51fSX6AAu9pLD ziMj`vC)d@V$v1!!pww@zxTT&m`n2m42>9xFuY}3;@voxt+cm5g{@8iOGOze^Tg?6+ z_d)D>VBf^)55n4_S4`^k3nZZAQ80Of)78KYQydFcLOVoWs!73BGLlnCWSobPFMd^Y z68BlCFu!&RLoO0Baqy-;`b4(S`PkHJ-6#RWR{+F-4>iagj4Vk(o@XoB=8CIRnc;~j zri`(;3#J#nHB>Qpq=znutl|&&=79aB(OOGl(aI-U7m7P>HpEx~I0K#Iq{U~r2_X)s zpl#X3`GpG%NvR5(UFgq$c}n-oc8@=v;g}&5Z~oT5icV7Ih>xwrK7&+tt3QBU=s%1l zRlJeO1fR{C*=c-w2$7#I;H+83$=&Hf>&^Y5u+Q5Lo`kFAf3|bGS8@ge_@N0(cOu7e zqRe4d?67BG>3eXG7|Q(iUe$$I`(v_1Sgj#$_hbLkv=71(Vzbw&?3XbpzxXb9%=o$8 z;Z&Pqp2kL7_12p>D{VaqemR*YLFT~ed&sNakA=2^kJFRyQMr_?h?8bUCSI7h$|%J2 z)c5PU4X%XotgJjoLhqShqsMAQg-AdB?{C|el-pSilQOnc4Fmk}Bic{107WBe?MD?v z$o|^Wkpf?%EMZz8B`BCzv8KC%v$`CVmlg7=n5+zaaL@1^M3sR){~yggLCBniHe>SY`jv z4#(%rT$fevqNDduWvly_b_F0AUNr(MgVAWP80cSjh)4{{R!Q1c1Cbo?(Ig9;#_tP{ z97D-w_$}q=A+_k8$iEUxG$|{|#VfpM0&vHDS8}}O=G#b_5`pl?&_piOGjd_EyTD7l z@P4|TAAiUd=PDb+Bej*M``J!`Pp}{&Vq38Fw z_>o4Pzn1ueysWXR{zDvHVjmh`+qg6jelwRHF>&Rb8NX-$>j}e-jI(>S=6(V^JqN4q z3*3ml|06r*%3r(S>~g*_h&`+#&#?OTyh=(DDx0GWPgm;1lfLEi<1aU$PfsW#1Yg$0 zQkHdY&wbFNa((ZnJvtZ^Sm&uoAk1yx74;m{JD#9CQAyFl-)AdJRResweWFS^2J22H z+*AKVvdT@x<8EI-xc(S~gBZ;|{NE}mwevMpeyI>Xi-NMpW@C2tOh(N}Z6x}~1h)-amiHuW#EA7Pp#I}NO;}h9o?R%+NFW7tRhSR^A zfjlUHGD7_4 zKc8Rk8?$Q&D#`vJ%Gvh~)gZaq-!5KbYEiB+> zOLbYM7*PRF)g#11vFxt#O5cRbBuAH9=R?`vXc-YgkGcvWjZ;qE5RUq9zkhe=8YQAu zLpD47^qNsnsB~)AfDjC!o`8M(-A35**^qP-Cv9qQ^NMzE2XLpQ8fdvzci-Y&x!2kx z!(aoXf^hSsNqMVopq1B(#ackhb%bI8&f0xTX;ITM&$4+bhP3aYz{P0pG(KfXPx_&6 zPDDP0Vv8x2A)|36bbXGI(;3xkA5I~{)XnoQq;% zS0{YMXXVy0A1toCFt@m2rZRqst2a~A{AdN)o}z%u?X5INlm~-Uy;f0I2_mfFQ;50* zMUZhLGQA_kYzYESX0*0uApA>TY)j*caoG41P~p?rQqJgaqSZBU1>2)>a`D5S=8@c4 
z0`CByK`5DYqKGI>DPSM>D%*;0t4D_7AtZBx49$I|J0YTu8~`T5ycjm-^xC}t8;I!j zTNSjNX^-84-d6AHiFG;ZcF>ghWf?W27|C^vXR6sb?~@1m7vENwPD@WSEP>hG1{nmA?_ zhH9Hje^9Y4pQ+~(IMx2!f;ij8*u~US3*S?8FMRXdXKH~Tzk>4v-wGz=?Sbq7{2z0z zl)^li5{i^wU-2kw_zxk(xz`wPvfC6-97*hRw~c}i4qKCx_d2_kV_n`~Zm6`c)7CoK zk|mc99!BFn1Z@{oALKmwqi}_|MH$5{~c6(Tv$OVLC3rA7fcK zpOOcs%#-IG^MUUsy$l_)*n@=NEk~c6YiP@iWktrx1H^(KmW=evj$JKTq z7g=1i4W{fkFu;Lwy_Jb_U4S@qAE!KBcbHD`OtM>nWVUJhVStPK2}*Sz5aIeXk2$EJZA>6JT)T zQ7&GFS)e`&NG!jlOZ<=PB^^V|sH4ZMoRsj%Z~}9aOD1T^J6a{Tn>gkT@=BoDpgW?# zo{jx0$`=7L0X4L_eiA|9xkVFj;c5oPW8?B}F|$REt3746hBlz;D_p9NN~xbA7smCt za$sTZ*#mQUzS7n7LvrM_frD>hZPUF6mCVhB4=Ajr#U$N$9T?hcv4e)4lzOhdnk`fw zG06%RmOmw(@ggq5d6@v>`A!+ymKtL|`^!US+l-rOm8p2CUU|HKq>2j4k4Ii{9T7Hs z?9P!lL%=193PKaN(imM!7@*=5R&Ju{5`C)ai8?Q~^fKRGs4hE(`DFj|7oz{UQVE@+ zBtm6kMrvW$ktB8~4DG=}9;3K;)#G5seFSkrAM=IE(97wp0FSq~w<60)Dp&%1pY?VE zpp8?VTdgV{DrT=y8($HDmp|4$4wbmi`%>sQ?5`|$C_pkMQ z!eF^TWqmfI(bA~HhGx~CTD`dlmRAk9pQm>*=;&qu9}H85$4u7r<@(%Y!M#@`5InVR-5L=*RwSk)}OF0=fr<`qsBl zF@ZDWmU;Xz)o$3H$QUl@ij_#B&!^dZ%{P?m;`-^ZDW3|}F2!cx;aD{RZr9%B};wKNG zMtPi{(bPY6)Id7-R!?CE3QG|zvp-3|usTaMQXxD!%~F>R>*iCwGY1UWYb-Zc9&tJ! 
z#H@UU7+L1oEONPbRi?mDP2QD@Mq3$~1wp&sY3@i)=p9DD9IXt^1hTT6gGmbDTBoLNR>O?b6X7;nAu(rfg#;S-0gwe09=c`|1iKbnV9xZk6YS?X+?4J?lCB zsCJ*ekO=;POaxaWTy<w5JeoIpwu^V;d29Gu5*qT9E-`H)t>OTJE%j4A z3qIg zHYqiURN3-8^>)spil~$%?3&_KXurfP1g$$XpF;@ag^z=t+i(bG@1 zqyL5ctNvB0OP*uKvtVh?+>~eN30zetw5Igw3uz_SDfaJq+b@lPTa)z{T`myEDDD}| zMl|#1bHU-$L**WRk`dG2b{XbohIS8L!HU)#6=4J7OBebx$<-mWy6uN5IQsa%4;W#k zY%yZdX47N#|M+4mi_RUzQa3+==^BE9%&Z45YjSZ|7Aa4iw~~UfBnZQ=dli5MW9!lH zlW@a*&ZZ((_0#aX-%F~X)}pG7zlcOIClUnG_E9~rKFZjX=exQ#B0q7(brv3C82}af)IS~kTM#^BsY<~V=6mN7uZQ|Fq8d=o`Orz81hsNu z|1ov&F_oWUP(gfeaD|&_1Y`)?`DR|zC)ovX>i*O{h6r*K-i7*^icAR8{bYy!y^KzZ zE#Y4bi&><-0d3ruayNzulu5L^0;A-IjRK9%eoNU&pY_#Bs=4ldM*VxZ$X+t3 z*|ApH)C~t+r3?dV@PXS|M7qgS0;Da#M=V#jRHDVH4zq}2f}h2~I=4ivBRI*SKi2|UXdUNybMjGZ0JSSQx>Sn4hl$4-V{7*)&lepce-vSq+ z|5sI0fZd)?uUfcuPYUu6xkGPF%xTYn{VP*a*?nyG(%&xSO0dPgsK4+=83?q#wkbH< z(x^>dQc>!U--n*SqP|IzLN{js>tqNf<7EUl*2ZO&sP(mEVE{e3T`A^vd+~E%+Tk^Sc;y70l>3*p7^2*wqMI zKe1h^PMt9J%E&64lO4Ey0;S6q@E-dO2d$6gNEPVP2N@5y^dDB9Faoz2w+oA|B?A8W`*N8fZ`)yl z4pKti1zXyEbGSw1jDn;L;$RyaXY_p~f;<46xB;*$Z}C4Uh)RbI5^im4qRFPP){Bj` zB29|D>-*%isjlWAE&$Z)EOM8SpGu3RzXS2k6D3iwh%bau2N=!C5MK z`#52by-pEifyL*xVPRi7RUfaW^;fx$FFn8fDM4Yo$2nTfQCMXVe6gTv&U@8i7tc|Ln-e^i4@AXM3dlU9B$N&)S z5m=`3RYuo>z>zJSTwZl@4fSNC*H za?kKnsD^H@JmHZ-+|FU-*mlmVIMwoX3NL>OA)sQ%KOiD z$n|(EV8oi=-RkxHCIU69HEm1RccFi2_96Vf-AbnSv0^ssA62Ny1hLD)7vQ3toPvP5Hgb3Q{ZM3w zi77{*scE?!#9c%B(tJmH^~$M^73_^fM%O(LSTK+)D9<1=m%cq+55x~1;Vpb@UN10; zLWEQFQAXz;N>+1ddQ#z47;!lh_h~SdGY@FqDr;HW?&k?8xAv^(*Z$hz*R3tt+Qvi* zsHz`utnURZ+^m`{g!l9&Yh2u1okJHx_U{xCPw?>uPrh%p8HVr?*l3ETL`>-S zP+wIse<5HFnNo&xT`lrVS=v^I++v1S=;?s~kw|CqQub0%w(_etAeb9t&h<0Xtm)pb zD4v)T%ca<(T*73z%v`;E7skWuM!dK|KtEhpNA{BPJb)d#G9S`X=`i!)O#`~GWpWx0 zt>$958-lKWVra6zFKY--ZW+sDspy~A@`iee2sAL@z_A58?|o=;p1GEX8?^79ASY1a z*F%_Vaq+4ZJQmLw46XJ|9z#R96y>n)?CvZ<`FUzqdrB0;cAqQm-rKP{N^pLMQjX*{ zHNmK!dE4rz=8I*r)n|bBg3AEdQ&u(LWb-%bu0~38pFnCMW2Ayz{EAe?Z0LME+5paW z^)tiKMj>z(tLS7IGhk=c-nqC+fsPaU{V~;ZILgH|8#R6UwCQ-yn4_&};D(r}g!%D1 
zj7g)B65kD{%WXYd0Ud9pg;pT3d+a##p*C~8(veqzlPjz(oYWg z^sxe(^8jTWrhhzapg%-L_}YdmE01(L_BAx(_hB5>Oo1*2PXk$TK}tp{0#p$zVG`BxV^q$Bx{7kJ z`a!!LHzE9K6uA0)pu&^kpJzC(^rfEGlkUSMM5}A$s5Mx-Bh_f5M1%*7G28a2AqkG) zYNHwwi)+a(%cB<{Sx;FUx3vv82fZoDUStrnXpsN*BcR|PsdM?G%vJ}*B`dv;1y+2R z>M#AxyWyJ(LrZXV8>%K0RXb0&9LF2IJjO8xr+ka*{D6fA1!qb~j|xRKD24~Al9WAr z%?x|k$ES`N0>ChPj+XGw4cbJ4n?b4Y+is_eq}&s$3VOF;0zEVgU+sldw9&SD6hF$0 z)1GFF3pgngpSjrPQoBq==gcgoNuI)1o?DgnLzR{fhRB)(4q2NB{ai!bM|!Cp|o&Z2vc z(}NFMpC_Vs>9ndTsl`!9)hlK#mGw=B6KUu=4mX|(*TU6DdIbk7uBy)vJ^qA?i$8;V z|9%>PjqWagB6Oq=74Axo&u8~Pxq5j&TR;FN7bUGEv`m&}bPqS7Di^}xcA@L*b*64< zVlpA4)hq+OdaZ;(_Cm6r79~K7>kV-GepoxC{vR0<(-%4pkf&@lr!86dRnp-e)<)&E z0!5&LVE`MWt8FbnABwco&=;KTbJ{#+EbGwv$H=Wgat*O9bgxy#V1%8<1C zC0CLS$=}SzN%sb6Gz3}O!A8S+R)_{LE8-9IW7?(~1D0ugaQdbku92&BABR zpTPx&u!4_xk(!0%K=5OaThw%}tB)y1UXA??>0mS?DmF#U(s{j^?qC;5I#?B}o`%*Kbvc!=55Cv(TRM-Ndo(MrS1g%CSmgeg= zlV8@_5SS2}u9WS#d0k}cL%e(B&mi(bg+uIlc-TnQ7u&6>z{;0X@y>uwSWLYvvC&8o zzV>OaP>-*N!EqK1)fgd&pTAG--1?z58;m7e9iAUGuPm`ap_%w~?Xbrfm2-};fn8@X zcQw*eI~X7?YYoZ>&<9N1NgcOt(Xh=Fj;$EIGj!}04t3aq!|Ta>$uQP(3X?iBg`wtw z!W5(8UT3?x!qO5l@kr(*vCk7JHae~vDH*dLBV&82zFw_gJaCt+@b@^GeAn%(NN)}t z+!(zn3QL~p+tRD-E8oluQpsMe`PPB3 z;!LL21xffg9#}THYnIsTm8RdK8il?%4^T;Jp$36}6VE$x3s%9tofD6tGe=kk)2v8( z{vfH!_F^Iq(zQtbtz*!g!oO-9OqQMM`Rp@;MV()vWf@D^1=p4mE9@{!zJZ1AJWGyZ zz{+r|#I#myYR`+0`!tszAIQt8-<+c2|G>u=^(ojTzFR$!HQq&!3it^Zik?TY&2DVR z2x25tx87b@CQ}jvMrIb65+?KxmiWgQ=0@e$j-Ykf{q+^zxdShIkO{tF8}7jCM)bu^ zYLqtVsQHcWfa@bza&D&Tq4tPPTJ8^6SsU$3zi>3#&&tg?c^vOfFUneJ**_pbiKN}p znLOW#(eUy9uRoh7f6@N=g1x*g70$tU#=9#}q-IQu>@VSzV05CLmh;V2Vots(hE~i9u*S1Jo)P#J(kkH{K}?r^sHG2 zw|UG7$j7c-KvUe@8;~!xb)$M#CTc?aBi&PgmMC@eegU#I@ib71t;?Wo723GHjur85 z%?3bEGD>}U_Kjs1NUxcKWKN@?qOoBiEMPm&y`>&T!R~^DsKjpb)TW0IdJYE&{N>`a zAD@qdFMO#7gm;2D^z-5g?T(}hs7vp13lI2I!b5YVepR~h(ihN`fy@3=Aa~LRy7GGB z=@TDndtU7S(Vi0E0R;9={yAA%k{*+<7T0GHzNjxEX)!i^&@M1}g}h}JR5E+1N7}9k z)O>?PlEn$AaHZ@Y*u3OHHZq|dep5(lW@9EIep?%+K#n$B5e>xp2KnT=*HOtJ_EP+W 
zH^YgG{jK62Vm~z{;AvgivJw)PEQf;Dvl1(K{F|?r7Ttdy*FPh=G`%#6mb__2}}f%ya_b}P5uFecBzPDjLeKkqG2IdPo+v_sfRd4yb3 z0)Edw$ZB0AO)-z<c|0LlveN#pO$_L@A2JG5 zlZaQ>YGt7H@JL5~7*CV_zFBZ2lw5N`IvE$C5C7hG?vk9vcR?nBt7BuImg2W&Yi+H* zn&CLMlh`n#;*>Cj3vkPh_i$y;(m;9dS%iE%#%hbBIFhsf z=zyU5xA*M_OfQ3KA!n^lqigAYwT-?+{&)5oANT$I)5_Pv%emk8i}=PT^}(lm2dcDn zEs84N@i#1B6K!hRZ=w9Lp5rrq@I4#tyQk#%MRFq6v2W2^}WLZwo%DD($h&>0e$-p5FzQaxXgIuL-^8KFuKfHgx0qeAkHtYWqS;j7P5? zJ{`twLDLHBMX{2-&$?i($lY9fAbQ=XcU;5UW0YvsC%uMrBxWf2QzlOjreMUvM@ai> zS$qoVbtez5!xPb;-4iG=iQZ)2+4Wz6NykbyZ%M=}2eo=~a@xEjBV1O`reDbDH2{$` z5(1bBZ2GJyo>Q`M7$dV(exsXMxET6xVuKG|3g;-b8M%_6sdVrk{?XCgLUYoeSjAe6 z6!;M=1gTYqx<*z_xB#o8n`Jg-p>}YAHm$&<}ga)pKYs|mgAvL=-)hKOw2eNytZ9|iR6z&CZ->okpGxV9o8zP2#{zX`2RL`M0 ztbh%{KtqQhMGT%mG;)~~D&<14$p25{?R(@KTuBxh=1X<~)d&L&qzQ)(L`fDD3SX)Xb{=-#&N{5|IBbUqSzES(w?IxmdX{xVU($t096w?=_Yi7dDj}|JS>D!h?W9 doPmRY{7 Date: Sat, 18 Jun 2022 23:59:43 +0000 Subject: [PATCH 030/184] Update cargo lockfile to fix RUSTSEC-2022-0025, RUSTSEC-2022-0026 and RUSTSEC-2022-0027 (#3278) ## Issue Addressed Fixes [RUSTSEC-2022-0025](https://rustsec.org/advisories/RUSTSEC-2022-0025), [RUSTSEC-2022-0026](https://rustsec.org/advisories/RUSTSEC-2022-0026) and [RUSTSEC-2022-0027](https://rustsec.org/advisories/RUSTSEC-2022-0027) raised in [this test run](https://github.com/sigp/lighthouse/runs/6943343852?check_suite_focus=true) ## Proposed Changes a `cargo update` was enough ## Additional Info --- Cargo.lock | 684 +++++++++++++++++++++++++++-------------------------- 1 file changed, 344 insertions(+), 340 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec6a98fb0c..c147af0927 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -111,7 +111,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", "once_cell", "version_check", ] @@ -141,15 +141,15 @@ dependencies = [ [[package]] name = "anyhow" 
-version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4361135be9122e0870de935d7c439aef945b9f9ddd4199a553b5270b49c82a27" +checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc" [[package]] name = "arbitrary" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c38b6b6b79f671c25e1a3e785b7b82d7562ffc9cd3efdc98627e5668a2472490" +checksum = "25e0a02cf12f1b1f48b14cb7f8217b876d09992b39c816ffb3b1ba64dd979a87" dependencies = [ "derive_arbitrary", ] @@ -180,9 +180,9 @@ checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" [[package]] name = "async-trait" -version = "0.1.53" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600" +checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716" dependencies = [ "proc-macro2", "quote", @@ -199,7 +199,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.8", + "pin-project-lite 0.2.9", ] [[package]] @@ -241,9 +241,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" -version = "0.3.64" +version = "0.3.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" +checksum = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61" dependencies = [ "addr2line", "cc", @@ -301,7 +301,7 @@ dependencies = [ "maplit", "merkle_proof", "operation_pool", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "proto_array", "rand 0.8.5", "rayon", @@ -472,13 +472,14 @@ dependencies = [ [[package]] name = "blst" -version = "0.3.6" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1f073f59a150a1dca74aab43d794ae5a7578d52bb1e73121e559f3ee3e6a837e" +checksum = "c521c26a784d5c4bcd98d483a7d3518376e9ff1efbcfa9e2d456ab8183752303" dependencies = [ "cc", "glob", "threadpool", + "which", "zeroize", ] @@ -538,9 +539,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.9.1" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" +checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3" [[package]] name = "byte-slice-cast" @@ -593,7 +594,7 @@ dependencies = [ "eth2_ssz_derive", "eth2_ssz_types", "ethereum-types 0.12.1", - "quickcheck 0.9.2", + "quickcheck", "quickcheck_macros", "smallvec", "tree_hash", @@ -663,7 +664,7 @@ dependencies = [ "libc", "num-integer", "num-traits", - "time 0.1.43", + "time 0.1.44", "winapi", ] @@ -678,9 +679,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.3.1" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cc00842eed744b858222c4c9faf7243aafc6d33f92f96935263ef4d8a41ce21" +checksum = "5a050e2153c5be08febd6734e29298e844fdb0fa21aeddd63b4eb7baa106c69b" dependencies = [ "glob", "libc", @@ -739,7 +740,7 @@ dependencies = [ "lighthouse_network", "monitoring_api", "network", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sensitive_url", "serde", "serde_derive", @@ -889,9 +890,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" +checksum = "4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c" dependencies = [ "cfg-if", "crossbeam-utils", @@ -910,26 +911,26 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" +checksum = "07db9d94cbd326813772c968ccd25999e5f8ae22f4f8d1b11effa37ef6ce281d" dependencies = [ "autocfg 1.1.0", "cfg-if", "crossbeam-utils", - "lazy_static", "memoffset", + "once_cell", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" +checksum = "8ff1f980957787286a554052d03c7aee98d99cc32e09f6d45f0a814133c87978" dependencies = [ "cfg-if", - "lazy_static", + "once_cell", ] [[package]] @@ -1013,11 +1014,11 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.2.1" +version = "3.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19c6cedffdc8c03a3346d723eb20bd85a13362bb96dc2ac000842c6381ec7bf" +checksum = "b37feaa84e6861e00a1f5e5aa8da3ee56d605c9992d33e082786754828e20865" dependencies = [ - "nix", + "nix 0.24.1", "winapi", ] @@ -1049,9 +1050,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.13.2" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e92cb285610dd935f60ee8b4d62dd1988bd12b7ea50579bd6a138201525318e" +checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ "darling_core", "darling_macro", @@ -1059,9 +1060,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.13.2" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c29e95ab498b18131ea460b2c0baa18cbf041231d122b0b7bfebef8c8e88989" +checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", @@ -1073,9 +1074,9 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.13.2" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b21dd6b221dd547528bd6fb15f1a3b7ab03b9a06f76bff288a8c629bcfbe7f0e" +checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core", "quote", @@ -1178,9 +1179,9 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98e23c06c035dac87bd802d98f368df73a7f2cb05a66ffbd1f377e821fac4af9" +checksum = "8728db27dd9033a7456655aaeb35fde74425d0f130b4cb18a19171ef38a1b454" dependencies = [ "proc-macro2", "quote", @@ -1296,7 +1297,7 @@ dependencies = [ "smallvec", "tokio", "tokio-stream", - "tokio-util 0.6.9", + "tokio-util 0.6.10", "tracing", "tracing-subscriber", "uint", @@ -1335,9 +1336,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.4.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d5c4b5e5959dc2c2b89918d8e2cc40fcdd623cef026ed09d2f0ee05199dc8e4" +checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" dependencies = [ "signature", ] @@ -1417,7 +1418,7 @@ dependencies = [ "base16ct", "crypto-bigint", "der 0.5.1", - "ff 0.11.0", + "ff 0.11.1", "generic-array", "group 0.11.0", "rand_core 0.6.3", @@ -1428,9 +1429,9 @@ dependencies = [ [[package]] name = "encoding_rs" -version = "0.8.30" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" +checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" dependencies = [ "cfg-if", ] @@ -1535,7 +1536,7 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "merkle_proof", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "reqwest", "sensitive_url", "serde", @@ -1762,9 +1763,9 @@ dependencies = [ [[package]] name = "ethabi" -version = "17.0.0" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b69517146dfab88e9238c00c724fd8e277951c3cc6f22b016d72f422a832213e" +checksum = "f186de076b3e77b8e6d73c99d1b52edc2a229e604f4b5eb6992c06c11d79d537" dependencies = [ "ethereum-types 0.13.1", "hex", @@ -1839,7 +1840,7 @@ dependencies = [ "arrayvec", "bytes", "elliptic-curve 0.11.12", - "ethabi 17.0.0", + "ethabi 17.1.0", "generic-array", "hex", "k256 0.10.4", @@ -1887,7 +1888,7 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "lru", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.8.5", "reqwest", "sensitive_url", @@ -1955,9 +1956,9 @@ dependencies = [ [[package]] name = "ff" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2958d04124b9f27f175eaeb9a9f383d026098aa837eadd8ba22c11f13a05b9e" +checksum = "131655483be284720a17d74ff97592b8e76576dc25563148601df2d7c9080924" dependencies = [ "rand_core 0.6.3", "subtle", @@ -2007,13 +2008,11 @@ checksum = "279fb028e20b3c4c320317955b77c5e0c9701f05a1d309905d6fc702cdc5053e" [[package]] name = "flate2" -version = "1.0.22" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" +checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" dependencies = [ - "cfg-if", "crc32fast", - "libc", "libz-sys", "miniz_oxide", ] @@ -2150,7 +2149,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e01fe9932a224b72b45336d96040aa86386d674a31d0af27d800ea7bc8ca97fe" dependencies = [ "futures-io", - "rustls 0.20.4", + "rustls 0.20.6", "webpki 0.22.0", ] @@ -2185,7 +2184,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.8", + "pin-project-lite 0.2.9", "pin-utils", "slab", ] @@ -2236,13 +2235,13 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" +checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" dependencies = [ "cfg-if", "libc", - "wasi 0.10.2+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] @@ -2306,7 +2305,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" dependencies = [ - "ff 0.11.0", + "ff 0.11.1", "rand_core 0.6.3", "subtle", ] @@ -2326,7 +2325,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.1", + "tokio-util 0.7.3", "tracing", ] @@ -2345,13 +2344,19 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3" + [[package]] name = "hashlink" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf" dependencies = [ - "hashbrown", + "hashbrown 0.11.2", ] [[package]] @@ -2360,7 +2365,7 @@ version = "0.2.0" dependencies = [ "futures", "tokio", - "tokio-util 0.6.9", + "tokio-util 0.6.10", ] [[package]] @@ -2478,24 +2483,24 @@ dependencies = [ [[package]] name = "http" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" +checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 1.0.1", + "itoa 1.0.2", ] [[package]] name = "http-body" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" 
dependencies = [ "bytes", "http", - "pin-project-lite 0.2.8", + "pin-project-lite 0.2.9", ] [[package]] @@ -2517,7 +2522,7 @@ dependencies = [ "lighthouse_version", "logging", "network", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "safe_arith", "sensitive_url", "serde", @@ -2557,9 +2562,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9100414882e15fb7feccb4897e5f0ff0ff1ca7d1a86a23208ada4d7a18e6c6c4" +checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" [[package]] name = "httpdate" @@ -2575,9 +2580,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.18" +version = "0.14.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" +checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f" dependencies = [ "bytes", "futures-channel", @@ -2588,8 +2593,8 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.1", - "pin-project-lite 0.2.8", + "itoa 1.0.2", + "pin-project-lite 0.2.9", "socket2", "tokio", "tower-service", @@ -2686,7 +2691,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.1.2", + "parity-scale-codec 3.1.5", ] [[package]] @@ -2720,12 +2725,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" +checksum = "6c6392766afd7964e2531940894cffe4bd8d7d17dbc3c1c4857040fd4b33bdb3" dependencies = [ "autocfg 1.1.0", - "hashbrown", + "hashbrown 0.12.1", ] [[package]] @@ -2769,9 +2774,9 @@ dependencies 
= [ [[package]] name = "ipnet" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e70ee094dc02fd9c13fdad4940090f22dbd6ac7c9e7094a46cf0232a50bc7c" +checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" [[package]] name = "itertools" @@ -2790,15 +2795,15 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" +checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" [[package]] name = "js-sys" -version = "0.3.56" +version = "0.3.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" +checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27" dependencies = [ "wasm-bindgen", ] @@ -2820,9 +2825,9 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "8.0.1" +version = "8.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "012bb02250fdd38faa5feee63235f7a459974440b9b57593822414c31f92839e" +checksum = "1aa4b4af834c6cfd35d8763d359661b90f2e45d8f750a0849156c7f4671af09c" dependencies = [ "base64", "pem", @@ -2859,9 +2864,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" +checksum = "f9b7d56ba4a8344d6be9729995e6b06f928af29998cdf79fe390cbf6b1fee838" [[package]] name = "lazy_static" @@ -2936,9 +2941,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.121" +version = "0.2.126" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"efaa7b300f3b5fe8eb6bf21ce3895e1751d9665086af2d64b42f19701015ff4f" +checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" [[package]] name = "libflate" @@ -2988,7 +2993,7 @@ dependencies = [ "indexmap", "libc", "mdbx-sys", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "thiserror", ] @@ -3001,7 +3006,7 @@ dependencies = [ "bytes", "futures", "futures-timer", - "getrandom 0.2.6", + "getrandom 0.2.7", "instant", "lazy_static", "libp2p-core 0.33.0", @@ -3018,7 +3023,7 @@ dependencies = [ "libp2p-websocket", "libp2p-yamux", "multiaddr 0.14.0", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "pin-project 1.0.10", "rand 0.7.3", "smallvec", @@ -3079,7 +3084,7 @@ dependencies = [ "multiaddr 0.14.0", "multihash 0.16.2", "multistream-select 0.11.0", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "pin-project 1.0.10", "prost 0.10.4", "prost-build 0.10.4", @@ -3103,16 +3108,16 @@ dependencies = [ "futures", "libp2p-core 0.33.0", "log", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "smallvec", "trust-dns-resolver", ] [[package]] name = "libp2p-gossipsub" -version = "0.38.0" +version = "0.38.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9be947d8cea8e6b469201314619395826896d2c051053c3723910ba98e68e04" +checksum = "43e064ba4d7832e01c738626c6b274ae100baba05f5ffcc7b265c2a3ed398108" dependencies = [ "asynchronous-codec", "base64", @@ -3182,7 +3187,7 @@ dependencies = [ "libp2p-core 0.33.0", "log", "nohash-hasher", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "smallvec", "unsigned-varint 0.7.1", @@ -3249,9 +3254,9 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daf2fe8c80b43561355f4d51875273b5b6dfbac37952e8f64b1270769305c9d7" +checksum = "4f693c8c68213034d472cbb93a379c63f4f307d97c06f1c41e4985de481687a5" dependencies = [ "quote", "syn", @@ -3285,7 +3290,7 @@ dependencies = [ 
"futures-rustls", "libp2p-core 0.33.0", "log", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "quicksink", "rw-stream-sink 0.3.0", "soketto", @@ -3301,7 +3306,7 @@ checksum = "8fe653639ad74877c759720febb0cbcbf4caa221adde4eed2d3126ce5c6f381f" dependencies = [ "futures", "libp2p-core 0.33.0", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "thiserror", "yamux", ] @@ -3367,9 +3372,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.5" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f35facd4a5673cb5a48822be2be1d4236c1c99cb4113cab7061ac720d5bf859" +checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" dependencies = [ "cc", "pkg-config", @@ -3443,7 +3448,7 @@ dependencies = [ "lighthouse_metrics", "lighthouse_version", "lru", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "prometheus-client", "rand 0.8.5", "regex", @@ -3462,7 +3467,7 @@ dependencies = [ "tiny-keccak", "tokio", "tokio-io-timeout", - "tokio-util 0.6.9", + "tokio-util 0.6.10", "types", "unsigned-varint 0.6.0", "unused_port", @@ -3504,9 +3509,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if", ] @@ -3524,11 +3529,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.7.5" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32613e41de4c47ab04970c348ca7ae7382cf116625755af070b008a15516a889" +checksum = "c84e6fe5655adc6ce00787cf7dcaf8dc4f998a0565d23eafc207a8b08ca3349a" dependencies = [ - "hashbrown", + "hashbrown 0.11.2", ] [[package]] @@ -3563,7 +3568,7 @@ dependencies = [ "lazy_static", "libc", "lighthouse_metrics", - "parking_lot 0.12.0", + "parking_lot 0.12.1", ] [[package]] @@ -3607,9 
+3612,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" @@ -3627,7 +3632,7 @@ dependencies = [ "eth2_hashing", "ethereum-types 0.12.1", "lazy_static", - "quickcheck 0.9.2", + "quickcheck", "quickcheck_macros", "safe_arith", ] @@ -3668,35 +3673,23 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.4.4" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc" dependencies = [ "adler", - "autocfg 1.1.0", ] [[package]] name = "mio" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52da4364ffb0e4fe33a9841a98a3f3014fb964045ce4f7a45a398243c8d6b0c9" +checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799" dependencies = [ "libc", "log", - "miow", - "ntapi", "wasi 0.11.0+wasi-snapshot-preview1", - "winapi", -] - -[[package]] -name = "miow" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" -dependencies = [ - "winapi", + "windows-sys", ] [[package]] @@ -3918,7 +3911,7 @@ dependencies = [ "task_executor", "tokio", "tokio-stream", - "tokio-util 0.6.9", + "tokio-util 0.6.10", "types", ] @@ -3935,6 +3928,17 @@ dependencies = [ "memoffset", ] +[[package]] +name = "nix" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f17df307904acd05aa8e32e97bb20f2a0df1728bbc2d771ae8f9a90463441e9" 
+dependencies = [ + "bitflags", + "cfg-if", + "libc", +] + [[package]] name = "node_test_rig" version = "0.2.0" @@ -3971,15 +3975,6 @@ dependencies = [ "minimal-lexical", ] -[[package]] -name = "ntapi" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f" -dependencies = [ - "winapi", -] - [[package]] name = "num-bigint" version = "0.4.3" @@ -4012,9 +4007,9 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg 1.1.0", "num-traits", @@ -4022,9 +4017,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.42" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" dependencies = [ "autocfg 1.1.0", "num-integer", @@ -4033,9 +4028,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg 1.1.0", ] @@ -4052,27 +4047,27 @@ dependencies = [ [[package]] name = "num_threads" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aba1801fb138d8e85e11d0fc70baf4fe1cdfffda7c6cd34a854905df588e5ed0" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" dependencies = [ "libc", ] [[package]] name = "object" -version = "0.27.1" 
+version = "0.28.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" +checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.10.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" +checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" [[package]] name = "oorandom" @@ -4088,18 +4083,30 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.38" +version = "0.10.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7ae222234c30df141154f159066c5093ff73b63204dcda7121eb082fc56a95" +checksum = "fb81a6430ac911acb25fe5ac8f1d2af1b4ea8a4fdfda0f1ee4292af2e2d8eb0e" dependencies = [ "bitflags", "cfg-if", "foreign-types", "libc", "once_cell", + "openssl-macros", "openssl-sys", ] +[[package]] +name = "openssl-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "openssl-probe" version = "0.1.5" @@ -4108,18 +4115,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.18.0+1.1.1n" +version = "111.20.0+1.1.1o" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7897a926e1e8d00219127dc020130eca4292e5ca666dd592480d72c3eca2ff6c" +checksum = "92892c4f87d56e376e469ace79f1128fdaded07646ddf73aa0be4706ff712dec" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.72" +version = "0.9.74" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "7e46109c383602735fa0a2e48dd2b7c892b048e1bf69e5c3b1d804b7d9c203cb" +checksum = "835363342df5fba8354c5b453325b110ffd54044e588c539cf2f20a8014e4cb1" dependencies = [ "autocfg 1.1.0", "cc", @@ -4140,7 +4147,7 @@ dependencies = [ "itertools", "lazy_static", "lighthouse_metrics", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rayon", "serde", "serde_derive", @@ -4174,15 +4181,15 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.1.2" +version = "3.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8b44461635bbb1a0300f100a841e571e7d919c81c73075ef5d152ffdb521066" +checksum = "9182e4a71cae089267ab03e67c99368db7cd877baf50f931e5d6d4b71e195ac0" dependencies = [ "arrayvec", "bitvec 1.0.0", "byte-slice-cast", "impl-trait-for-tuples", - "parity-scale-codec-derive 3.1.2", + "parity-scale-codec-derive 3.1.3", "serde", ] @@ -4200,9 +4207,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.1.2" +version = "3.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c45ed1f39709f5a89338fab50e59816b2e8815f5bb58276e7ddf9afd495f73f8" +checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -4223,12 +4230,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.2", + "parking_lot_core 0.9.3", ] [[package]] @@ -4247,9 +4254,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "995f667a6c822200b0433ac218e05582f0e2efa1b922a3fd2fbaadc5f87bab37" +checksum = 
"09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" dependencies = [ "cfg-if", "libc", @@ -4314,9 +4321,9 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a13a2fa9d0b63e5f22328828741e523766fff0ee9e779316902290dff3f824f" +checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" dependencies = [ "fixedbitset", "indexmap", @@ -4370,9 +4377,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" @@ -4538,11 +4545,11 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.36" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" +checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" dependencies = [ - "unicode-xid", + "unicode-ident", ] [[package]] @@ -4559,15 +4566,15 @@ dependencies = [ [[package]] name = "prometheus" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7f64969ffd5dd8f39bd57a68ac53c163a095ed9d0fb707146da1b27025a3504" +checksum = "cface98dfa6d645ea4c789839f176e4b072265d085bfcc48eaa8d137f58d3c39" dependencies = [ "cfg-if", "fnv", "lazy_static", "memchr", - "parking_lot 0.11.2", + "parking_lot 0.12.1", "protobuf", "thiserror", ] @@ -4579,7 +4586,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ac1abe0255c04d15f571427a2d1e00099016506cf3297b53853acd2b7eb87825" dependencies = [ "dtoa", - "itoa 1.0.1", + "itoa 1.0.2", "owning_ref", "prometheus-client-derive-text-encode", ] @@ -4745,7 +4752,7 @@ dependencies = [ "derive_more", "glob", "mach", - "nix", + "nix 0.23.1", "num_cpus", "once_cell", "platforms", @@ -4771,15 +4778,6 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "quickcheck" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" -dependencies = [ - "rand 0.8.5", -] - [[package]] name = "quickcheck_macros" version = "0.9.1" @@ -4804,9 +4802,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632d02bff7f874a36f33ea8bb416cd484b90cc66c1194b1a1110d067a7013f58" +checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" dependencies = [ "proc-macro2", ] @@ -4903,7 +4901,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", ] [[package]] @@ -4926,9 +4924,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.5.1" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" +checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" dependencies = [ "autocfg 1.1.0", "crossbeam-deque", @@ -4938,14 +4936,13 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.9.1" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" +checksum = 
"258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" dependencies = [ "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "lazy_static", "num_cpus", ] @@ -4964,16 +4961,16 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", "redox_syscall", "thiserror", ] [[package]] name = "regex" -version = "1.5.5" +version = "1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" +checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" dependencies = [ "aho-corasick", "memchr", @@ -4991,9 +4988,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.25" +version = "0.6.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" [[package]] name = "remove_dir_all" @@ -5006,9 +5003,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.10" +version = "0.11.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46a1f7aa4f35e5e8b4160449f51afc758f0ce6454315a9fa7d0d113e958c41eb" +checksum = "b75aa69a3f06bbcc66ede33af2af253c6f7a86b1ca0033f60c580a27074fbf92" dependencies = [ "base64", "bytes", @@ -5027,13 +5024,14 @@ dependencies = [ "mime", "native-tls", "percent-encoding", - "pin-project-lite 0.2.8", + "pin-project-lite 0.2.9", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", - "tokio-util 0.6.9", + "tokio-util 0.7.3", + "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -5171,7 +5169,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.7", + "semver 1.0.10", ] [[package]] @@ -5189,9 +5187,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.4" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fbfeb8d0ddb84706bc597a5574ab8912817c52a397f819e5b614e2265206921" +checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033" dependencies = [ "log", "ring", @@ -5229,9 +5227,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" +checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" [[package]] name = "safe_arith" @@ -5263,21 +5261,21 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" dependencies = [ "lazy_static", - "winapi", + "windows-sys", ] [[package]] name = "scheduled-thread-pool" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" +checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf" dependencies = [ - "parking_lot 0.11.2", + "parking_lot 0.12.1", ] [[package]] @@ -5398,9 +5396,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.7" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65bd28f48be7196d222d95b9243287f48d27aca604e08497513019ff0502cc4" +checksum = "a41d061efea015927ac527063765e73601444cdc344ba855bc7bd44578b25e1c" [[package]] name = "semver-parser" 
@@ -5427,9 +5425,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.136" +version = "1.0.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" +checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" dependencies = [ "serde_derive", ] @@ -5456,9 +5454,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.136" +version = "1.0.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" +checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" dependencies = [ "proc-macro2", "quote", @@ -5467,20 +5465,20 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.79" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" +checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" dependencies = [ - "itoa 1.0.1", + "itoa 1.0.2", "ryu", "serde", ] [[package]] name = "serde_repr" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98d0516900518c29efa217c298fa1f4e6c6ffc85ae29fd7f4ee48f176e1a9ed5" +checksum = "a2ad84e47328a31223de7fed7a4f5087f2d6ddfe586cf3ca25b7a165bc0a5aed" dependencies = [ "proc-macro2", "quote", @@ -5494,16 +5492,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.1", + "itoa 1.0.2", "ryu", "serde", ] [[package]] name = "serde_yaml" -version = "0.8.23" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a521f2940385c165a24ee286aa8599633d162077a54bdcae2a6fd5a7bfa7a0" +checksum = 
"707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" dependencies = [ "indexmap", "ryu", @@ -5617,9 +5615,9 @@ dependencies = [ [[package]] name = "simple_asn1" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a762b1c38b9b990c694b9c2f8abe3372ce6a9ceaae6bca39cfc46e054f45745" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ "num-bigint", "num-traits", @@ -5637,7 +5635,7 @@ dependencies = [ "eth1_test_rig", "futures", "node_test_rig", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rayon", "sensitive_url", "tokio", @@ -5666,7 +5664,7 @@ dependencies = [ "logging", "lru", "maplit", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.8.5", "rayon", "safe_arith", @@ -5821,7 +5819,7 @@ version = "0.2.0" dependencies = [ "lazy_static", "lighthouse_metrics", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "types", ] @@ -5966,7 +5964,7 @@ dependencies = [ "leveldb", "lighthouse_metrics", "lru", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "serde", "serde_derive", "slog", @@ -5991,9 +5989,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "strum" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96acfc1b70604b8b2f1ffa4c57e59176c7dbb05d556c71ecd2f5498a1dee7f8" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" dependencies = [ "strum_macros", ] @@ -6042,13 +6040,13 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.90" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "704df27628939572cd88d33f171cd6f896f4eaca85252c6e0a72d8d8287ee86f" +checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf" dependencies = [ "proc-macro2", "quote", - "unicode-xid", + "unicode-ident", ] [[package]] @@ -6162,18 +6160,18 @@ dependencies = [ 
[[package]] name = "thiserror" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" dependencies = [ "proc-macro2", "quote", @@ -6200,11 +6198,12 @@ dependencies = [ [[package]] name = "time" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi", ] @@ -6214,10 +6213,9 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2702e08a7a860f005826c6815dcac101b19b5eb330c27fe4a5928fec1d20ddd" dependencies = [ - "itoa 1.0.1", + "itoa 1.0.2", "libc", "num_threads", - "quickcheck 1.0.3", "time-macros", ] @@ -6278,9 +6276,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] @@ -6293,9 +6291,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.17.0" +version = "1.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee" +checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439" dependencies = [ "bytes", "libc", @@ -6303,8 +6301,8 @@ dependencies = [ "mio", "num_cpus", "once_cell", - "parking_lot 0.12.0", - "pin-project-lite 0.2.8", + "parking_lot 0.12.1", + "pin-project-lite 0.2.9", "signal-hook-registry", "socket2", "tokio-macros", @@ -6317,15 +6315,15 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ - "pin-project-lite 0.2.8", + "pin-project-lite 0.2.9", "tokio", ] [[package]] name = "tokio-macros" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" dependencies = [ "proc-macro2", "quote", @@ -6355,14 +6353,14 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" dependencies = [ "futures-core", - "pin-project-lite 0.2.8", + "pin-project-lite 0.2.9", "tokio", - "tokio-util 0.6.9", + "tokio-util 0.7.3", ] [[package]] @@ -6380,67 +6378,67 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.9" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" +checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", "log", - "pin-project-lite 0.2.8", + "pin-project-lite 0.2.9", "slab", "tokio", ] 
[[package]] name = "tokio-util" -version = "0.7.1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0edfdeb067411dba2044da6d1cb2df793dd35add7888d73c16e3381ded401764" +checksum = "cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45" dependencies = [ "bytes", "futures-core", "futures-sink", - "pin-project-lite 0.2.8", + "pin-project-lite 0.2.9", "tokio", "tracing", ] [[package]] name = "toml" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ "serde", ] [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.32" +version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a1bdf54a7c28a2bbf701e1d2233f6c77f473486b94bee4f9678da5a148dca7f" +checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" dependencies = [ "cfg-if", "log", - "pin-project-lite 0.2.8", + "pin-project-lite 0.2.9", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e65ce065b4b5c53e73bb28912318cb8c9e9ad3921f1d669eb0e68b4c8143a2b" +checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" dependencies = [ "proc-macro2", "quote", @@ -6449,19 +6447,19 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.24" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"90442985ee2f57c9e1b548ee72ae842f4a9a20e3f417cc38dbc5dc684d9bb4ee" +checksum = "7709595b8878a4965ce5e87ebf880a7d39c9afc6837721b21a5a816a8117d921" dependencies = [ - "lazy_static", + "once_cell", "valuable", ] [[package]] name = "tracing-log" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" +checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" dependencies = [ "lazy_static", "log", @@ -6470,9 +6468,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9df98b037d039d03400d9dd06b0f8ce05486b5f25e9a2d7d36196e142ebbc52" +checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596" dependencies = [ "ansi_term", "lazy_static", @@ -6566,7 +6564,7 @@ dependencies = [ "lazy_static", "log", "lru-cache", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "resolv-conf", "smallvec", "thiserror", @@ -6638,7 +6636,7 @@ dependencies = [ "itertools", "lazy_static", "log", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.8.5", "rand_xorshift", "rayon", @@ -6696,9 +6694,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" +checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" + +[[package]] +name = "unicode-ident" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" [[package]] name = "unicode-normalization" @@ -6723,9 +6727,9 @@ checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" [[package]] name = "unicode-xid" -version = "0.2.2" +version = 
"0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" [[package]] name = "universal-hash" @@ -6744,7 +6748,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35581ff83d4101e58b582e607120c7f5ffb17e632a980b1f38334d76b36908b2" dependencies = [ "bytes", - "tokio-util 0.6.9", + "tokio-util 0.6.10", ] [[package]] @@ -6791,7 +6795,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", "serde", ] @@ -6824,7 +6828,7 @@ dependencies = [ "lockfile", "logging", "monitoring_api", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.8.5", "reqwest", "ring", @@ -6940,7 +6944,7 @@ dependencies = [ "tokio-rustls", "tokio-stream", "tokio-tungstenite", - "tokio-util 0.6.9", + "tokio-util 0.6.10", "tower-service", "tracing", ] @@ -6971,9 +6975,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" +version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasi" @@ -6983,9 +6987,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.79" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" +checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994" dependencies = [ "cfg-if", 
"wasm-bindgen-macro", @@ -6993,9 +6997,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.79" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" +checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a" dependencies = [ "bumpalo", "lazy_static", @@ -7008,9 +7012,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.29" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb6ec270a31b1d3c7e266b999739109abce8b6c87e4b31fcfcd788b65267395" +checksum = "de9a9cec1733468a8c657e57fa2413d2ae2c0129b95e87c5b72b8ace4d13f31f" dependencies = [ "cfg-if", "js-sys", @@ -7020,9 +7024,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.79" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" +checksum = "c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7030,9 +7034,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.79" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" +checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" dependencies = [ "proc-macro2", "quote", @@ -7043,15 +7047,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.79" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" +checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be" [[package]] name = "wasm-bindgen-test" -version = "0.3.29" +version = 
"0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45c8d417d87eefa0087e62e3c75ad086be39433449e2961add9a5d9ce5acc2f1" +checksum = "68b30cf2cba841a812f035c40c50f53eb9c56181192a9dd2c71b65e6a87a05ba" dependencies = [ "console_error_panic_hook", "js-sys", @@ -7063,9 +7067,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0e560d44db5e73b69a9757a15512fe7e1ef93ed2061c928871a4025798293dd" +checksum = "88ad594bf33e73cafcac2ae9062fc119d4f75f9c77e25022f91c9a64bd5b6463" dependencies = [ "proc-macro2", "quote", @@ -7088,9 +7092,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.56" +version = "0.3.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb" +checksum = "2fed94beee57daf8dd7d51f2b15dc2bcde92d7a72304cdf662a4371008b71b90" dependencies = [ "js-sys", "wasm-bindgen", @@ -7116,7 +7120,7 @@ dependencies = [ "jsonrpc-core", "log", "once_cell", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "pin-project 1.0.10", "reqwest", "rlp", @@ -7126,7 +7130,7 @@ dependencies = [ "soketto", "tiny-keccak", "tokio", - "tokio-util 0.6.9", + "tokio-util 0.6.10", "url", "web3-async-native-tls", ] @@ -7190,9 +7194,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.2" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" +checksum = "44d8de8415c823c8abd270ad483c6feeac771fad964890779f9a8cb24fbbc1bf" dependencies = [ "webpki 0.22.0", ] @@ -7265,9 +7269,9 @@ dependencies = [ [[package]] name = "windows-sys" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5acdd78cb4ba54c0045ac14f62d8f94a03d10047904ae2a40afa1e99d8f70825" 
+checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" dependencies = [ "windows_aarch64_msvc", "windows_i686_gnu", @@ -7278,33 +7282,33 @@ dependencies = [ [[package]] name = "windows_aarch64_msvc" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" [[package]] name = "windows_i686_gnu" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" [[package]] name = "windows_i686_msvc" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" [[package]] name = "windows_x86_64_gnu" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" [[package]] name = "windows_x86_64_msvc" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" [[package]] name = "winreg" @@ -7383,7 +7387,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.8.5", "static_assertions", ] @@ -7420,5 +7424,5 @@ dependencies = [ "crc32fast", "flate2", "thiserror", - "time 0.1.43", + "time 0.1.44", ] From 
f4287197619b6fcf280ce34e9d778573f641e279 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Sun, 19 Jun 2022 23:13:40 +0000 Subject: [PATCH 031/184] Do not penalize peers on execution layer offline errors (#3258) ## Issue Addressed Partly resolves https://github.com/sigp/lighthouse/issues/3032 ## Proposed Changes Extracts some of the functionality of #3094 into a separate PR as the original PR requires a bit more work. Do not unnecessarily penalize peers when we fail to validate received execution payloads because our execution layer is offline. --- .../network/src/beacon_processor/mod.rs | 4 +- .../beacon_processor/worker/gossip_methods.rs | 10 ++++ .../src/beacon_processor/worker/mod.rs | 2 +- .../beacon_processor/worker/sync_methods.rs | 56 +++++++++++++++++++ .../network/src/sync/backfill_sync/mod.rs | 4 +- .../network/src/sync/block_lookups/mod.rs | 49 ++++++++++++++-- beacon_node/network/src/sync/manager.rs | 3 +- .../network/src/sync/range_sync/chain.rs | 4 +- 8 files changed, 121 insertions(+), 11 deletions(-) diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 4aa7c76924..6d7375cca7 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -75,7 +75,9 @@ mod work_reprocessing_queue; mod worker; use crate::beacon_processor::work_reprocessing_queue::QueuedBlock; -pub use worker::{ChainSegmentProcessId, GossipAggregatePackage, GossipAttestationPackage}; +pub use worker::{ + ChainSegmentProcessId, FailureMode, GossipAggregatePackage, GossipAttestationPackage, +}; /// The maximum size of the channel for work events to the `BeaconProcessor`. 
/// diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index b367f7f6d2..aa01841106 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -943,6 +943,16 @@ impl Worker { ); self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block)); } + Err(e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed(_))) + | Err( + e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::NoExecutionConnection), + ) => { + debug!( + self.log, + "Failed to verify execution payload"; + "error" => %e + ); + } other => { debug!( self.log, diff --git a/beacon_node/network/src/beacon_processor/worker/mod.rs b/beacon_node/network/src/beacon_processor/worker/mod.rs index f907c49b7d..04147245ea 100644 --- a/beacon_node/network/src/beacon_processor/worker/mod.rs +++ b/beacon_node/network/src/beacon_processor/worker/mod.rs @@ -10,7 +10,7 @@ mod rpc_methods; mod sync_methods; pub use gossip_methods::{GossipAggregatePackage, GossipAttestationPackage}; -pub use sync_methods::ChainSegmentProcessId; +pub use sync_methods::{ChainSegmentProcessId, FailureMode}; pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 943ee9cdaf..04ed1ff608 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -6,6 +6,7 @@ use crate::beacon_processor::DuplicateCache; use crate::metrics; use crate::sync::manager::{BlockProcessType, SyncMessage}; use crate::sync::{BatchProcessResult, ChainId}; +use beacon_chain::ExecutionPayloadError; use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, }; @@ -31,6 +32,15 @@ 
struct ChainSegmentFailed { message: String, /// Used to penalize peers. peer_action: Option, + /// Failure mode + mode: FailureMode, +} + +/// Represents if a block processing failure was on the consensus or execution side. +#[derive(Debug)] +pub enum FailureMode { + ExecutionLayer { pause_sync: bool }, + ConsensusLayer, } impl Worker { @@ -128,6 +138,7 @@ impl Worker { BatchProcessResult::Failed { imported_blocks: imported_blocks > 0, peer_action: e.peer_action, + mode: e.mode, } } } @@ -158,6 +169,7 @@ impl Worker { BatchProcessResult::Failed { imported_blocks: false, peer_action: e.peer_action, + mode: e.mode, } } } @@ -177,6 +189,7 @@ impl Worker { BatchProcessResult::Failed { imported_blocks: imported_blocks > 0, peer_action: e.peer_action, + mode: e.mode, } } (imported_blocks, Ok(_)) => { @@ -257,6 +270,7 @@ impl Worker { message: String::from("mismatched_block_root"), // The peer is faulty if they send blocks with bad roots. peer_action: Some(PeerAction::LowToleranceError), + mode: FailureMode::ConsensusLayer, } } HistoricalBlockError::InvalidSignature @@ -271,6 +285,7 @@ impl Worker { message: "invalid_signature".into(), // The peer is faulty if they bad signatures. peer_action: Some(PeerAction::LowToleranceError), + mode: FailureMode::ConsensusLayer, } } HistoricalBlockError::ValidatorPubkeyCacheTimeout => { @@ -284,6 +299,7 @@ impl Worker { message: "pubkey_cache_timeout".into(), // This is an internal error, do not penalize the peer. peer_action: None, + mode: FailureMode::ConsensusLayer, } } HistoricalBlockError::NoAnchorInfo => { @@ -294,6 +310,7 @@ impl Worker { // There is no need to do a historical sync, this is not a fault of // the peer. peer_action: None, + mode: FailureMode::ConsensusLayer, } } HistoricalBlockError::IndexOutOfBounds => { @@ -306,6 +323,7 @@ impl Worker { message: String::from("logic_error"), // This should never occur, don't penalize the peer. 
peer_action: None, + mode: FailureMode::ConsensusLayer, } } HistoricalBlockError::BlockOutOfRange { .. } => { @@ -318,6 +336,7 @@ impl Worker { message: String::from("unexpected_error"), // This should never occur, don't penalize the peer. peer_action: None, + mode: FailureMode::ConsensusLayer, } } }, @@ -327,6 +346,7 @@ impl Worker { message: format!("{:?}", other), // This is an internal error, don't penalize the peer. peer_action: None, + mode: FailureMode::ConsensusLayer, } } }; @@ -365,6 +385,7 @@ impl Worker { message: format!("Block has an unknown parent: {}", block.parent_root()), // Peers are faulty if they send non-sequential blocks. peer_action: Some(PeerAction::LowToleranceError), + mode: FailureMode::ConsensusLayer, }) } BlockError::BlockIsAlreadyKnown => { @@ -402,6 +423,7 @@ impl Worker { ), // Peers are faulty if they send blocks from the future. peer_action: Some(PeerAction::LowToleranceError), + mode: FailureMode::ConsensusLayer, }) } BlockError::WouldRevertFinalizedSlot { .. } => { @@ -423,8 +445,41 @@ impl Worker { message: format!("Internal error whilst processing block: {:?}", e), // Do not penalize peers for internal errors. peer_action: None, + mode: FailureMode::ConsensusLayer, }) } + BlockError::ExecutionPayloadError(e) => match &e { + ExecutionPayloadError::NoExecutionConnection { .. } + | ExecutionPayloadError::RequestFailed { .. } => { + // These errors indicate an issue with the EL and not the `ChainSegment`. + // Pause the syncing while the EL recovers + debug!(self.log, + "Execution layer verification failed"; + "outcome" => "pausing sync", + "err" => ?e + ); + Err(ChainSegmentFailed { + message: format!("Execution layer offline. Reason: {:?}", e), + // Do not penalize peers for internal errors. 
+ peer_action: None, + mode: FailureMode::ExecutionLayer { pause_sync: true }, + }) + } + err => { + debug!(self.log, + "Invalid execution payload"; + "error" => ?err + ); + Err(ChainSegmentFailed { + message: format!( + "Peer sent a block containing invalid execution payload. Reason: {:?}", + err + ), + peer_action: Some(PeerAction::LowToleranceError), + mode: FailureMode::ExecutionLayer { pause_sync: false }, + }) + } + }, other => { debug!( self.log, "Invalid block received"; @@ -436,6 +491,7 @@ impl Worker { message: format!("Peer sent invalid block. Reason: {:?}", other), // Do not penalize peers for internal errors. peer_action: None, + mode: FailureMode::ConsensusLayer, }) } } diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index be750e25f0..d6bb802a21 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -8,7 +8,7 @@ //! If a batch fails, the backfill sync cannot progress. In this scenario, we mark the backfill //! sync as failed, log an error and attempt to retry once a new peer joins the node. -use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; +use crate::beacon_processor::{ChainSegmentProcessId, FailureMode, WorkEvent as BeaconWorkEvent}; use crate::sync::manager::{BatchProcessResult, Id}; use crate::sync::network_context::SyncNetworkContext; use crate::sync::range_sync::{BatchConfig, BatchId, BatchInfo, BatchProcessingResult, BatchState}; @@ -554,6 +554,7 @@ impl BackFillSync { imported_blocks: false, // The beacon processor queue is full, no need to penalize the peer. 
peer_action: None, + mode: FailureMode::ConsensusLayer, }, ) } else { @@ -638,6 +639,7 @@ impl BackFillSync { BatchProcessResult::Failed { imported_blocks, peer_action, + mode: _, } => { let batch = match self.batches.get_mut(&batch_id) { Some(v) => v, diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index ece923ef59..2770171be9 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -1,7 +1,7 @@ use std::collections::hash_map::Entry; use std::time::Duration; -use beacon_chain::{BeaconChainTypes, BlockError}; +use beacon_chain::{BeaconChainTypes, BlockError, ExecutionPayloadError}; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; @@ -10,7 +10,7 @@ use smallvec::SmallVec; use store::{Hash256, SignedBeaconBlock}; use tokio::sync::mpsc; -use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent}; +use crate::beacon_processor::{ChainSegmentProcessId, FailureMode, WorkEvent}; use crate::metrics; use self::{ @@ -420,6 +420,20 @@ impl BlockLookups { BlockError::ParentUnknown(block) => { self.search_parent(block, peer_id, cx); } + + e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed(_)) + | e @ BlockError::ExecutionPayloadError( + ExecutionPayloadError::NoExecutionConnection, + ) => { + // These errors indicate that the execution layer is offline + // and failed to validate the execution payload. Do not downscore peer. + debug!( + self.log, + "Single block lookup failed. 
Execution layer is offline"; + "root" => %root, + "error" => ?e + ); + } other => { warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?other, "peer_id" => %peer_id); cx.report_peer( @@ -506,6 +520,19 @@ impl BlockLookups { } } } + Err(e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed(_))) + | Err( + e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::NoExecutionConnection), + ) => { + // These errors indicate that the execution layer is offline + // and failed to validate the execution payload. Do not downscore peer. + debug!( + self.log, + "Parent lookup failed. Execution layer is offline"; + "chain_hash" => %chain_hash, + "error" => ?e + ); + } Err(outcome) => { // all else we consider the chain a failure and downvote the peer that sent // us the last block @@ -561,11 +588,21 @@ impl BlockLookups { BatchProcessResult::Failed { imported_blocks: _, peer_action, + mode, } => { - self.failed_chains.insert(parent_lookup.chain_hash()); - if let Some(peer_action) = peer_action { - for &peer_id in parent_lookup.used_peers() { - cx.report_peer(peer_id, peer_action, "parent_chain_failure") + if let FailureMode::ExecutionLayer { pause_sync: _ } = mode { + debug!( + self.log, + "Chain segment processing failed. 
Execution layer is offline"; + "chain_hash" => %chain_hash, + "error" => ?mode + ); + } else { + self.failed_chains.insert(parent_lookup.chain_hash()); + if let Some(peer_action) = peer_action { + for &peer_id in parent_lookup.used_peers() { + cx.report_peer(peer_id, peer_action, "parent_chain_failure") + } } } } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 0003db6ab0..311fbf67c4 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -38,7 +38,7 @@ use super::block_lookups::BlockLookups; use super::network_context::SyncNetworkContext; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; -use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; +use crate::beacon_processor::{ChainSegmentProcessId, FailureMode, WorkEvent as BeaconWorkEvent}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError}; @@ -137,6 +137,7 @@ pub enum BatchProcessResult { Failed { imported_blocks: bool, peer_action: Option, + mode: FailureMode, }, } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 88837d0e12..0f5d63ea6d 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1,6 +1,6 @@ use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; -use crate::beacon_processor::ChainSegmentProcessId; use crate::beacon_processor::WorkEvent as BeaconWorkEvent; +use crate::beacon_processor::{ChainSegmentProcessId, FailureMode}; use crate::sync::{manager::Id, network_context::SyncNetworkContext, BatchProcessResult}; use beacon_chain::BeaconChainTypes; use fnv::FnvHashMap; @@ -320,6 +320,7 @@ impl SyncingChain { &BatchProcessResult::Failed { imported_blocks: false, peer_action: 
None, + mode: FailureMode::ConsensusLayer, }, ) } else { @@ -499,6 +500,7 @@ impl SyncingChain { BatchProcessResult::Failed { imported_blocks, peer_action, + mode: _, } => { let batch = self.batches.get_mut(&batch_id).ok_or_else(|| { RemoveChain::WrongChainState(format!( From a9e158663b16e5c097e809d0e6c021cd6f4b83a8 Mon Sep 17 00:00:00 2001 From: eklm Date: Mon, 20 Jun 2022 04:06:30 +0000 Subject: [PATCH 032/184] Fix validator_monitor_prev_epoch_ metrics (#2911) ## Issue Addressed #2820 ## Proposed Changes The problem is that validator_monitor_prev_epoch metrics are updated only if there is EpochSummary present in summaries map for the previous epoch and it is not the case for the offline validator. Ensure that EpochSummary is inserted into summaries map also for the offline validators. --- beacon_node/beacon_chain/src/validator_monitor.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 6292409d7f..06734d3e6e 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -231,6 +231,11 @@ impl MonitoredValidator { } } } + + /// Ensure epoch summary is added to the summaries map + fn touch_epoch_summary(&self, epoch: Epoch) { + self.with_epoch_summary(epoch, |_| {}); + } } /// Holds a collection of `MonitoredValidator` and is notified about a variety of events on the P2P @@ -306,6 +311,7 @@ impl ValidatorMonitor { // Update metrics for individual validators. 
for monitored_validator in self.validators.values() { if let Some(i) = monitored_validator.index { + monitored_validator.touch_epoch_summary(current_epoch); let i = i as usize; let id = &monitored_validator.id; From efebf712dd2ff272e75dd3d59c56a8219d34a5a1 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 20 Jun 2022 23:20:29 +0000 Subject: [PATCH 033/184] Avoid cloning snapshots during sync (#3271) ## Issue Addressed Closes https://github.com/sigp/lighthouse/issues/2944 ## Proposed Changes Remove snapshots from the cache during sync rather than cloning them. This reduces unnecessary cloning and memory fragmentation during sync. ## Additional Info This PR relies on the fact that the `block_delay` cache is not populated for blocks from sync. Relying on block delay may have the side effect that a change in `block_delay` calculation could lead to: a) more clones, if block delays are added for syncing blocks or b) less clones, if blocks near the head are erroneously provided without a `block_delay`. Case (a) would be a regression to the current status quo, and (b) is low-risk given we know that the snapshot cache is current susceptible to misses (hence `tree-states`). --- beacon_node/beacon_chain/src/snapshot_cache.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index 5585581362..d5b41366cc 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -253,12 +253,11 @@ impl SnapshotCache { .position(|snapshot| snapshot.beacon_block_root == block_root) .map(|i| { if let Some(cache) = self.snapshots.get(i) { - if block_slot > cache.beacon_block.slot() + 1 { - return (cache.clone_as_pre_state(), true); - } + // Avoid cloning the block during sync (when the `block_delay` is `None`). 
if let Some(delay) = block_delay { if delay >= MINIMUM_BLOCK_DELAY_FOR_CLONE && delay <= Duration::from_secs(spec.seconds_per_slot) * 4 + || block_slot > cache.beacon_block.slot() + 1 { return (cache.clone_as_pre_state(), true); } From 8faaa35b58599b64f07befeb8fd676b25518f2f9 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 20 Jun 2022 23:20:30 +0000 Subject: [PATCH 034/184] Enable malloc metrics for the VC (#3279) ## Issue Addressed Following up from https://github.com/sigp/lighthouse/pull/3223#issuecomment-1158718102, it has been observed that the validator client uses vastly more memory in some compilation configurations than others. Compiling with Cross and then putting the binary into an Ubuntu 22.04 image seems to use 3x more memory than compiling with Cargo directly on Debian bullseye. ## Proposed Changes Enable malloc metrics for the validator client. This will hopefully allow us to see the difference between the two compilation configs and compare heap fragmentation. This PR doesn't enable malloc tuning for the VC because it was found to perform significantly worse. The `--disable-malloc-tuning` flag is repurposed to just disable the metrics. 
--- Cargo.lock | 1 + lighthouse/tests/validator_client.rs | 11 ++++++++--- validator_client/Cargo.toml | 1 + validator_client/src/config.rs | 7 ++++++- validator_client/src/http_metrics/metrics.rs | 7 +++++++ validator_client/src/http_metrics/mod.rs | 2 ++ 6 files changed, 25 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c147af0927..de385f22cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6827,6 +6827,7 @@ dependencies = [ "lighthouse_version", "lockfile", "logging", + "malloc_utils", "monitoring_api", "parking_lot 0.12.1", "rand 0.8.5", diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index c14f5d27ba..61c239f86d 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -426,9 +426,14 @@ fn metrics_allow_origin_all_flag() { pub fn malloc_tuning_flag() { CommandLineTest::new() .flag("disable-malloc-tuning", None) - // Simply ensure that the node can start with this flag, it's very difficult to observe the - // effects of it. 
- .run(); + .run() + .with_config(|config| assert_eq!(config.http_metrics.allocator_metrics_enabled, false)); +} +#[test] +pub fn malloc_tuning_default() { + CommandLineTest::new() + .run() + .with_config(|config| assert_eq!(config.http_metrics.allocator_metrics_enabled, true)); } #[test] fn doppelganger_protection_flag() { diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 9833c046f5..8a3c8303a9 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -58,3 +58,4 @@ sensitive_url = { path = "../common/sensitive_url" } task_executor = { path = "../common/task_executor" } reqwest = { version = "0.11.0", features = ["json","stream"] } url = "2.2.2" +malloc_utils = { path = "../common/malloc_utils" } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 45e10e39e8..e56e64f5ad 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -2,7 +2,7 @@ use crate::fee_recipient_file::FeeRecipientFile; use crate::graffiti_file::GraffitiFile; use crate::{http_api, http_metrics}; use clap::ArgMatches; -use clap_utils::{parse_optional, parse_required}; +use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, parse_optional, parse_required}; use directory::{ get_network_dir, DEFAULT_HARDCODED_NETWORK, DEFAULT_ROOT_DIR, DEFAULT_SECRET_DIR, DEFAULT_VALIDATOR_DIR, @@ -293,6 +293,11 @@ impl Config { config.http_metrics.allow_origin = Some(allow_origin.to_string()); } + + if cli_args.is_present(DISABLE_MALLOC_TUNING_FLAG) { + config.http_metrics.allocator_metrics_enabled = false; + } + /* * Explorer metrics */ diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 56c1299b3f..f405f1a2b3 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -1,4 +1,5 @@ use super::Context; +use malloc_utils::scrape_allocator_metrics; use slot_clock::SlotClock; use std::time::{SystemTime, 
UNIX_EPOCH}; use types::EthSpec; @@ -206,6 +207,12 @@ pub fn gather_prometheus_metrics( } } + // It's important to ensure these metrics are explicitly enabled in the case that users aren't + // using glibc and this function causes panics. + if ctx.config.allocator_metrics_enabled { + scrape_allocator_metrics(); + } + warp_utils::metrics::scrape_health_metrics(); encoder diff --git a/validator_client/src/http_metrics/mod.rs b/validator_client/src/http_metrics/mod.rs index 51a2d3f8a5..c30d603447 100644 --- a/validator_client/src/http_metrics/mod.rs +++ b/validator_client/src/http_metrics/mod.rs @@ -56,6 +56,7 @@ pub struct Config { pub listen_addr: IpAddr, pub listen_port: u16, pub allow_origin: Option, + pub allocator_metrics_enabled: bool, } impl Default for Config { @@ -65,6 +66,7 @@ impl Default for Config { listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), listen_port: 5064, allow_origin: None, + allocator_metrics_enabled: true, } } } From 2063c0fa0de95e257be0ee99025855a72d03c469 Mon Sep 17 00:00:00 2001 From: Divma Date: Wed, 22 Jun 2022 14:27:16 +0000 Subject: [PATCH 035/184] Initial work to remove engines fallback from the `execution_layer` crate (#3257) ## Issue Addressed Part of #3160 ## Proposed Changes Use only the first url given in the execution engine, if more than one is provided log it. This change only moves having multiple engines to one. The amount of code cleanup that can and should be done forward is not small and would interfere with ongoing PRs. I'm keeping the changes intentionally very very minimal. ## Additional Info Future works: - In [ `EngineError` ](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/engines.rs#L173-L177) the id is not needed since it now has no meaning. 
- the [ `first_success_without_retry` ](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/engines.rs#L348-L351) function can return a single error. - the [`first_success`](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/engines.rs#L324) function can return a single error. - After the redundancy is removed for the builders, we can probably make the [ `EngineErrors` ](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/lib.rs#L69) carry a single error. - Merge the [`Engines`](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/engines.rs#L161-L165) struct and [`Engine` ](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/engines.rs#L62-L67) - Fix the associated configurations and cli params. Not sure if both are done in https://github.com/sigp/lighthouse/pull/3214 In general I think those changes can be done incrementally and in individual pull requests. --- beacon_node/execution_layer/src/engines.rs | 275 +++++++++------------ beacon_node/execution_layer/src/lib.rs | 211 +++++++--------- 2 files changed, 201 insertions(+), 285 deletions(-) diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 719db74c54..d3c4d0e421 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -156,10 +156,11 @@ impl Builder for Engine { } } -/// Holds multiple execution engines and provides functionality for managing them in a fallback -/// manner. +// This structure used to hold multiple execution engines managed in a fallback manner. 
This +// functionality has been removed following https://github.com/sigp/lighthouse/issues/3118 and this +// struct will likely be removed in the future. pub struct Engines { - pub engines: Vec>, + pub engine: Engine, pub latest_forkchoice_state: RwLock>, pub log: Logger, } @@ -185,7 +186,7 @@ impl Engines { *self.latest_forkchoice_state.write().await = Some(state); } - async fn send_latest_forkchoice_state(&self, engine: &Engine) { + async fn send_latest_forkchoice_state(&self) { let latest_forkchoice_state = self.get_latest_forkchoice_state().await; if let Some(forkchoice_state) = latest_forkchoice_state { @@ -194,7 +195,7 @@ impl Engines { self.log, "No need to call forkchoiceUpdated"; "msg" => "head does not have execution enabled", - "id" => &engine.id, + "id" => &self.engine.id, ); return; } @@ -203,12 +204,13 @@ impl Engines { self.log, "Issuing forkchoiceUpdated"; "forkchoice_state" => ?forkchoice_state, - "id" => &engine.id, + "id" => &self.engine.id, ); // For simplicity, payload attributes are never included in this call. It may be // reasonable to include them in the future. - if let Err(e) = engine + if let Err(e) = self + .engine .api .forkchoice_updated_v1(forkchoice_state, None) .await @@ -217,98 +219,77 @@ impl Engines { self.log, "Failed to issue latest head to engine"; "error" => ?e, - "id" => &engine.id, + "id" => &self.engine.id, ); } } else { debug!( self.log, "No head, not sending to engine"; - "id" => &engine.id, + "id" => &self.engine.id, ); } } - /// Returns `true` if there is at least one engine with a "synced" status. - pub async fn any_synced(&self) -> bool { - for engine in &self.engines { - if *engine.state.read().await == EngineState::Synced { - return true; - } - } - false + /// Returns `true` if the engine has a "synced" status. + pub async fn is_synced(&self) -> bool { + *self.engine.state.read().await == EngineState::Synced } - - /// Run the `EngineApi::upcheck` function on all nodes which are currently offline. 
- /// - /// This can be used to try and recover any offline nodes. + /// Run the `EngineApi::upcheck` function if the node's last known state is not synced. This + /// might be used to recover the node if offline. pub async fn upcheck_not_synced(&self, logging: Logging) { - let upcheck_futures = self.engines.iter().map(|engine| async move { - let mut state_lock = engine.state.write().await; - if *state_lock != EngineState::Synced { - match engine.api.upcheck().await { - Ok(()) => { - if logging.is_enabled() { - info!( - self.log, - "Execution engine online"; - "id" => &engine.id - ); - } - - // Send the node our latest forkchoice_state. - self.send_latest_forkchoice_state(engine).await; - - *state_lock = EngineState::Synced + let mut state_lock = self.engine.state.write().await; + if *state_lock != EngineState::Synced { + match self.engine.api.upcheck().await { + Ok(()) => { + if logging.is_enabled() { + info!( + self.log, + "Execution engine online"; + ); } - Err(EngineApiError::IsSyncing) => { - if logging.is_enabled() { - warn!( - self.log, - "Execution engine syncing"; - "id" => &engine.id - ) - } + // Send the node our latest forkchoice_state. + self.send_latest_forkchoice_state().await; - // Send the node our latest forkchoice_state, it may assist with syncing. - self.send_latest_forkchoice_state(engine).await; - - *state_lock = EngineState::Syncing + *state_lock = EngineState::Synced + } + Err(EngineApiError::IsSyncing) => { + if logging.is_enabled() { + warn!( + self.log, + "Execution engine syncing"; + ) } - Err(EngineApiError::Auth(err)) => { - if logging.is_enabled() { - warn!( - self.log, - "Failed jwt authorization"; - "error" => ?err, - "id" => &engine.id - ); - } - *state_lock = EngineState::AuthFailed + // Send the node our latest forkchoice_state, it may assist with syncing. 
+ self.send_latest_forkchoice_state().await; + + *state_lock = EngineState::Syncing + } + Err(EngineApiError::Auth(err)) => { + if logging.is_enabled() { + warn!( + self.log, + "Failed jwt authorization"; + "error" => ?err, + ); } - Err(e) => { - if logging.is_enabled() { - warn!( - self.log, - "Execution engine offline"; - "error" => ?e, - "id" => &engine.id - ) - } + + *state_lock = EngineState::AuthFailed + } + Err(e) => { + if logging.is_enabled() { + warn!( + self.log, + "Execution engine offline"; + "error" => ?e, + ) } } } - *state_lock - }); + } - let num_synced = join_all(upcheck_futures) - .await - .into_iter() - .filter(|state: &EngineState| *state == EngineState::Synced) - .count(); - - if num_synced == 0 && logging.is_enabled() { + if *state_lock != EngineState::Synced && logging.is_enabled() { crit!( self.log, "No synced execution engines"; @@ -355,111 +336,89 @@ impl Engines { { let mut errors = vec![]; - for engine in &self.engines { - let (engine_synced, engine_auth_failed) = { - let state = engine.state.read().await; - ( - *state == EngineState::Synced, - *state == EngineState::AuthFailed, - ) - }; - if engine_synced { - match func(engine).await { - Ok(result) => return Ok(result), - Err(error) => { - debug!( - self.log, - "Execution engine call failed"; - "error" => ?error, - "id" => &engine.id - ); - *engine.state.write().await = EngineState::Offline; - errors.push(EngineError::Api { - id: engine.id.clone(), - error, - }) - } + let (engine_synced, engine_auth_failed) = { + let state = self.engine.state.read().await; + ( + *state == EngineState::Synced, + *state == EngineState::AuthFailed, + ) + }; + if engine_synced { + match func(&self.engine).await { + Ok(result) => return Ok(result), + Err(error) => { + debug!( + self.log, + "Execution engine call failed"; + "error" => ?error, + "id" => &&self.engine.id + ); + *self.engine.state.write().await = EngineState::Offline; + errors.push(EngineError::Api { + id: self.engine.id.clone(), + error, + }) 
} - } else if engine_auth_failed { - errors.push(EngineError::Auth { - id: engine.id.clone(), - }) - } else { - errors.push(EngineError::Offline { - id: engine.id.clone(), - }) } + } else if engine_auth_failed { + errors.push(EngineError::Auth { + id: self.engine.id.clone(), + }) + } else { + errors.push(EngineError::Offline { + id: self.engine.id.clone(), + }) } Err(errors) } - /// Runs `func` on all nodes concurrently, returning all results. Any nodes that are offline - /// will be ignored, however all synced or unsynced nodes will receive the broadcast. + /// Runs `func` on the node. /// /// This function might try to run `func` twice. If all nodes return an error on the first time /// it runs, it will try to upcheck all offline nodes and then run the function again. - pub async fn broadcast<'a, F, G, H>(&'a self, func: F) -> Vec> + pub async fn broadcast<'a, F, G, H>(&'a self, func: F) -> Result where F: Fn(&'a Engine) -> G + Copy, G: Future>, { - let first_results = self.broadcast_without_retry(func).await; - - let mut any_offline = false; - for result in &first_results { - match result { - Ok(_) => return first_results, - Err(EngineError::Offline { .. }) => any_offline = true, - _ => (), + match self.broadcast_without_retry(func).await { + Err(EngineError::Offline { .. }) => { + self.upcheck_not_synced(Logging::Enabled).await; + self.broadcast_without_retry(func).await } - } - - if any_offline { - self.upcheck_not_synced(Logging::Enabled).await; - self.broadcast_without_retry(func).await - } else { - first_results + other => other, } } - /// Runs `func` on all nodes concurrently, returning all results. - pub async fn broadcast_without_retry<'a, F, G, H>( - &'a self, - func: F, - ) -> Vec> + /// Runs `func` on the node if it's last state is not offline. 
+ pub async fn broadcast_without_retry<'a, F, G, H>(&'a self, func: F) -> Result where F: Fn(&'a Engine) -> G, G: Future>, { let func = &func; - let futures = self.engines.iter().map(|engine| async move { - let is_offline = *engine.state.read().await == EngineState::Offline; - if !is_offline { - match func(engine).await { - Ok(res) => Ok(res), - Err(error) => { - debug!( - self.log, - "Execution engine call failed"; - "error" => ?error, - "id" => &engine.id - ); - *engine.state.write().await = EngineState::Offline; - Err(EngineError::Api { - id: engine.id.clone(), - error, - }) - } + if *self.engine.state.read().await == EngineState::Offline { + Err(EngineError::Offline { + id: self.engine.id.clone(), + }) + } else { + match func(&self.engine).await { + Ok(res) => Ok(res), + Err(error) => { + debug!( + self.log, + "Execution engine call failed"; + "error" => ?error, + ); + *self.engine.state.write().await = EngineState::Offline; + Err(EngineError::Api { + id: self.engine.id.clone(), + error, + }) } - } else { - Err(EngineError::Offline { - id: engine.id.clone(), - }) } - }); - - join_all(futures).await + } } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index d6acd5fe54..cff2190272 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -158,72 +158,60 @@ impl ExecutionLayer { let Config { execution_endpoints: urls, builder_endpoints: builder_urls, - mut secret_files, + secret_files, suggested_fee_recipient, jwt_id, jwt_version, default_datadir, } = config; - if urls.is_empty() { - return Err(Error::NoEngines); + if urls.len() > 1 { + warn!(log, "Only the first execution engine url will be used"); } + let execution_url = urls.into_iter().next().ok_or(Error::NoEngines)?; - // Extend the jwt secret files with the default jwt secret path if not provided via cli. - // This ensures that we have a jwt secret for every EL. 
- secret_files.extend(vec![ - default_datadir.join(DEFAULT_JWT_FILE); - urls.len().saturating_sub(secret_files.len()) - ]); - - let secrets: Vec<(JwtKey, PathBuf)> = secret_files - .iter() - .map(|p| { - // Read secret from file if it already exists - if p.exists() { - std::fs::read_to_string(p) - .map_err(|e| { - format!("Failed to read JWT secret file {:?}, error: {:?}", p, e) - }) - .and_then(|ref s| { - let secret = JwtKey::from_slice( - &hex::decode(strip_prefix(s.trim_end())) - .map_err(|e| format!("Invalid hex string: {:?}", e))?, - )?; - Ok((secret, p.to_path_buf())) - }) - } else { - // Create a new file and write a randomly generated secret to it if file does not exist - std::fs::File::options() - .write(true) - .create_new(true) - .open(p) - .map_err(|e| { - format!("Failed to open JWT secret file {:?}, error: {:?}", p, e) - }) - .and_then(|mut f| { - let secret = auth::JwtKey::random(); - f.write_all(secret.hex_string().as_bytes()).map_err(|e| { - format!("Failed to write to JWT secret file: {:?}", e) - })?; - Ok((secret, p.to_path_buf())) - }) - } - }) - .collect::>() - .map_err(Error::InvalidJWTSecret)?; - - let engines: Vec> = urls + // Use the default jwt secret path if not provided via cli. + let secret_file = secret_files .into_iter() - .zip(secrets.into_iter()) - .map(|(url, (secret, path))| { - let id = url.to_string(); - let auth = Auth::new(secret, jwt_id.clone(), jwt_version.clone()); - debug!(log, "Loaded execution endpoint"; "endpoint" => %id, "jwt_path" => ?path); - let api = HttpJsonRpc::::new_with_auth(url, auth)?; - Ok(Engine::::new(id, api)) - }) - .collect::>()?; + .next() + .unwrap_or_else(|| default_datadir.join(DEFAULT_JWT_FILE)); + + let jwt_key = if secret_file.exists() { + // Read secret from file if it already exists + std::fs::read_to_string(&secret_file) + .map_err(|e| format!("Failed to read JWT secret file. 
Error: {:?}", e)) + .and_then(|ref s| { + let secret = JwtKey::from_slice( + &hex::decode(strip_prefix(s.trim_end())) + .map_err(|e| format!("Invalid hex string: {:?}", e))?, + )?; + Ok(secret) + }) + .map_err(Error::InvalidJWTSecret) + } else { + // Create a new file and write a randomly generated secret to it if file does not exist + std::fs::File::options() + .write(true) + .create_new(true) + .open(&secret_file) + .map_err(|e| format!("Failed to open JWT secret file. Error: {:?}", e)) + .and_then(|mut f| { + let secret = auth::JwtKey::random(); + f.write_all(secret.hex_string().as_bytes()) + .map_err(|e| format!("Failed to write to JWT secret file: {:?}", e))?; + Ok(secret) + }) + .map_err(Error::InvalidJWTSecret) + }?; + + let engine: Engine = { + let id = execution_url.to_string(); + let auth = Auth::new(jwt_key, jwt_id, jwt_version); + debug!(log, "Loaded execution endpoint"; "endpoint" => %id, "jwt_path" => ?secret_file.as_path()); + let api = HttpJsonRpc::::new_with_auth(execution_url, auth) + .map_err(Error::ApiError)?; + Engine::::new(id, api) + }; let builders: Vec> = builder_urls .into_iter() @@ -236,7 +224,7 @@ impl ExecutionLayer { let inner = Inner { engines: Engines { - engines, + engine, latest_forkchoice_state: <_>::default(), log: log.clone(), }, @@ -455,7 +443,7 @@ impl ExecutionLayer { /// Returns `true` if there is at least one synced and reachable engine. 
pub async fn is_synced(&self) -> bool { - self.engines().any_synced().await + self.engines().is_synced().await } /// Updates the proposer preparation data provided by validators @@ -750,7 +738,7 @@ impl ExecutionLayer { process_multiple_payload_statuses( execution_payload.block_hash, - broadcast_results.into_iter(), + Some(broadcast_results).into_iter(), self.log(), ) } @@ -903,7 +891,7 @@ impl ExecutionLayer { }; process_multiple_payload_statuses( head_block_hash, - broadcast_results + Some(broadcast_results) .into_iter() .chain(builder_broadcast_results.into_iter()) .map(|result| result.map(|response| response.payload_status)), @@ -918,49 +906,49 @@ impl ExecutionLayer { terminal_block_number: 0, }; - let broadcast_results = self + let broadcast_result = self .engines() .broadcast(|engine| engine.api.exchange_transition_configuration_v1(local)) .await; let mut errors = vec![]; - for (i, result) in broadcast_results.into_iter().enumerate() { - match result { - Ok(remote) => { - if local.terminal_total_difficulty != remote.terminal_total_difficulty - || local.terminal_block_hash != remote.terminal_block_hash - { - error!( - self.log(), - "Execution client config mismatch"; - "msg" => "ensure lighthouse and the execution client are up-to-date and \ - configured consistently", - "execution_endpoint" => i, - "remote" => ?remote, - "local" => ?local, - ); - errors.push(EngineError::Api { - id: i.to_string(), - error: ApiError::TransitionConfigurationMismatch, - }); - } else { - debug!( - self.log(), - "Execution client config is OK"; - "execution_endpoint" => i - ); - } - } - Err(e) => { + // Having no fallbacks, the id of the used node is 0 + let i = 0usize; + match broadcast_result { + Ok(remote) => { + if local.terminal_total_difficulty != remote.terminal_total_difficulty + || local.terminal_block_hash != remote.terminal_block_hash + { error!( self.log(), - "Unable to get transition config"; - "error" => ?e, + "Execution client config mismatch"; + "msg" => "ensure 
lighthouse and the execution client are up-to-date and \ + configured consistently", "execution_endpoint" => i, + "remote" => ?remote, + "local" => ?local, + ); + errors.push(EngineError::Api { + id: i.to_string(), + error: ApiError::TransitionConfigurationMismatch, + }); + } else { + debug!( + self.log(), + "Execution client config is OK"; + "execution_endpoint" => i ); - errors.push(e); } } + Err(e) => { + error!( + self.log(), + "Unable to get transition config"; + "error" => ?e, + "execution_endpoint" => i, + ); + errors.push(e); + } } if errors.is_empty() { @@ -1102,8 +1090,7 @@ impl ExecutionLayer { &[metrics::IS_VALID_TERMINAL_POW_BLOCK_HASH], ); - let broadcast_results = self - .engines() + self.engines() .broadcast(|engine| async move { if let Some(pow_block) = self.get_pow_block(engine, block_hash).await? { if let Some(pow_parent) = @@ -1116,38 +1103,8 @@ impl ExecutionLayer { } Ok(None) }) - .await; - - let mut errors = vec![]; - let mut terminal = 0; - let mut not_terminal = 0; - let mut block_missing = 0; - for result in broadcast_results { - match result { - Ok(Some(true)) => terminal += 1, - Ok(Some(false)) => not_terminal += 1, - Ok(None) => block_missing += 1, - Err(e) => errors.push(e), - } - } - - if terminal > 0 && not_terminal > 0 { - crit!( - self.log(), - "Consensus failure between execution nodes"; - "method" => "is_valid_terminal_pow_block_hash" - ); - } - - if terminal > 0 { - Ok(Some(true)) - } else if not_terminal > 0 { - Ok(Some(false)) - } else if block_missing > 0 { - Ok(None) - } else { - Err(Error::EngineErrors(errors)) - } + .await + .map_err(|e| Error::EngineErrors(vec![e])) } /// This function should remain internal. 
From 7af57420810772b2a1b0d7d75a0d045c0333093b Mon Sep 17 00:00:00 2001 From: Divma Date: Wed, 22 Jun 2022 16:23:34 +0000 Subject: [PATCH 036/184] Deprecate step param in BlocksByRange RPC request (#3275) ## Issue Addressed Deprecates the step parameter in the blocks by range request ## Proposed Changes - Modifies the BlocksByRangeRequest type to remove the step parameter and everywhere we took it into account before - Adds a new type to still handle coding and decoding of requests that use the parameter ## Additional Info I went with a deprecation over the type itself so that requests received outside `lighthouse_network` don't even need to deal with this parameter. After the deprecation period just removing the Old blocks by range request should be straightforward --- .../lighthouse_network/src/behaviour/mod.rs | 40 +++- .../src/rpc/codec/ssz_snappy.rs | 177 +++++++++++++----- .../lighthouse_network/src/rpc/methods.rs | 16 ++ .../lighthouse_network/src/rpc/outbound.rs | 2 +- .../lighthouse_network/src/rpc/protocol.rs | 6 +- .../src/rpc/rate_limiter.rs | 24 +-- .../lighthouse_network/tests/rpc_tests.rs | 4 - .../beacon_processor/worker/rpc_methods.rs | 44 ++--- .../network/src/sync/range_sync/batch.rs | 1 - 9 files changed, 201 insertions(+), 113 deletions(-) diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index 81de3f015a..bf1918662a 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -1065,11 +1065,33 @@ where // propagate the STATUS message upwards self.propagate_request(peer_request_id, peer_id, Request::Status(msg)) } - InboundRequest::BlocksByRange(req) => self.propagate_request( - peer_request_id, - peer_id, - Request::BlocksByRange(req), - ), + InboundRequest::BlocksByRange(req) => { + let methods::OldBlocksByRangeRequest { + start_slot, + mut count, + step, + } = req; + // Still disconnect the peer if the request 
is naughty. + if step == 0 { + self.peer_manager.handle_rpc_error( + &peer_id, + Protocol::BlocksByRange, + &RPCError::InvalidData( + "Blocks by range with 0 step parameter".into(), + ), + ConnectionDirection::Incoming, + ); + } + // return just one block in case the step parameter is used. https://github.com/ethereum/consensus-specs/pull/2856 + if step > 1 { + count = 1; + } + self.propagate_request( + peer_request_id, + peer_id, + Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }), + ); + } InboundRequest::BlocksByRoot(req) => { self.propagate_request(peer_request_id, peer_id, Request::BlocksByRoot(req)) } @@ -1313,7 +1335,13 @@ impl std::convert::From for OutboundRequest { fn from(req: Request) -> OutboundRequest { match req { Request::BlocksByRoot(r) => OutboundRequest::BlocksByRoot(r), - Request::BlocksByRange(r) => OutboundRequest::BlocksByRange(r), + Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }) => { + OutboundRequest::BlocksByRange(methods::OldBlocksByRangeRequest { + start_slot, + count, + step: 1, + }) + } Request::Status(s) => OutboundRequest::Status(s), } } diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 6bd4a96fb5..f6c3e61b0b 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -137,6 +137,9 @@ impl Decoder for SSZSnappyInboundCodec { type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + if self.protocol.message_name == Protocol::MetaData { + return Ok(Some(InboundRequest::MetaData(PhantomData))); + } let length = match handle_length(&mut self.inner, &mut self.len, src)? 
{ Some(len) => len, None => return Ok(None), @@ -461,7 +464,7 @@ fn handle_v1_request( GoodbyeReason::from_ssz_bytes(decoded_buffer)?, ))), Protocol::BlocksByRange => Ok(Some(InboundRequest::BlocksByRange( - BlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?, + OldBlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?, ))), Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, @@ -493,7 +496,7 @@ fn handle_v2_request( ) -> Result>, RPCError> { match protocol { Protocol::BlocksByRange => Ok(Some(InboundRequest::BlocksByRange( - BlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?, + OldBlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?, ))), Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, @@ -712,6 +715,20 @@ mod tests { } } + fn bbrange_request() -> OldBlocksByRangeRequest { + OldBlocksByRangeRequest { + start_slot: 0, + count: 10, + step: 1, + } + } + + fn bbroot_request() -> BlocksByRootRequest { + BlocksByRootRequest { + block_roots: VariableList::from(vec![Hash256::zero()]), + } + } + fn ping_message() -> Ping { Ping { data: 1 } } @@ -732,7 +749,7 @@ mod tests { } /// Encodes the given protocol response as bytes. - fn encode( + fn encode_response( protocol: Protocol, version: Version, message: RPCCodedResponse, @@ -779,7 +796,7 @@ mod tests { } /// Attempts to decode the given protocol bytes as an rpc response - fn decode( + fn decode_response( protocol: Protocol, version: Version, message: &mut BytesMut, @@ -795,21 +812,70 @@ mod tests { } /// Encodes the provided protocol message as bytes and tries to decode the encoding bytes. 
- fn encode_then_decode( + fn encode_then_decode_response( protocol: Protocol, version: Version, message: RPCCodedResponse, fork_name: ForkName, ) -> Result>, RPCError> { - let mut encoded = encode(protocol, version.clone(), message, fork_name)?; - decode(protocol, version, &mut encoded, fork_name) + let mut encoded = encode_response(protocol, version.clone(), message, fork_name)?; + decode_response(protocol, version, &mut encoded, fork_name) + } + + /// Verifies that requests we send are encoded in a way that we would correctly decode too. + fn encode_then_decode_request(req: OutboundRequest, fork_name: ForkName) { + let fork_context = Arc::new(fork_context(fork_name)); + let max_packet_size = max_rpc_size(&fork_context); + for protocol in req.supported_protocols() { + // Encode a request we send + let mut buf = BytesMut::new(); + let mut outbound_codec = SSZSnappyOutboundCodec::::new( + protocol.clone(), + max_packet_size, + fork_context.clone(), + ); + outbound_codec.encode(req.clone(), &mut buf).unwrap(); + + let mut inbound_codec = SSZSnappyInboundCodec::::new( + protocol.clone(), + max_packet_size, + fork_context.clone(), + ); + + let decoded = inbound_codec.decode(&mut buf).unwrap().unwrap_or_else(|| { + panic!( + "Should correctly decode the request {} over protocol {:?} and fork {}", + req, protocol, fork_name + ) + }); + match req.clone() { + OutboundRequest::Status(status) => { + assert_eq!(decoded, InboundRequest::Status(status)) + } + OutboundRequest::Goodbye(goodbye) => { + assert_eq!(decoded, InboundRequest::Goodbye(goodbye)) + } + OutboundRequest::BlocksByRange(bbrange) => { + assert_eq!(decoded, InboundRequest::BlocksByRange(bbrange)) + } + OutboundRequest::BlocksByRoot(bbroot) => { + assert_eq!(decoded, InboundRequest::BlocksByRoot(bbroot)) + } + OutboundRequest::Ping(ping) => { + assert_eq!(decoded, InboundRequest::Ping(ping)) + } + OutboundRequest::MetaData(metadata) => { + assert_eq!(decoded, InboundRequest::MetaData(metadata)) + } + } + } } // 
Test RPCResponse encoding/decoding for V1 messages #[test] fn test_encode_then_decode_v1() { assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::Status, Version::V1, RPCCodedResponse::Success(RPCResponse::Status(status_message())), @@ -819,7 +885,7 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::Ping, Version::V1, RPCCodedResponse::Success(RPCResponse::Pong(ping_message())), @@ -829,7 +895,7 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRange, Version::V1, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), @@ -842,7 +908,7 @@ mod tests { assert!( matches!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRange, Version::V1, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block()))), @@ -855,7 +921,7 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRoot, Version::V1, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), @@ -868,7 +934,7 @@ mod tests { assert!( matches!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRoot, Version::V1, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), @@ -881,7 +947,7 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::MetaData, Version::V1, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), @@ -891,7 +957,7 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::MetaData, Version::V1, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), @@ -902,7 +968,7 @@ mod tests { // A MetaDataV2 still encodes as a MetaDataV1 since version is Version::V1 assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::MetaData, Version::V1, RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), @@ 
-917,7 +983,7 @@ mod tests { fn test_encode_then_decode_v2() { assert!( matches!( - encode_then_decode( + encode_then_decode_response( Protocol::Status, Version::V2, RPCCodedResponse::Success(RPCResponse::Status(status_message())), @@ -931,7 +997,7 @@ mod tests { assert!( matches!( - encode_then_decode( + encode_then_decode_response( Protocol::Ping, Version::V2, RPCCodedResponse::Success(RPCResponse::Pong(ping_message())), @@ -944,7 +1010,7 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRange, Version::V2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), @@ -959,7 +1025,7 @@ mod tests { // This is useful for checking that we allow for blocks smaller than // the current_fork's rpc limit assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRange, Version::V2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), @@ -971,7 +1037,7 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRange, Version::V2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block()))), @@ -984,7 +1050,7 @@ mod tests { let merge_block_large = merge_block_large(&fork_context(ForkName::Merge)); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRange, Version::V2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new( @@ -1003,7 +1069,7 @@ mod tests { assert!( matches!( - decode( + decode_response( Protocol::BlocksByRange, Version::V2, &mut encoded, @@ -1016,7 +1082,7 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRoot, Version::V2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), @@ -1031,7 +1097,7 @@ mod tests { // This is useful for checking that we allow for blocks smaller than // the current_fork's rpc limit assert_eq!( - encode_then_decode( + 
encode_then_decode_response( Protocol::BlocksByRoot, Version::V2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), @@ -1043,7 +1109,7 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRoot, Version::V2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), @@ -1053,7 +1119,7 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRoot, Version::V2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new( @@ -1070,7 +1136,7 @@ mod tests { assert!( matches!( - decode( + decode_response( Protocol::BlocksByRoot, Version::V2, &mut encoded, @@ -1084,7 +1150,7 @@ mod tests { // A MetaDataV1 still encodes as a MetaDataV2 since version is Version::V2 assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::MetaData, Version::V2, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), @@ -1094,7 +1160,7 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::MetaData, Version::V2, RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), @@ -1110,7 +1176,7 @@ mod tests { let fork_context = fork_context(ForkName::Altair); // Removing context bytes for v2 messages should error - let mut encoded_bytes = encode( + let mut encoded_bytes = encode_response( Protocol::BlocksByRange, Version::V2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), @@ -1121,7 +1187,7 @@ mod tests { let _ = encoded_bytes.split_to(4); assert!(matches!( - decode( + decode_response( Protocol::BlocksByRange, Version::V2, &mut encoded_bytes, @@ -1131,7 +1197,7 @@ mod tests { RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), )); - let mut encoded_bytes = encode( + let mut encoded_bytes = encode_response( Protocol::BlocksByRoot, Version::V2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), @@ -1142,7 
+1208,7 @@ mod tests { let _ = encoded_bytes.split_to(4); assert!(matches!( - decode( + decode_response( Protocol::BlocksByRange, Version::V2, &mut encoded_bytes, @@ -1153,7 +1219,7 @@ mod tests { )); // Trying to decode a base block with altair context bytes should give ssz decoding error - let mut encoded_bytes = encode( + let mut encoded_bytes = encode_response( Protocol::BlocksByRange, Version::V2, RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), @@ -1167,7 +1233,7 @@ mod tests { wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); assert!(matches!( - decode( + decode_response( Protocol::BlocksByRange, Version::V2, &mut wrong_fork_bytes, @@ -1178,7 +1244,7 @@ mod tests { )); // Trying to decode an altair block with base context bytes should give ssz decoding error - let mut encoded_bytes = encode( + let mut encoded_bytes = encode_response( Protocol::BlocksByRoot, Version::V2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), @@ -1191,7 +1257,7 @@ mod tests { wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); assert!(matches!( - decode( + decode_response( Protocol::BlocksByRange, Version::V2, &mut wrong_fork_bytes, @@ -1205,7 +1271,7 @@ mod tests { let mut encoded_bytes = BytesMut::new(); encoded_bytes.extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap()); encoded_bytes.extend_from_slice( - &encode( + &encode_response( Protocol::MetaData, Version::V2, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), @@ -1214,7 +1280,7 @@ mod tests { .unwrap(), ); - assert!(decode( + assert!(decode_response( Protocol::MetaData, Version::V2, &mut encoded_bytes, @@ -1223,7 +1289,7 @@ mod tests { .is_err()); // Sending context bytes which do not correspond to any fork should return an error - let mut encoded_bytes = encode( + let mut encoded_bytes = encode_response( Protocol::BlocksByRoot, Version::V2, 
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), @@ -1236,7 +1302,7 @@ mod tests { wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); assert!(matches!( - decode( + decode_response( Protocol::BlocksByRange, Version::V2, &mut wrong_fork_bytes, @@ -1247,7 +1313,7 @@ mod tests { )); // Sending bytes less than context bytes length should wait for more bytes by returning `Ok(None)` - let mut encoded_bytes = encode( + let mut encoded_bytes = encode_response( Protocol::BlocksByRoot, Version::V2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), @@ -1258,7 +1324,7 @@ mod tests { let mut part = encoded_bytes.split_to(3); assert_eq!( - decode( + decode_response( Protocol::BlocksByRange, Version::V2, &mut part, @@ -1268,6 +1334,23 @@ mod tests { ) } + #[test] + fn test_encode_then_decode_request() { + let requests: &[OutboundRequest] = &[ + OutboundRequest::Ping(ping_message()), + OutboundRequest::Status(status_message()), + OutboundRequest::Goodbye(GoodbyeReason::Fault), + OutboundRequest::BlocksByRange(bbrange_request()), + OutboundRequest::BlocksByRoot(bbroot_request()), + OutboundRequest::MetaData(PhantomData::), + ]; + for req in requests.iter() { + for fork_name in ForkName::list_all() { + encode_then_decode_request(req.clone(), fork_name); + } + } + } + /// Test a malicious snappy encoding for a V1 `Status` message where the attacker /// sends a valid message filled with a stream of useless padding before the actual message. #[test] @@ -1319,7 +1402,7 @@ mod tests { // 10 (for stream identifier) + 80 + 42 = 132 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. 
assert!(matches!( - decode(Protocol::Status, Version::V1, &mut dst, ForkName::Base).unwrap_err(), + decode_response(Protocol::Status, Version::V1, &mut dst, ForkName::Base).unwrap_err(), RPCError::InvalidData(_) )); } @@ -1376,7 +1459,7 @@ mod tests { // 10 (for stream identifier) + 176156 + 8103 = 184269 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. assert!(matches!( - decode( + decode_response( Protocol::BlocksByRange, Version::V2, &mut dst, @@ -1421,7 +1504,7 @@ mod tests { dst.extend_from_slice(writer.get_ref()); assert!(matches!( - decode(Protocol::Status, Version::V1, &mut dst, ForkName::Base).unwrap_err(), + decode_response(Protocol::Status, Version::V1, &mut dst, ForkName::Base).unwrap_err(), RPCError::InvalidData(_) )); } diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 1ac9c9b2c0..46de772d8d 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -201,6 +201,16 @@ pub struct BlocksByRangeRequest { /// The number of blocks from the start slot. pub count: u64, +} + +/// Request a number of beacon block roots from a peer. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct OldBlocksByRangeRequest { + /// The starting slot to request blocks. + pub start_slot: u64, + + /// The number of blocks from the start slot. + pub count: u64, /// The step increment to receive blocks. 
/// @@ -410,6 +420,12 @@ impl std::fmt::Display for GoodbyeReason { } impl std::fmt::Display for BlocksByRangeRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Start Slot: {}, Count: {}", self.start_slot, self.count) + } +} + +impl std::fmt::Display for OldBlocksByRangeRequest { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 17201c6cf4..7d5acc4364 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -36,7 +36,7 @@ pub struct OutboundRequestContainer { pub enum OutboundRequest { Status(StatusMessage), Goodbye(GoodbyeReason), - BlocksByRange(BlocksByRangeRequest), + BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), Ping(Ping), MetaData(PhantomData), diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 1639d17941..81960214b1 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -279,8 +279,8 @@ impl ProtocolId { ::ssz_fixed_len(), ), Protocol::BlocksByRange => RpcLimits::new( - ::ssz_fixed_len(), - ::ssz_fixed_len(), + ::ssz_fixed_len(), + ::ssz_fixed_len(), ), Protocol::BlocksByRoot => { RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX) @@ -415,7 +415,7 @@ where pub enum InboundRequest { Status(StatusMessage), Goodbye(GoodbyeReason), - BlocksByRange(BlocksByRangeRequest), + BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), Ping(Ping), MetaData(PhantomData), diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 5e1b533c60..70b14c33de 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -188,29 +188,7 @@ impl RPCRateLimiter { request: &InboundRequest, ) -> Result<(), RateLimitedErr> { let time_since_start = self.init_time.elapsed(); - let mut tokens = request.expected_responses().max(1); - - // Increase the rate limit for blocks by range requests with large step counts. - // We count to tokens as a quadratic increase with step size. - // Using (step_size/5)^2 + 1 as penalty factor allows step sizes of 1-4 to have no penalty - // but step sizes higher than this add a quadratic penalty. - // Penalty's go: - // Step size | Penalty Factor - // 1 | 1 - // 2 | 1 - // 3 | 1 - // 4 | 1 - // 5 | 2 - // 6 | 2 - // 7 | 2 - // 8 | 3 - // 9 | 4 - // 10 | 5 - - if let InboundRequest::BlocksByRange(bbr_req) = request { - let penalty_factor = (bbr_req.step as f64 / 5.0).powi(2) as u64 + 1; - tokens *= penalty_factor; - } + let tokens = request.expected_responses().max(1); let check = |limiter: &mut Limiter| limiter.allows(time_since_start, peer_id, tokens); diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 5895d32d5d..973485fc4a 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -167,7 +167,6 @@ fn test_blocks_by_range_chunked_rpc() { let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { start_slot: 0, count: messages_to_send, - step: 0, }); let spec = E::default_spec(); @@ -307,7 +306,6 @@ fn test_blocks_by_range_over_limit() { let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { start_slot: 0, count: messages_to_send, - step: 0, }); // BlocksByRange Response @@ -405,7 +403,6 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { start_slot: 0, count: messages_to_send, - step: 0, }); // BlocksByRange Response @@ -537,7 +534,6 @@ fn test_blocks_by_range_single_empty_rpc() 
{ let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { start_slot: 0, count: 10, - step: 0, }); // BlocksByRange Response diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 2d2196b9e9..cf113ca1fa 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -7,7 +7,7 @@ use itertools::process_results; use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; -use slog::{debug, error, warn}; +use slog::{debug, error}; use slot_clock::SlotClock; use task_executor::TaskExecutor; use types::{Epoch, EthSpec, Hash256, Slot}; @@ -196,16 +196,12 @@ impl Worker { "peer_id" => %peer_id, "count" => req.count, "start_slot" => req.start_slot, - "step" => req.step); + ); // Should not send more than max request blocks if req.count > MAX_REQUEST_BLOCKS { req.count = MAX_REQUEST_BLOCKS; } - if req.step == 0 { - self.goodbye_peer(peer_id, GoodbyeReason::Fault); - return warn!(self.log, "Peer sent invalid range request"; "error" => "Step sent was 0"); - } let forwards_block_root_iter = match self .chain @@ -229,29 +225,21 @@ impl Worker { Err(e) => return error!(self.log, "Unable to obtain root iter"; "error" => ?e), }; - // Pick out the required blocks, ignoring skip-slots and stepping by the step parameter. - // - // NOTE: We don't mind if req.count * req.step overflows as it just ends the iterator early and - // the peer will get less blocks. - // The step parameter is quadratically weighted in the filter, so large values should be - // prevented before reaching this point. + // Pick out the required blocks, ignoring skip-slots. 
let mut last_block_root = None; let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { - iter.take_while(|(_, slot)| { - slot.as_u64() < req.start_slot.saturating_add(req.count * req.step) - }) - // map skip slots to None - .map(|(root, _)| { - let result = if Some(root) == last_block_root { - None - } else { - Some(root) - }; - last_block_root = Some(root); - result - }) - .step_by(req.step as usize) - .collect::>>() + iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) + // map skip slots to None + .map(|(root, _)| { + let result = if Some(root) == last_block_root { + None + } else { + Some(root) + }; + last_block_root = Some(root); + result + }) + .collect::>>() }); let block_roots = match maybe_block_roots { @@ -273,7 +261,7 @@ impl Worker { // Due to skip slots, blocks could be out of the range, we ensure they // are in the range before sending if block.slot() >= req.start_slot - && block.slot() < req.start_slot + req.count * req.step + && block.slot() < req.start_slot + req.count { blocks_sent += 1; self.send_network_message(NetworkMessage::SendResponse { diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index aaebe022c7..447f0bd11c 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -199,7 +199,6 @@ impl BatchInfo { BlocksByRangeRequest { start_slot: self.start_slot.into(), count: self.end_slot.sub(self.start_slot).into(), - step: 1, } } From 748658e32ccf5673d7ac645160b142718c3722a9 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 23 Jun 2022 05:19:20 +0000 Subject: [PATCH 037/184] Add some debug logs for checkpoint sync (#3281) ## Issue Addressed NA ## Proposed Changes I used these logs when debugging a spurious failure with Infura and thought they might be nice to have around permanently. 
There's no changes to functionality in this PR, just some additional `debug!` logs. ## Additional Info NA --- beacon_node/client/src/builder.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 1f02ec7b3c..a6124bdfad 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -276,6 +276,8 @@ where BeaconNodeHttpClient::new(url, Timeouts::set_all(CHECKPOINT_SYNC_HTTP_TIMEOUT)); let slots_per_epoch = TEthSpec::slots_per_epoch(); + debug!(context.log(), "Downloading finalized block"); + // Find a suitable finalized block on an epoch boundary. let mut block = remote .get_beacon_blocks_ssz::(BlockId::Finalized, &spec) @@ -290,6 +292,8 @@ where })? .ok_or("Finalized block missing from remote, it returned 404")?; + debug!(context.log(), "Downloaded finalized block"); + let mut block_slot = block.slot(); while block.slot() % slots_per_epoch != 0 { @@ -301,6 +305,12 @@ where "block_slot" => block_slot, ); + debug!( + context.log(), + "Searching for aligned checkpoint block"; + "block_slot" => block_slot + ); + if let Some(found_block) = remote .get_beacon_blocks_ssz::(BlockId::Slot(block_slot), &spec) .await @@ -312,7 +322,19 @@ where } } + debug!( + context.log(), + "Downloaded aligned finalized block"; + "block_root" => ?block.canonical_root(), + "block_slot" => block.slot(), + ); + let state_root = block.state_root(); + debug!( + context.log(), + "Downloading finalized state"; + "state_root" => ?state_root + ); let state = remote .get_debug_beacon_states_ssz::(StateId::Root(state_root), &spec) .await @@ -326,6 +348,8 @@ where format!("Checkpoint state missing from remote: {:?}", state_root) })?; + debug!(context.log(), "Downloaded finalized state"); + let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; From d21f083777601610ccaa1597b16c21ce30a5cbd7 
Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 23 Jun 2022 05:19:21 +0000 Subject: [PATCH 038/184] Add more paths to HTTP API metrics (#3282) ## Proposed Changes Expand the set of paths tracked by the HTTP API metrics to include all paths hit by the validator client. These paths were only partially updated for Altair, so we were missing some of the sync committee and v2 APIs. --- beacon_node/http_api/src/lib.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index fa3b6a9d95..be08b3f737 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -200,18 +200,29 @@ pub fn prometheus_metrics() -> warp::filters::log::Log Date: Sat, 25 Jun 2022 22:22:34 +0000 Subject: [PATCH 039/184] Test the pruning of excess peers using randomly generated input (#3248) ## Issue Addressed https://github.com/sigp/lighthouse/issues/3092 ## Proposed Changes Added property-based tests for the pruning implementation. A randomly generated input for the test contains connection direction, subnets, and scores. ## Additional Info I left some comments on this PR, what I have tried, and [a question](https://github.com/sigp/lighthouse/pull/3248#discussion_r891981969). 
Co-authored-by: Diva M --- Cargo.lock | 2 + beacon_node/lighthouse_network/Cargo.toml | 2 + .../src/peer_manager/mod.rs | 119 ++++++++++++++++++ .../src/peer_manager/peerdb/score.rs | 1 + 4 files changed, 124 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index de385f22cf..3455ca8efa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3450,6 +3450,8 @@ dependencies = [ "lru", "parking_lot 0.12.1", "prometheus-client", + "quickcheck", + "quickcheck_macros", "rand 0.8.5", "regex", "serde", diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index e7c4781e21..bbef8a301b 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -52,6 +52,8 @@ slog-async = "2.5.0" tempfile = "3.1.0" exit-future = "0.2.0" void = "1" +quickcheck = "0.9.2" +quickcheck_macros = "0.9.1" [features] libp2p-websocket = [] diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 3575d9d34d..4b2b81060f 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -2048,4 +2048,123 @@ mod tests { assert!(connected_peers.contains(&peers[6])); assert!(connected_peers.contains(&peers[7])); } + + // Test properties PeerManager should have using randomly generated input. 
+ #[cfg(test)] + mod property_based_tests { + use crate::peer_manager::config::DEFAULT_TARGET_PEERS; + use crate::peer_manager::tests::build_peer_manager; + use crate::rpc::MetaData; + use libp2p::PeerId; + use quickcheck::{Arbitrary, Gen, TestResult}; + use quickcheck_macros::quickcheck; + use tokio::runtime::Runtime; + use types::Unsigned; + use types::{EthSpec, MainnetEthSpec as E}; + + #[derive(Clone, Debug)] + struct PeerCondition { + outgoing: bool, + attestation_net_bitfield: Vec, + sync_committee_net_bitfield: Vec, + score: f64, + gossipsub_score: f64, + } + + impl Arbitrary for PeerCondition { + fn arbitrary(g: &mut G) -> Self { + let attestation_net_bitfield = { + let len = ::SubnetBitfieldLength::to_usize(); + let mut bitfield = Vec::with_capacity(len); + for _ in 0..len { + bitfield.push(bool::arbitrary(g)); + } + bitfield + }; + + let sync_committee_net_bitfield = { + let len = ::SyncCommitteeSubnetCount::to_usize(); + let mut bitfield = Vec::with_capacity(len); + for _ in 0..len { + bitfield.push(bool::arbitrary(g)); + } + bitfield + }; + + PeerCondition { + outgoing: bool::arbitrary(g), + attestation_net_bitfield, + sync_committee_net_bitfield, + score: f64::arbitrary(g), + gossipsub_score: f64::arbitrary(g), + } + } + } + + #[quickcheck] + fn prune_excess_peers(peer_conditions: Vec) -> TestResult { + let target_peer_count = DEFAULT_TARGET_PEERS; + if peer_conditions.len() < target_peer_count { + return TestResult::discard(); + } + let rt = Runtime::new().unwrap(); + + rt.block_on(async move { + let mut peer_manager = build_peer_manager(target_peer_count).await; + + // Create peers based on the randomly generated conditions. 
+ for condition in &peer_conditions { + let peer = PeerId::random(); + let mut attnets = crate::types::EnrAttestationBitfield::::new(); + let mut syncnets = crate::types::EnrSyncCommitteeBitfield::::new(); + + if condition.outgoing { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + } else { + peer_manager.inject_connect_ingoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + } + + for (i, value) in condition.attestation_net_bitfield.iter().enumerate() { + attnets.set(i, *value).unwrap(); + } + + for (i, value) in condition.sync_committee_net_bitfield.iter().enumerate() { + syncnets.set(i, *value).unwrap(); + } + + let metadata = crate::rpc::MetaDataV2 { + seq_number: 0, + attnets, + syncnets, + }; + + let mut peer_db = peer_manager.network_globals.peers.write(); + let peer_info = peer_db.peer_info_mut(&peer).unwrap(); + peer_info.set_meta_data(MetaData::V2(metadata)); + peer_info.set_gossipsub_score(condition.gossipsub_score); + peer_info.add_to_score(condition.score); + + for subnet in peer_info.long_lived_subnets() { + peer_db.add_subscription(&peer, subnet); + } + } + + // Perform the heartbeat. + peer_manager.heartbeat(); + + TestResult::from_bool( + peer_manager.network_globals.connected_or_dialing_peers() + == target_peer_count.min(peer_conditions.len()), + ) + }) + } + } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs index 3b67c442d7..accc0b60c5 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs @@ -223,6 +223,7 @@ impl RealScore { #[cfg(test)] pub fn set_gossipsub_score(&mut self, score: f64) { self.gossipsub_score = score; + self.update_state(); } /// Applies time-based logic such as decay rates to the score. 
From 7acfbd89ee8a528a51c84515cb058a8343fb242f Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Sun, 26 Jun 2022 23:10:58 +0000 Subject: [PATCH 040/184] Recover from NonConsecutive eth1 errors (#3273) ## Issue Addressed Fixes #1864 and a bunch of other closed but unresolved issues. ## Proposed Changes Allows the deposit caching to recover from `NonConsecutive` deposit errors by resetting the last processed block to the last valid deposit's block number. Still not sure of the underlying cause of this error, but this should recover the cache so we don't need `--eth1-purge-cache` anymore :tada: A huge thanks to @one-three-three-seven for reproducing the error and providing the data that helped testing out the fix :raised_hands: Still needs a few more tests. --- beacon_node/eth1/src/service.rs | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index a35d574037..15e2123e8a 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -768,12 +768,32 @@ impl Service { *self.inner.remote_head_block.write() = Some(remote_head_block); let update_deposit_cache = async { - let outcome = self + let outcome_result = self .update_deposit_cache(Some(new_block_numbers_deposit), &endpoints) - .await - .map_err(|e| { - format!("Failed to update eth1 deposit cache: {:?}", process_err(e)) - })?; + .await; + + // Reset the `last_procesed block` to the last valid deposit's block number. + // This will ensure that the next batch of blocks fetched is immediately after + // the last cached valid deposit allowing us to recover from scenarios where + // the deposit cache gets corrupted due to invalid responses from eth1 nodes. 
+ if let Err(Error::FailedToInsertDeposit(DepositCacheError::NonConsecutive { + log_index: _, + expected: _, + })) = &outcome_result + { + let mut deposit_cache = self.inner.deposit_cache.write(); + debug!( + self.log, + "Resetting last processed block"; + "old_block_number" => deposit_cache.last_processed_block, + "new_block_number" => deposit_cache.cache.latest_block_number(), + ); + deposit_cache.last_processed_block = deposit_cache.cache.latest_block_number(); + } + + let outcome = outcome_result.map_err(|e| { + format!("Failed to update eth1 deposit cache: {:?}", process_err(e)) + })?; trace!( self.log, @@ -1206,7 +1226,7 @@ impl Service { "latest_block_age" => latest_block_mins, "latest_block" => block_cache.highest_block_number(), "total_cached_blocks" => block_cache.len(), - "new" => blocks_imported + "new" => %blocks_imported ); } else { debug!( From f3a1b5da3190b1a3d838c9955f2da6c6830bd9e3 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 27 Jun 2022 22:50:27 +0000 Subject: [PATCH 041/184] Update Sepolia TTD (#3288) ## Issue Addressed NA ## Proposed Changes Update Sepolia TTD as per https://github.com/eth-clients/merge-testnets/pull/21 ## Additional Info NA --- .../built_in_network_configs/sepolia/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 95587c2908..4c3e4bb6ec 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -24,7 +24,7 @@ ALTAIR_FORK_EPOCH: 50 # Merge BELLATRIX_FORK_VERSION: 0x90000071 BELLATRIX_FORK_EPOCH: 100 -TERMINAL_TOTAL_DIFFICULTY: 100000000000000000000000 +TERMINAL_TOTAL_DIFFICULTY: 17000000000000000 TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 
18446744073709551615 From 45b2eb18bc4ca6a8e06fc66d3139358b38418ed2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 28 Jun 2022 03:03:30 +0000 Subject: [PATCH 042/184] v2.3.2-rc.0 (#3289) ## Issue Addressed NA ## Proposed Changes Bump versions ## Additional Info NA --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3455ca8efa..1554ff5564 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -328,7 +328,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.3.1" +version = "2.3.2-rc.0" dependencies = [ "beacon_chain", "clap", @@ -485,7 +485,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.3.1" +version = "2.3.2-rc.0" dependencies = [ "beacon_node", "clap", @@ -2885,7 +2885,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.3.1" +version = "2.3.2-rc.0" dependencies = [ "account_utils", "bls", @@ -3383,7 +3383,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.3.1" +version = "2.3.2-rc.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 081e91aba8..ccb145caf9 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.3.1" +version = "2.3.2-rc.0" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index c5a5bc57e8..e4a6bd0179 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = 
"Lighthouse/v2.3.1-", - fallback = "Lighthouse/v2.3.1" + prefix = "Lighthouse/v2.3.2-rc.0-", + fallback = "Lighthouse/v2.3.2-rc.0" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 5dfcba8fa1..037171097d 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "2.3.1" +version = "2.3.2-rc.0" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 35fee80315..d9bd4334cf 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "2.3.1" +version = "2.3.2-rc.0" authors = ["Sigma Prime "] edition = "2021" autotests = false From 36453929d55568a714fd98d60087cad0f042f711 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 29 Jun 2022 04:50:36 +0000 Subject: [PATCH 043/184] Update Cross config for v0.2.2 (#3286) ## Proposed Changes Update `Cross.toml` for the recently released Cross v0.2.2. This allows us to remove the dependency on my fork of the Cross Docker image, which was a maintenance burden and prone to bit-rot. This PR puts us back in sync with upstream Cross. ## Additional Info Due to some bindgen errors on the default Cross images we seemingly need a full `clang-3.9` install. The `libclang-3.9-dev` package was found to be insufficient due to `stdarg.h` being missing. In order to continue building locally all Lighthouse devs should update their local cross version with `cargo install cross`. --- Cross.toml | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/Cross.toml b/Cross.toml index 2db3992464..d5f7a5d506 100644 --- a/Cross.toml +++ b/Cross.toml @@ -1,15 +1,5 @@ -[build.env] -passthrough = [ - "RUSTFLAGS", -] - -# These custom images are required to work around the lack of Clang in the default `cross` images. 
-# We need Clang to run `bindgen` for MDBX, and the `BINDGEN_EXTRA_CLANG_ARGS` flags must also be set -# while cross-compiling for ARM to prevent bindgen from attempting to include headers from the host. -# -# For more information see https://github.com/rust-embedded/cross/pull/608 [target.x86_64-unknown-linux-gnu] -image = "michaelsproul/cross-clang:x86_64-latest" +pre-build = ["apt-get install -y cmake clang-3.9"] [target.aarch64-unknown-linux-gnu] -image = "michaelsproul/cross-clang:aarch64-latest" +pre-build = ["apt-get install -y cmake clang-3.9"] From 53b2b500dbb9f00b5d6dd4da065e52d7ba186f9a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 29 Jun 2022 04:50:37 +0000 Subject: [PATCH 044/184] Extend block reward APIs (#3290) ## Proposed Changes Add a new HTTP endpoint `POST /lighthouse/analysis/block_rewards` which takes a vec of `BeaconBlock`s as input and outputs the `BlockReward`s for them. Augment the `BlockReward` struct with the attestation data for attestations in the block, which simplifies access to this information from blockprint. Using attestation data I've been able to make blockprint up to 95% accurate across Prysm/Lighthouse/Teku/Nimbus. I hope to go even higher using a bunch of synthetic blocks produced for Prysm/Nimbus/Lodestar, which are underrepresented in the current training data. 
--- Cargo.lock | 1 + beacon_node/beacon_chain/src/block_reward.rs | 19 +++- .../beacon_chain/src/block_verification.rs | 2 +- beacon_node/http_api/Cargo.toml | 2 +- beacon_node/http_api/src/block_rewards.rs | 100 +++++++++++++++++- beacon_node/http_api/src/lib.rs | 15 ++- common/eth2/src/lighthouse/block_rewards.rs | 8 +- 7 files changed, 136 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1554ff5564..d461027e77 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2521,6 +2521,7 @@ dependencies = [ "lighthouse_network", "lighthouse_version", "logging", + "lru", "network", "parking_lot 0.12.1", "safe_arith", diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs index 74a27d5f75..4b8b809d3f 100644 --- a/beacon_node/beacon_chain/src/block_reward.rs +++ b/beacon_node/beacon_chain/src/block_reward.rs @@ -2,7 +2,7 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::lighthouse::{AttestationRewards, BlockReward, BlockRewardMeta}; use operation_pool::{AttMaxCover, MaxCover}; use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards; -use types::{BeaconBlockRef, BeaconState, EthSpec, ExecPayload, Hash256, RelativeEpoch}; +use types::{BeaconBlockRef, BeaconState, EthSpec, ExecPayload, Hash256}; impl BeaconChain { pub fn compute_block_reward>( @@ -10,13 +10,13 @@ impl BeaconChain { block: BeaconBlockRef<'_, T::EthSpec, Payload>, block_root: Hash256, state: &BeaconState, + include_attestations: bool, ) -> Result { if block.slot() != state.slot() { return Err(BeaconChainError::BlockRewardSlotError); } - let active_indices = state.get_cached_active_validator_indices(RelativeEpoch::Current)?; - let total_active_balance = state.get_total_balance(active_indices, &self.spec)?; + let total_active_balance = state.get_total_active_balance()?; let mut per_attestation_rewards = block .body() .attestations() @@ -60,11 +60,24 @@ impl BeaconChain { .map(|cover| 
cover.fresh_validators_rewards) .collect(); + // Add the attestation data if desired. + let attestations = if include_attestations { + block + .body() + .attestations() + .iter() + .map(|a| a.data.clone()) + .collect() + } else { + vec![] + }; + let attestation_rewards = AttestationRewards { total: attestation_total, prev_epoch_total, curr_epoch_total, per_attestation_rewards, + attestations, }; // Sync committee rewards. diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index afdbaf13ee..c791a35f68 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1235,7 +1235,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { if let Some(ref event_handler) = chain.event_handler { if event_handler.has_block_reward_subscribers() { let block_reward = - chain.compute_block_reward(block.message(), block_root, &state)?; + chain.compute_block_reward(block.message(), block_root, &state, true)?; event_handler.register(EventKind::BlockReward(block_reward)); } } diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index a34618c2ef..9dd2af7d17 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -31,7 +31,7 @@ execution_layer = {path = "../execution_layer"} parking_lot = "0.12.0" safe_arith = {path = "../../consensus/safe_arith"} task_executor = { path = "../../common/task_executor" } - +lru = "0.7.7" [dev-dependencies] store = { path = "../store" } diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 154773aa95..0555037210 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -1,10 +1,17 @@ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::lighthouse::{BlockReward, BlockRewardsQuery}; -use slog::{warn, Logger}; +use lru::LruCache; 
+use slog::{debug, warn, Logger}; use state_processing::BlockReplayer; use std::sync::Arc; -use warp_utils::reject::{beacon_chain_error, beacon_state_error, custom_bad_request}; +use types::BeaconBlock; +use warp_utils::reject::{ + beacon_chain_error, beacon_state_error, custom_bad_request, custom_server_error, +}; +const STATE_CACHE_SIZE: usize = 2; + +/// Fetch block rewards for blocks from the canonical chain. pub fn get_block_rewards( query: BlockRewardsQuery, chain: Arc>, @@ -50,8 +57,12 @@ pub fn get_block_rewards( let block_replayer = BlockReplayer::new(state, &chain.spec) .pre_block_hook(Box::new(|state, block| { // Compute block reward. - let block_reward = - chain.compute_block_reward(block.message(), block.canonical_root(), state)?; + let block_reward = chain.compute_block_reward( + block.message(), + block.canonical_root(), + state, + query.include_attestations, + )?; block_rewards.push(block_reward); Ok(()) })) @@ -78,3 +89,84 @@ pub fn get_block_rewards( Ok(block_rewards) } + +/// Compute block rewards for blocks passed in as input. +pub fn compute_block_rewards( + blocks: Vec>, + chain: Arc>, + log: Logger, +) -> Result, warp::Rejection> { + let mut block_rewards = Vec::with_capacity(blocks.len()); + let mut state_cache = LruCache::new(STATE_CACHE_SIZE); + + for block in blocks { + let parent_root = block.parent_root(); + + // Check LRU cache for a constructed state from a previous iteration. + let state = if let Some(state) = state_cache.get(&(parent_root, block.slot())) { + debug!( + log, + "Re-using cached state for block rewards"; + "parent_root" => ?parent_root, + "slot" => block.slot(), + ); + state + } else { + debug!( + log, + "Fetching state for block rewards"; + "parent_root" => ?parent_root, + "slot" => block.slot() + ); + let parent_block = chain + .get_blinded_block(&parent_root) + .map_err(beacon_chain_error)? 
+ .ok_or_else(|| { + custom_bad_request(format!( + "parent block not known or not canonical: {:?}", + parent_root + )) + })?; + + let parent_state = chain + .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .map_err(beacon_chain_error)? + .ok_or_else(|| { + custom_bad_request(format!( + "no state known for parent block: {:?}", + parent_root + )) + })?; + + let block_replayer = BlockReplayer::new(parent_state, &chain.spec) + .no_signature_verification() + .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) + .minimal_block_root_verification() + .apply_blocks(vec![], Some(block.slot())) + .map_err(beacon_chain_error)?; + + if block_replayer.state_root_miss() { + warn!( + log, + "Block reward state root miss"; + "parent_slot" => parent_block.slot(), + "slot" => block.slot(), + ); + } + + state_cache + .get_or_insert((parent_root, block.slot()), || block_replayer.into_state()) + .ok_or_else(|| { + custom_server_error("LRU cache insert should always succeed".into()) + })? + }; + + // Compute block reward. 
+ let block_reward = chain + .compute_block_reward(block.to_ref(), block.canonical_root(), state, true) + .map_err(beacon_chain_error)?; + block_rewards.push(block_reward); + } + + Ok(block_rewards) +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index be08b3f737..379033a113 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2823,6 +2823,18 @@ pub fn serve( blocking_json_task(move || block_rewards::get_block_rewards(query, chain, log)) }); + // POST lighthouse/analysis/block_rewards + let post_lighthouse_block_rewards = warp::path("lighthouse") + .and(warp::path("analysis")) + .and(warp::path("block_rewards")) + .and(warp::body::json()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and(log_filter.clone()) + .and_then(|blocks, chain, log| { + blocking_json_task(move || block_rewards::compute_block_rewards(blocks, chain, log)) + }); + // GET lighthouse/analysis/attestation_performance/{index} let get_lighthouse_attestation_performance = warp::path("lighthouse") .and(warp::path("analysis")) @@ -2998,7 +3010,8 @@ pub fn serve( .or(post_validator_prepare_beacon_proposer.boxed()) .or(post_lighthouse_liveness.boxed()) .or(post_lighthouse_database_reconstruct.boxed()) - .or(post_lighthouse_database_historical_blocks.boxed()), + .or(post_lighthouse_database_historical_blocks.boxed()) + .or(post_lighthouse_block_rewards.boxed()), )) .recover(warp_utils::reject::handle_rejection) .with(slog_logging(log.clone())) diff --git a/common/eth2/src/lighthouse/block_rewards.rs b/common/eth2/src/lighthouse/block_rewards.rs index 186cbd888c..38070f3539 100644 --- a/common/eth2/src/lighthouse/block_rewards.rs +++ b/common/eth2/src/lighthouse/block_rewards.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; -use types::{Hash256, Slot}; +use types::{AttestationData, Hash256, Slot}; /// Details about the rewards paid to a block proposer for proposing a block. 
/// @@ -42,6 +42,9 @@ pub struct AttestationRewards { /// /// Each element of the vec is a map from validator index to reward. pub per_attestation_rewards: Vec>, + /// The attestations themselves (optional). + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub attestations: Vec, } /// Query parameters for the `/lighthouse/block_rewards` endpoint. @@ -51,4 +54,7 @@ pub struct BlockRewardsQuery { pub start_slot: Slot, /// Upper slot limit for block rewards returned (inclusive). pub end_slot: Slot, + /// Include the full attestations themselves? + #[serde(default)] + pub include_attestations: bool, } From 5de00b7ee821dc28f80285acbce083edda4f14a1 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 29 Jun 2022 09:07:09 +0000 Subject: [PATCH 045/184] Unify execution layer endpoints (#3214) ## Issue Addressed Resolves #3069 ## Proposed Changes Unify the `eth1-endpoints` and `execution-endpoints` flags in a backwards compatible way as described in https://github.com/sigp/lighthouse/issues/3069#issuecomment-1134219221 Users have 2 options: 1. Use multiple non auth execution endpoints for deposit processing pre-merge 2. Use a single jwt authenticated execution endpoint for both execution layer and deposit processing post merge Related https://github.com/sigp/lighthouse/issues/3118 To enable jwt authenticated deposit processing, this PR removes the calls to `net_version` as the `net` namespace is not exposed in the auth server in execution clients. Moving away from using `networkId` is a good step in my opinion as it doesn't provide us with any added guarantees over `chainId`. 
See https://github.com/ethereum/consensus-specs/issues/2163 and https://github.com/sigp/lighthouse/issues/2115 Co-authored-by: Paul Hauner --- Cargo.lock | 11 +- beacon_node/client/Cargo.toml | 2 +- beacon_node/client/src/config.rs | 5 +- beacon_node/eth1/Cargo.toml | 3 +- beacon_node/eth1/src/deposit_cache.rs | 31 +- beacon_node/eth1/src/deposit_log.rs | 107 ---- beacon_node/eth1/src/http.rs | 489 ------------------ beacon_node/eth1/src/lib.rs | 8 +- beacon_node/eth1/src/service.rs | 186 ++++--- beacon_node/eth1/tests/test.rs | 246 ++++----- beacon_node/execution_layer/Cargo.toml | 4 +- beacon_node/execution_layer/src/engine_api.rs | 2 +- .../execution_layer/src/engine_api/auth.rs | 33 ++ .../execution_layer/src/engine_api/http.rs | 484 ++++++++++++++++- beacon_node/execution_layer/src/lib.rs | 15 +- .../genesis/src/eth1_genesis_service.rs | 2 +- beacon_node/genesis/src/lib.rs | 1 + beacon_node/genesis/tests/tests.rs | 9 +- beacon_node/src/cli.rs | 66 +-- beacon_node/src/config.rs | 145 ++++-- lcli/src/eth1_genesis.rs | 5 +- lighthouse/Cargo.toml | 1 + lighthouse/tests/beacon_node.rs | 191 +++++-- scripts/local_testnet/ganache_test_node.sh | 3 +- scripts/local_testnet/setup.sh | 2 +- scripts/local_testnet/vars.env | 2 +- scripts/tests/vars.env | 2 +- testing/eth1_test_rig/src/ganache.rs | 22 +- testing/eth1_test_rig/src/lib.rs | 4 +- testing/simulator/Cargo.toml | 1 + testing/simulator/src/eth1_sim.rs | 23 +- 31 files changed, 1113 insertions(+), 992 deletions(-) delete mode 100644 beacon_node/eth1/src/deposit_log.rs delete mode 100644 beacon_node/eth1/src/http.rs diff --git a/Cargo.lock b/Cargo.lock index d461027e77..3dbe005658 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -744,6 +744,7 @@ dependencies = [ "sensitive_url", "serde", "serde_derive", + "serde_yaml", "slasher", "slasher_service", "slog", @@ -753,7 +754,6 @@ dependencies = [ "time 0.3.9", "timer", "tokio", - "toml", "types", ] @@ -1530,6 +1530,7 @@ dependencies = [ "eth2", "eth2_ssz", "eth2_ssz_derive", 
+ "execution_layer", "fallback", "futures", "hex", @@ -1541,12 +1542,12 @@ dependencies = [ "sensitive_url", "serde", "serde_json", + "serde_yaml", "slog", "sloggers", "state_processing", "task_executor", "tokio", - "toml", "tree_hash", "types", "web3", @@ -1877,8 +1878,9 @@ dependencies = [ "async-trait", "bytes", "environment", - "eth1", + "eth2", "eth2_serde_utils", + "eth2_ssz", "eth2_ssz_types", "ethers-core", "exit-future", @@ -1896,6 +1898,7 @@ dependencies = [ "serde_json", "slog", "slot_clock", + "state_processing", "task_executor", "tempfile", "tokio", @@ -3397,6 +3400,7 @@ dependencies = [ "directory", "env_logger 0.9.0", "environment", + "eth1", "eth2_hashing", "eth2_network_config", "futures", @@ -5636,6 +5640,7 @@ dependencies = [ "env_logger 0.9.0", "eth1", "eth1_test_rig", + "execution_layer", "futures", "node_test_rig", "parking_lot 0.12.1", diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 3079d7744e..d01f2505cc 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2021" [dev-dependencies] -toml = "0.5.6" +serde_yaml = "0.8.13" [dependencies] beacon_chain = { path = "../beacon_chain" } diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 13614af12e..b13ca8f489 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -198,7 +198,8 @@ mod tests { #[test] fn serde() { let config = Config::default(); - let serialized = toml::to_string(&config).expect("should serde encode default config"); - toml::from_str::(&serialized).expect("should serde decode default config"); + let serialized = + serde_yaml::to_string(&config).expect("should serde encode default config"); + serde_yaml::from_str::(&serialized).expect("should serde decode default config"); } } diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index ecf3c19e30..403869cc9c 100644 --- 
a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -6,13 +6,14 @@ edition = "2021" [dev-dependencies] eth1_test_rig = { path = "../../testing/eth1_test_rig" } -toml = "0.5.6" +serde_yaml = "0.8.13" web3 = { version = "0.18.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } sloggers = { version = "2.1.1", features = ["json"] } environment = { path = "../../lighthouse/environment" } [dependencies] reqwest = { version = "0.11.0", features = ["native-tls-vendored"] } +execution_layer = { path = "../execution_layer" } futures = "0.3.7" serde_json = "1.0.58" serde = { version = "1.0.116", features = ["derive"] } diff --git a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs index 7c67893fb3..078e3602f5 100644 --- a/beacon_node/eth1/src/deposit_cache.rs +++ b/beacon_node/eth1/src/deposit_cache.rs @@ -1,4 +1,4 @@ -use crate::DepositLog; +use execution_layer::http::deposit_log::DepositLog; use ssz_derive::{Decode, Encode}; use state_processing::common::DepositDataTree; use std::cmp::Ordering; @@ -297,12 +297,37 @@ impl DepositCache { #[cfg(test)] pub mod tests { use super::*; - use crate::deposit_log::tests::EXAMPLE_LOG; - use crate::http::Log; + use execution_layer::http::deposit_log::Log; use types::{EthSpec, MainnetEthSpec}; pub const TREE_DEPTH: usize = 32; + /// The data from a deposit event, using the v0.8.3 version of the deposit contract. 
+ pub const EXAMPLE_LOG: &[u8] = &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 167, 108, 6, 69, 88, 17, 3, 51, 6, 4, 158, 232, 82, + 248, 218, 2, 71, 219, 55, 102, 86, 125, 136, 203, 36, 77, 64, 213, 43, 52, 175, 154, 239, + 50, 142, 52, 201, 77, 54, 239, 0, 229, 22, 46, 139, 120, 62, 240, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 64, 89, 115, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 140, 74, 175, 158, 209, 20, 206, + 30, 63, 215, 238, 113, 60, 132, 216, 211, 100, 186, 202, 71, 34, 200, 160, 225, 212, 213, + 119, 88, 51, 80, 101, 74, 2, 45, 78, 153, 12, 192, 44, 51, 77, 40, 10, 72, 246, 34, 193, + 187, 22, 95, 4, 211, 245, 224, 13, 162, 21, 163, 54, 225, 22, 124, 3, 56, 14, 81, 122, 189, + 149, 250, 251, 159, 22, 77, 94, 157, 197, 196, 253, 110, 201, 88, 193, 246, 136, 226, 221, + 18, 113, 232, 105, 100, 114, 103, 237, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + fn example_log() -> DepositLog { let spec = MainnetEthSpec::default_spec(); diff --git a/beacon_node/eth1/src/deposit_log.rs b/beacon_node/eth1/src/deposit_log.rs deleted file mode 100644 index 1b3cfa01a0..0000000000 --- a/beacon_node/eth1/src/deposit_log.rs +++ /dev/null @@ -1,107 +0,0 @@ -use super::http::Log; -use ssz::Decode; -use state_processing::per_block_processing::signature_sets::deposit_pubkey_signature_message; -use types::{ChainSpec, DepositData, Hash256, PublicKeyBytes, SignatureBytes}; - -pub use eth2::lighthouse::DepositLog; - -/// The following constants define the layout of bytes in the deposit contract `DepositEvent`. The -/// event bytes are formatted according to the Ethereum ABI. -const PUBKEY_START: usize = 192; -const PUBKEY_LEN: usize = 48; -const CREDS_START: usize = PUBKEY_START + 64 + 32; -const CREDS_LEN: usize = 32; -const AMOUNT_START: usize = CREDS_START + 32 + 32; -const AMOUNT_LEN: usize = 8; -const SIG_START: usize = AMOUNT_START + 32 + 32; -const SIG_LEN: usize = 96; -const INDEX_START: usize = SIG_START + 96 + 32; -const INDEX_LEN: usize = 8; - -impl Log { - /// Attempts to parse a raw `Log` from the deposit contract into a `DepositLog`. 
- pub fn to_deposit_log(&self, spec: &ChainSpec) -> Result { - let bytes = &self.data; - - let pubkey = bytes - .get(PUBKEY_START..PUBKEY_START + PUBKEY_LEN) - .ok_or("Insufficient bytes for pubkey")?; - let withdrawal_credentials = bytes - .get(CREDS_START..CREDS_START + CREDS_LEN) - .ok_or("Insufficient bytes for withdrawal credential")?; - let amount = bytes - .get(AMOUNT_START..AMOUNT_START + AMOUNT_LEN) - .ok_or("Insufficient bytes for amount")?; - let signature = bytes - .get(SIG_START..SIG_START + SIG_LEN) - .ok_or("Insufficient bytes for signature")?; - let index = bytes - .get(INDEX_START..INDEX_START + INDEX_LEN) - .ok_or("Insufficient bytes for index")?; - - let deposit_data = DepositData { - pubkey: PublicKeyBytes::from_ssz_bytes(pubkey) - .map_err(|e| format!("Invalid pubkey ssz: {:?}", e))?, - withdrawal_credentials: Hash256::from_ssz_bytes(withdrawal_credentials) - .map_err(|e| format!("Invalid withdrawal_credentials ssz: {:?}", e))?, - amount: u64::from_ssz_bytes(amount) - .map_err(|e| format!("Invalid amount ssz: {:?}", e))?, - signature: SignatureBytes::from_ssz_bytes(signature) - .map_err(|e| format!("Invalid signature ssz: {:?}", e))?, - }; - - let signature_is_valid = deposit_pubkey_signature_message(&deposit_data, spec) - .map_or(false, |(public_key, signature, msg)| { - signature.verify(&public_key, msg) - }); - - Ok(DepositLog { - deposit_data, - block_number: self.block_number, - index: u64::from_ssz_bytes(index).map_err(|e| format!("Invalid index ssz: {:?}", e))?, - signature_is_valid, - }) - } -} - -#[cfg(test)] -pub mod tests { - use crate::http::Log; - use types::{EthSpec, MainnetEthSpec}; - - /// The data from a deposit event, using the v0.8.3 version of the deposit contract. 
- pub const EXAMPLE_LOG: &[u8] = &[ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 167, 108, 6, 69, 88, 17, 3, 51, 6, 4, 158, 232, 82, - 248, 218, 2, 71, 219, 55, 102, 86, 125, 136, 203, 36, 77, 64, 213, 43, 52, 175, 154, 239, - 50, 142, 52, 201, 77, 54, 239, 0, 229, 22, 46, 139, 120, 62, 240, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 64, 89, 115, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 140, 74, 175, 158, 209, 20, 206, - 30, 63, 215, 238, 113, 60, 132, 216, 211, 100, 186, 202, 71, 34, 200, 160, 225, 212, 213, - 119, 88, 51, 80, 101, 74, 2, 45, 78, 153, 12, 192, 44, 51, 77, 40, 10, 72, 246, 34, 193, - 187, 22, 95, 4, 211, 245, 224, 13, 162, 21, 163, 54, 225, 22, 124, 3, 56, 14, 81, 122, 189, - 149, 250, 251, 159, 22, 77, 94, 157, 197, 196, 253, 110, 201, 88, 193, 246, 136, 226, 221, - 18, 113, 232, 105, 100, 114, 103, 237, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ]; - - #[test] - fn can_parse_example_log() { - let log = Log { - block_number: 42, - data: EXAMPLE_LOG.to_vec(), - }; - log.to_deposit_log(&MainnetEthSpec::default_spec()) - .expect("should decode log"); - } -} diff --git a/beacon_node/eth1/src/http.rs b/beacon_node/eth1/src/http.rs deleted file mode 100644 index 71b1b5b4b2..0000000000 --- a/beacon_node/eth1/src/http.rs +++ /dev/null @@ -1,489 +0,0 @@ -//! Provides a very minimal set of functions for interfacing with the eth2 deposit contract via an -//! eth1 HTTP JSON-RPC endpoint. -//! -//! All remote functions return a future (i.e., are async). -//! -//! Does not use a web3 library, instead it uses `reqwest` (`hyper`) to call the remote endpoint -//! and `serde` to decode the response. -//! -//! ## Note -//! -//! There is no ABI parsing here, all function signatures and topics are hard-coded as constants. - -use futures::future::TryFutureExt; -use reqwest::{header::CONTENT_TYPE, ClientBuilder, StatusCode}; -use sensitive_url::SensitiveUrl; -use serde::{Deserialize, Serialize}; -use serde_json::{json, Value}; -use std::fmt; -use std::ops::Range; -use std::str::FromStr; -use std::time::Duration; -use types::Hash256; - -/// `keccak("DepositEvent(bytes,bytes,bytes,bytes,bytes)")` -pub const DEPOSIT_EVENT_TOPIC: &str = - "0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"; -/// `keccak("get_deposit_root()")[0..4]` -pub const DEPOSIT_ROOT_FN_SIGNATURE: &str = "0xc5f2892f"; -/// `keccak("get_deposit_count()")[0..4]` -pub const DEPOSIT_COUNT_FN_SIGNATURE: &str = "0x621fd130"; - -/// Number of bytes in deposit contract deposit root response. -pub const DEPOSIT_COUNT_RESPONSE_BYTES: usize = 96; -/// Number of bytes in deposit contract deposit root (value only). -pub const DEPOSIT_ROOT_BYTES: usize = 32; - -/// This error is returned during a `chainId` call by Geth. 
-pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; - -/// Represents an eth1 chain/network id. -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub enum Eth1Id { - Goerli, - Mainnet, - Custom(u64), -} - -/// Used to identify a block when querying the Eth1 node. -#[derive(Clone, Copy)] -pub enum BlockQuery { - Number(u64), - Latest, -} - -/// Represents an error received from a remote procecdure call. -#[derive(Debug, Serialize, Deserialize)] -pub enum RpcError { - NoResultField, - Eip155Error, - InvalidJson(String), - Error(String), -} - -impl fmt::Display for RpcError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - RpcError::NoResultField => write!(f, "No result field in response"), - RpcError::Eip155Error => write!(f, "Not synced past EIP-155"), - RpcError::InvalidJson(e) => write!(f, "Malformed JSON received: {}", e), - RpcError::Error(s) => write!(f, "{}", s), - } - } -} - -impl From for String { - fn from(e: RpcError) -> String { - e.to_string() - } -} - -impl Into for Eth1Id { - fn into(self) -> u64 { - match self { - Eth1Id::Mainnet => 1, - Eth1Id::Goerli => 5, - Eth1Id::Custom(id) => id, - } - } -} - -impl From for Eth1Id { - fn from(id: u64) -> Self { - let into = |x: Eth1Id| -> u64 { x.into() }; - match id { - id if id == into(Eth1Id::Mainnet) => Eth1Id::Mainnet, - id if id == into(Eth1Id::Goerli) => Eth1Id::Goerli, - id => Eth1Id::Custom(id), - } - } -} - -impl FromStr for Eth1Id { - type Err = String; - - fn from_str(s: &str) -> Result { - s.parse::() - .map(Into::into) - .map_err(|e| format!("Failed to parse eth1 network id {}", e)) - } -} - -/// Get the eth1 network id of the given endpoint. -pub async fn get_network_id(endpoint: &SensitiveUrl, timeout: Duration) -> Result { - let response_body = send_rpc_request(endpoint, "net_version", json!([]), timeout).await?; - Eth1Id::from_str( - response_result_or_error(&response_body)? 
- .as_str() - .ok_or("Data was not string")?, - ) -} - -/// Get the eth1 chain id of the given endpoint. -pub async fn get_chain_id(endpoint: &SensitiveUrl, timeout: Duration) -> Result { - let response_body: String = - send_rpc_request(endpoint, "eth_chainId", json!([]), timeout).await?; - - match response_result_or_error(&response_body) { - Ok(chain_id) => { - hex_to_u64_be(chain_id.as_str().ok_or("Data was not string")?).map(|id| id.into()) - } - // Geth returns this error when it's syncing lower blocks. Simply map this into `0` since - // Lighthouse does not raise errors for `0`, it simply waits for it to change. - Err(RpcError::Eip155Error) => Ok(Eth1Id::Custom(0)), - Err(e) => Err(e.to_string()), - } -} - -#[derive(Debug, PartialEq, Clone)] -pub struct Block { - pub hash: Hash256, - pub timestamp: u64, - pub number: u64, -} - -/// Returns the current block number. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub async fn get_block_number(endpoint: &SensitiveUrl, timeout: Duration) -> Result { - let response_body = send_rpc_request(endpoint, "eth_blockNumber", json!([]), timeout).await?; - hex_to_u64_be( - response_result_or_error(&response_body) - .map_err(|e| format!("eth_blockNumber failed: {}", e))? - .as_str() - .ok_or("Data was not string")?, - ) - .map_err(|e| format!("Failed to get block number: {}", e)) -} - -/// Gets a block hash by block number. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub async fn get_block( - endpoint: &SensitiveUrl, - query: BlockQuery, - timeout: Duration, -) -> Result { - let query_param = match query { - BlockQuery::Number(block_number) => format!("0x{:x}", block_number), - BlockQuery::Latest => "latest".to_string(), - }; - let params = json!([ - query_param, - false // do not return full tx objects. 
- ]); - - let response_body = send_rpc_request(endpoint, "eth_getBlockByNumber", params, timeout).await?; - let response = response_result_or_error(&response_body) - .map_err(|e| format!("eth_getBlockByNumber failed: {}", e))?; - - let hash: Vec = hex_to_bytes( - response - .get("hash") - .ok_or("No hash for block")? - .as_str() - .ok_or("Block hash was not string")?, - )?; - let hash: Hash256 = if hash.len() == 32 { - Hash256::from_slice(&hash) - } else { - return Err(format!("Block has was not 32 bytes: {:?}", hash)); - }; - - let timestamp = hex_to_u64_be( - response - .get("timestamp") - .ok_or("No timestamp for block")? - .as_str() - .ok_or("Block timestamp was not string")?, - )?; - - let number = hex_to_u64_be( - response - .get("number") - .ok_or("No number for block")? - .as_str() - .ok_or("Block number was not string")?, - )?; - - if number <= usize::max_value() as u64 { - Ok(Block { - hash, - timestamp, - number, - }) - } else { - Err(format!("Block number {} is larger than a usize", number)) - } - .map_err(|e| format!("Failed to get block number: {}", e)) -} - -/// Returns the value of the `get_deposit_count()` call at the given `address` for the given -/// `block_number`. -/// -/// Assumes that the `address` has the same ABI as the eth2 deposit contract. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. 
-pub async fn get_deposit_count( - endpoint: &SensitiveUrl, - address: &str, - block_number: u64, - timeout: Duration, -) -> Result, String> { - let result = call( - endpoint, - address, - DEPOSIT_COUNT_FN_SIGNATURE, - block_number, - timeout, - ) - .await?; - match result { - None => Err("Deposit root response was none".to_string()), - Some(bytes) => { - if bytes.is_empty() { - Ok(None) - } else if bytes.len() == DEPOSIT_COUNT_RESPONSE_BYTES { - let mut array = [0; 8]; - array.copy_from_slice(&bytes[32 + 32..32 + 32 + 8]); - Ok(Some(u64::from_le_bytes(array))) - } else { - Err(format!( - "Deposit count response was not {} bytes: {:?}", - DEPOSIT_COUNT_RESPONSE_BYTES, bytes - )) - } - } - } -} - -/// Returns the value of the `get_hash_tree_root()` call at the given `block_number`. -/// -/// Assumes that the `address` has the same ABI as the eth2 deposit contract. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub async fn get_deposit_root( - endpoint: &SensitiveUrl, - address: &str, - block_number: u64, - timeout: Duration, -) -> Result, String> { - let result = call( - endpoint, - address, - DEPOSIT_ROOT_FN_SIGNATURE, - block_number, - timeout, - ) - .await?; - match result { - None => Err("Deposit root response was none".to_string()), - Some(bytes) => { - if bytes.is_empty() { - Ok(None) - } else if bytes.len() == DEPOSIT_ROOT_BYTES { - Ok(Some(Hash256::from_slice(&bytes))) - } else { - Err(format!( - "Deposit root response was not {} bytes: {:?}", - DEPOSIT_ROOT_BYTES, bytes - )) - } - } - } -} - -/// Performs a instant, no-transaction call to the contract `address` with the given `0x`-prefixed -/// `hex_data`. -/// -/// Returns bytes, if any. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -async fn call( - endpoint: &SensitiveUrl, - address: &str, - hex_data: &str, - block_number: u64, - timeout: Duration, -) -> Result>, String> { - let params = json! 
([ - { - "to": address, - "data": hex_data, - }, - format!("0x{:x}", block_number) - ]); - - let response_body = send_rpc_request(endpoint, "eth_call", params, timeout).await?; - - match response_result_or_error(&response_body) { - Ok(result) => { - let hex = result - .as_str() - .map(|s| s.to_string()) - .ok_or("'result' value was not a string")?; - - Ok(Some(hex_to_bytes(&hex)?)) - } - // It's valid for `eth_call` to return without a result. - Err(RpcError::NoResultField) => Ok(None), - Err(e) => Err(format!("eth_call failed: {}", e)), - } -} - -/// A reduced set of fields from an Eth1 contract log. -#[derive(Debug, PartialEq, Clone)] -pub struct Log { - pub(crate) block_number: u64, - pub(crate) data: Vec, -} - -/// Returns logs for the `DEPOSIT_EVENT_TOPIC`, for the given `address` in the given -/// `block_height_range`. -/// -/// It's not clear from the Ethereum JSON-RPC docs if this range is inclusive or not. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub async fn get_deposit_logs_in_range( - endpoint: &SensitiveUrl, - address: &str, - block_height_range: Range, - timeout: Duration, -) -> Result, String> { - let params = json! ([{ - "address": address, - "topics": [DEPOSIT_EVENT_TOPIC], - "fromBlock": format!("0x{:x}", block_height_range.start), - "toBlock": format!("0x{:x}", block_height_range.end), - }]); - - let response_body = send_rpc_request(endpoint, "eth_getLogs", params, timeout).await?; - response_result_or_error(&response_body) - .map_err(|e| format!("eth_getLogs failed: {}", e))? - .as_array() - .cloned() - .ok_or("'result' value was not an array")? - .into_iter() - .map(|value| { - let block_number = value - .get("blockNumber") - .ok_or("No block number field in log")? - .as_str() - .ok_or("Block number was not string")?; - - let data = value - .get("data") - .ok_or("No block number field in log")? 
- .as_str() - .ok_or("Data was not string")?; - - Ok(Log { - block_number: hex_to_u64_be(block_number)?, - data: hex_to_bytes(data)?, - }) - }) - .collect::, String>>() - .map_err(|e| format!("Failed to get logs in range: {}", e)) -} - -/// Sends an RPC request to `endpoint`, using a POST with the given `body`. -/// -/// Tries to receive the response and parse the body as a `String`. -pub async fn send_rpc_request( - endpoint: &SensitiveUrl, - method: &str, - params: Value, - timeout: Duration, -) -> Result { - let body = json! ({ - "jsonrpc": "2.0", - "method": method, - "params": params, - "id": 1 - }) - .to_string(); - - // Note: it is not ideal to create a new client for each request. - // - // A better solution would be to create some struct that contains a built client and pass it - // around (similar to the `web3` crate's `Transport` structs). - let response = ClientBuilder::new() - .timeout(timeout) - .build() - .expect("The builder should always build a client") - .post(endpoint.full.clone()) - .header(CONTENT_TYPE, "application/json") - .body(body) - .send() - .map_err(|e| format!("Request failed: {:?}", e)) - .await?; - if response.status() != StatusCode::OK { - return Err(format!( - "Response HTTP status was not 200 OK: {}.", - response.status() - )); - }; - let encoding = response - .headers() - .get(CONTENT_TYPE) - .ok_or("No content-type header in response")? 
- .to_str() - .map(|s| s.to_string()) - .map_err(|e| format!("Failed to parse content-type header: {}", e))?; - - response - .bytes() - .map_err(|e| format!("Failed to receive body: {:?}", e)) - .await - .and_then(move |bytes| match encoding.as_str() { - "application/json" => Ok(bytes), - "application/json; charset=utf-8" => Ok(bytes), - other => Err(format!("Unsupported encoding: {}", other)), - }) - .map(|bytes| String::from_utf8_lossy(&bytes).into_owned()) - .map_err(|e| format!("Failed to receive body: {:?}", e)) -} - -/// Accepts an entire HTTP body (as a string) and returns either the `result` field or the `error['message']` field, as a serde `Value`. -fn response_result_or_error(response: &str) -> Result { - let json = serde_json::from_str::(response) - .map_err(|e| RpcError::InvalidJson(e.to_string()))?; - - if let Some(error) = json.get("error").and_then(|e| e.get("message")) { - let error = error.to_string(); - if error.contains(EIP155_ERROR_STR) { - Err(RpcError::Eip155Error) - } else { - Err(RpcError::Error(error)) - } - } else { - json.get("result").cloned().ok_or(RpcError::NoResultField) - } -} - -/// Parses a `0x`-prefixed, **big-endian** hex string as a u64. -/// -/// Note: the JSON-RPC encodes integers as big-endian. The deposit contract uses little-endian. -/// Therefore, this function is only useful for numbers encoded by the JSON RPC. -/// -/// E.g., `0x01 == 1` -fn hex_to_u64_be(hex: &str) -> Result { - u64::from_str_radix(strip_prefix(hex)?, 16) - .map_err(|e| format!("Failed to parse hex as u64: {:?}", e)) -} - -/// Parses a `0x`-prefixed, big-endian hex string as bytes. -/// -/// E.g., `0x0102 == vec![1, 2]` -fn hex_to_bytes(hex: &str) -> Result, String> { - hex::decode(strip_prefix(hex)?).map_err(|e| format!("Failed to parse hex as bytes: {:?}", e)) -} - -/// Removes the `0x` prefix from some bytes. Returns an error if the prefix is not present. 
-fn strip_prefix(hex: &str) -> Result<&str, String> { - if let Some(stripped) = hex.strip_prefix("0x") { - Ok(stripped) - } else { - Err("Hex string did not start with `0x`".to_string()) - } -} diff --git a/beacon_node/eth1/src/lib.rs b/beacon_node/eth1/src/lib.rs index cf724201a4..f99d085250 100644 --- a/beacon_node/eth1/src/lib.rs +++ b/beacon_node/eth1/src/lib.rs @@ -3,17 +3,15 @@ extern crate lazy_static; mod block_cache; mod deposit_cache; -mod deposit_log; -pub mod http; mod inner; mod metrics; mod service; pub use block_cache::{BlockCache, Eth1Block}; pub use deposit_cache::DepositCache; -pub use deposit_log::DepositLog; +pub use execution_layer::http::deposit_log::DepositLog; pub use inner::SszEth1Cache; pub use service::{ - BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service, DEFAULT_CHAIN_ID, - DEFAULT_NETWORK_ID, + BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Eth1Endpoint, Service, + DEFAULT_CHAIN_ID, }; diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 15e2123e8a..36a637d2ae 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -2,12 +2,13 @@ use crate::metrics; use crate::{ block_cache::{BlockCache, Error as BlockCacheError, Eth1Block}, deposit_cache::{DepositCacheInsertOutcome, Error as DepositCacheError}, - http::{ - get_block, get_block_number, get_chain_id, get_deposit_logs_in_range, get_network_id, - BlockQuery, Eth1Id, - }, inner::{DepositUpdater, Inner}, }; +use execution_layer::auth::Auth; +use execution_layer::http::{ + deposit_methods::{BlockQuery, Eth1Id}, + HttpJsonRpc, +}; use fallback::{Fallback, FallbackError}; use futures::future::TryFutureExt; use parking_lot::{RwLock, RwLockReadGuard}; @@ -17,14 +18,13 @@ use slog::{crit, debug, error, info, trace, warn, Logger}; use std::fmt::Debug; use std::future::Future; use std::ops::{Range, RangeInclusive}; +use std::path::PathBuf; use std::sync::Arc; use std::time::{SystemTime, 
UNIX_EPOCH}; use tokio::sync::RwLock as TRwLock; use tokio::time::{interval_at, Duration, Instant}; use types::{ChainSpec, EthSpec, Unsigned}; -/// Indicates the default eth1 network id we use for the deposit contract. -pub const DEFAULT_NETWORK_ID: Eth1Id = Eth1Id::Goerli; /// Indicates the default eth1 chain id we use for the deposit contract. pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Goerli; /// Indicates the default eth1 endpoint. @@ -63,14 +63,14 @@ pub enum EndpointError { type EndpointState = Result<(), EndpointError>; pub struct EndpointWithState { - endpoint: SensitiveUrl, + client: HttpJsonRpc, state: TRwLock>, } impl EndpointWithState { - pub fn new(endpoint: SensitiveUrl) -> Self { + pub fn new(client: HttpJsonRpc) -> Self { Self { - endpoint, + client, state: TRwLock::new(None), } } @@ -89,7 +89,6 @@ async fn get_state(endpoint: &EndpointWithState) -> Option { /// is not usable. pub struct EndpointsCache { pub fallback: Fallback, - pub config_network_id: Eth1Id, pub config_chain_id: Eth1Id, pub log: Logger, } @@ -107,20 +106,14 @@ impl EndpointsCache { } crate::metrics::inc_counter_vec( &crate::metrics::ENDPOINT_REQUESTS, - &[&endpoint.endpoint.to_string()], + &[&endpoint.client.to_string()], ); - let state = endpoint_state( - &endpoint.endpoint, - &self.config_network_id, - &self.config_chain_id, - &self.log, - ) - .await; + let state = endpoint_state(&endpoint.client, &self.config_chain_id, &self.log).await; *value = Some(state.clone()); if state.is_err() { crate::metrics::inc_counter_vec( &crate::metrics::ENDPOINT_ERRORS, - &[&endpoint.endpoint.to_string()], + &[&endpoint.client.to_string()], ); crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 0); } else { @@ -136,7 +129,7 @@ impl EndpointsCache { func: F, ) -> Result<(O, usize), FallbackError> where - F: Fn(&'a SensitiveUrl) -> R, + F: Fn(&'a HttpJsonRpc) -> R, R: Future>, { let func = &func; @@ -144,12 +137,12 @@ impl EndpointsCache { .first_success(|endpoint| async move { match 
self.state(endpoint).await { Ok(()) => { - let endpoint_str = &endpoint.endpoint.to_string(); + let endpoint_str = &endpoint.client.to_string(); crate::metrics::inc_counter_vec( &crate::metrics::ENDPOINT_REQUESTS, &[endpoint_str], ); - match func(&endpoint.endpoint).await { + match func(&endpoint.client).await { Ok(t) => Ok(t), Err(t) => { crate::metrics::inc_counter_vec( @@ -186,8 +179,7 @@ impl EndpointsCache { /// Returns `Ok` if the endpoint is usable, i.e. is reachable and has a correct network id and /// chain id. Otherwise it returns `Err`. async fn endpoint_state( - endpoint: &SensitiveUrl, - config_network_id: &Eth1Id, + endpoint: &HttpJsonRpc, config_chain_id: &Eth1Id, log: &Logger, ) -> EndpointState { @@ -200,21 +192,9 @@ async fn endpoint_state( ); EndpointError::RequestFailed(e) }; - let network_id = get_network_id(endpoint, Duration::from_millis(STANDARD_TIMEOUT_MILLIS)) - .await - .map_err(error_connecting)?; - if &network_id != config_network_id { - warn!( - log, - "Invalid eth1 network id on endpoint. Please switch to correct network id"; - "endpoint" => %endpoint, - "action" => "trying fallbacks", - "expected" => format!("{:?}",config_network_id), - "received" => format!("{:?}",network_id), - ); - return Err(EndpointError::WrongNetworkId); - } - let chain_id = get_chain_id(endpoint, Duration::from_millis(STANDARD_TIMEOUT_MILLIS)) + + let chain_id = endpoint + .get_chain_id(Duration::from_millis(STANDARD_TIMEOUT_MILLIS)) .await .map_err(error_connecting)?; // Eth1 nodes return chain_id = 0 if the node is not synced @@ -253,7 +233,7 @@ pub enum HeadType { /// Returns the head block and the new block ranges relevant for deposits and the block cache /// from the given endpoint. 
async fn get_remote_head_and_new_block_ranges( - endpoint: &SensitiveUrl, + endpoint: &HttpJsonRpc, service: &Service, node_far_behind_seconds: u64, ) -> Result< @@ -315,14 +295,14 @@ async fn get_remote_head_and_new_block_ranges( /// Returns the range of new block numbers to be considered for the given head type from the given /// endpoint. async fn relevant_new_block_numbers_from_endpoint( - endpoint: &SensitiveUrl, + endpoint: &HttpJsonRpc, service: &Service, head_type: HeadType, ) -> Result>, SingleEndpointError> { - let remote_highest_block = - get_block_number(endpoint, Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS)) - .map_err(SingleEndpointError::GetBlockNumberFailed) - .await?; + let remote_highest_block = endpoint + .get_block_number(Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS)) + .map_err(SingleEndpointError::GetBlockNumberFailed) + .await?; service.relevant_new_block_numbers(remote_highest_block, None, head_type) } @@ -379,14 +359,41 @@ pub struct DepositCacheUpdateOutcome { pub logs_imported: usize, } +/// Supports either one authenticated jwt JSON-RPC endpoint **or** +/// multiple non-authenticated endpoints with fallback. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum Eth1Endpoint { + Auth { + endpoint: SensitiveUrl, + jwt_path: PathBuf, + jwt_id: Option, + jwt_version: Option, + }, + NoAuth(Vec), +} + +impl Eth1Endpoint { + fn len(&self) -> usize { + match &self { + Self::Auth { .. } => 1, + Self::NoAuth(urls) => urls.len(), + } + } + + pub fn get_endpoints(&self) -> Vec { + match &self { + Self::Auth { endpoint, .. } => vec![endpoint.clone()], + Self::NoAuth(endpoints) => endpoints.clone(), + } + } +} + #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { /// An Eth1 node (e.g., Geth) running a HTTP JSON-RPC endpoint. - pub endpoints: Vec, + pub endpoints: Eth1Endpoint, /// The address the `BlockCache` and `DepositCache` should assume is the canonical deposit contract. 
pub deposit_contract_address: String, - /// The eth1 network id where the deposit contract is deployed (Goerli/Mainnet). - pub network_id: Eth1Id, /// The eth1 chain id where the deposit contract is deployed (Goerli/Mainnet). pub chain_id: Eth1Id, /// Defines the first block that the `DepositCache` will start searching for deposit logs. @@ -461,10 +468,9 @@ impl Config { impl Default for Config { fn default() -> Self { Self { - endpoints: vec![SensitiveUrl::parse(DEFAULT_ETH1_ENDPOINT) - .expect("The default Eth1 endpoint must always be a valid URL.")], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(DEFAULT_ETH1_ENDPOINT) + .expect("The default Eth1 endpoint must always be a valid URL.")]), deposit_contract_address: "0x0000000000000000000000000000000000000000".into(), - network_id: DEFAULT_NETWORK_ID, chain_id: DEFAULT_CHAIN_ID, deposit_contract_deploy_block: 1, lowest_cached_block_number: 1, @@ -673,27 +679,45 @@ impl Service { } /// Builds a new `EndpointsCache` with empty states. - pub fn init_endpoints(&self) -> Arc { + pub fn init_endpoints(&self) -> Result, String> { let endpoints = self.config().endpoints.clone(); - let config_network_id = self.config().network_id.clone(); let config_chain_id = self.config().chain_id.clone(); + + let servers = match endpoints { + Eth1Endpoint::Auth { + jwt_path, + endpoint, + jwt_id, + jwt_version, + } => { + let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version) + .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?; + vec![HttpJsonRpc::new_with_auth(endpoint, auth) + .map_err(|e| format!("Failed to build auth enabled json rpc {:?}", e))?] 
+ } + Eth1Endpoint::NoAuth(urls) => urls + .into_iter() + .map(|url| { + HttpJsonRpc::new(url).map_err(|e| format!("Failed to build json rpc {:?}", e)) + }) + .collect::>()?, + }; let new_cache = Arc::new(EndpointsCache { - fallback: Fallback::new(endpoints.into_iter().map(EndpointWithState::new).collect()), - config_network_id, + fallback: Fallback::new(servers.into_iter().map(EndpointWithState::new).collect()), config_chain_id, log: self.log.clone(), }); let mut endpoints_cache = self.inner.endpoints_cache.write(); *endpoints_cache = Some(new_cache.clone()); - new_cache + Ok(new_cache) } /// Returns the cached `EndpointsCache` if it exists or builds a new one. - pub fn get_endpoints(&self) -> Arc { + pub fn get_endpoints(&self) -> Result, String> { let endpoints_cache = self.inner.endpoints_cache.read(); if let Some(cache) = endpoints_cache.clone() { - cache + Ok(cache) } else { drop(endpoints_cache); self.init_endpoints() @@ -711,7 +735,7 @@ impl Service { pub async fn update( &self, ) -> Result<(DepositCacheUpdateOutcome, BlockCacheUpdateOutcome), String> { - let endpoints = self.get_endpoints(); + let endpoints = self.get_endpoints()?; // Reset the state of any endpoints which have errored so their state can be redetermined. 
endpoints.reset_errorred_endpoints().await; @@ -738,7 +762,7 @@ impl Service { } } } - endpoints.fallback.map_format_error(|s| &s.endpoint, e) + endpoints.fallback.map_format_error(|s| &s.client, e) }; let process_err = |e: Error| match &e { @@ -988,15 +1012,15 @@ impl Service { */ let block_range_ref = &block_range; let logs = endpoints - .first_success(|e| async move { - get_deposit_logs_in_range( - e, - deposit_contract_address_ref, - block_range_ref.clone(), - Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS), - ) - .await - .map_err(SingleEndpointError::GetDepositLogsFailed) + .first_success(|endpoint| async move { + endpoint + .get_deposit_logs_in_range( + deposit_contract_address_ref, + block_range_ref.clone(), + Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS), + ) + .await + .map_err(SingleEndpointError::GetDepositLogsFailed) }) .await .map(|(res, _)| res) @@ -1305,7 +1329,7 @@ fn relevant_block_range( /// /// Performs three async calls to an Eth1 HTTP JSON RPC endpoint. async fn download_eth1_block( - endpoint: &SensitiveUrl, + endpoint: &HttpJsonRpc, cache: Arc, block_number_opt: Option, ) -> Result { @@ -1326,15 +1350,15 @@ async fn download_eth1_block( }); // Performs a `get_blockByNumber` call to an eth1 node. 
- let http_block = get_block( - endpoint, - block_number_opt - .map(BlockQuery::Number) - .unwrap_or_else(|| BlockQuery::Latest), - Duration::from_millis(GET_BLOCK_TIMEOUT_MILLIS), - ) - .map_err(SingleEndpointError::BlockDownloadFailed) - .await?; + let http_block = endpoint + .get_block( + block_number_opt + .map(BlockQuery::Number) + .unwrap_or_else(|| BlockQuery::Latest), + Duration::from_millis(GET_BLOCK_TIMEOUT_MILLIS), + ) + .map_err(SingleEndpointError::BlockDownloadFailed) + .await?; Ok(Eth1Block { hash: http_block.hash, @@ -1359,8 +1383,8 @@ mod tests { #[test] fn serde_serialize() { let serialized = - toml::to_string(&Config::default()).expect("Should serde encode default config"); - toml::from_str::(&serialized).expect("Should serde decode default config"); + serde_yaml::to_string(&Config::default()).expect("Should serde encode default config"); + serde_yaml::from_str::(&serialized).expect("Should serde decode default config"); } #[test] diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index 3fe3b3ca52..f7f3b6e703 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -1,9 +1,9 @@ #![cfg(test)] use environment::{Environment, EnvironmentBuilder}; -use eth1::http::{get_deposit_count, get_deposit_logs_in_range, get_deposit_root, Block, Log}; -use eth1::{Config, Service}; -use eth1::{DepositCache, DEFAULT_CHAIN_ID, DEFAULT_NETWORK_ID}; +use eth1::{Config, Eth1Endpoint, Service}; +use eth1::{DepositCache, DEFAULT_CHAIN_ID}; use eth1_test_rig::GanacheEth1Instance; +use execution_layer::http::{deposit_methods::*, HttpJsonRpc, Log}; use merkle_proof::verify_merkle_proof; use sensitive_url::SensitiveUrl; use slog::Logger; @@ -51,39 +51,39 @@ fn random_deposit_data() -> DepositData { } /// Blocking operation to get the deposit logs from the `deposit_contract`. 
-async fn blocking_deposit_logs(eth1: &GanacheEth1Instance, range: Range) -> Vec { - get_deposit_logs_in_range( - &SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ð1.deposit_contract.address(), - range, - timeout(), - ) - .await - .expect("should get logs") +async fn blocking_deposit_logs( + client: &HttpJsonRpc, + eth1: &GanacheEth1Instance, + range: Range, +) -> Vec { + client + .get_deposit_logs_in_range(ð1.deposit_contract.address(), range, timeout()) + .await + .expect("should get logs") } /// Blocking operation to get the deposit root from the `deposit_contract`. -async fn blocking_deposit_root(eth1: &GanacheEth1Instance, block_number: u64) -> Option { - get_deposit_root( - &SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ð1.deposit_contract.address(), - block_number, - timeout(), - ) - .await - .expect("should get deposit root") +async fn blocking_deposit_root( + client: &HttpJsonRpc, + eth1: &GanacheEth1Instance, + block_number: u64, +) -> Option { + client + .get_deposit_root(ð1.deposit_contract.address(), block_number, timeout()) + .await + .expect("should get deposit root") } /// Blocking operation to get the deposit count from the `deposit_contract`. 
-async fn blocking_deposit_count(eth1: &GanacheEth1Instance, block_number: u64) -> Option { - get_deposit_count( - &SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ð1.deposit_contract.address(), - block_number, - timeout(), - ) - .await - .expect("should get deposit count") +async fn blocking_deposit_count( + client: &HttpJsonRpc, + eth1: &GanacheEth1Instance, + block_number: u64, +) -> Option { + client + .get_deposit_count(ð1.deposit_contract.address(), block_number, timeout()) + .await + .expect("should get deposit count") } async fn get_block_number(web3: &Web3) -> u64 { @@ -95,7 +95,7 @@ async fn get_block_number(web3: &Web3) -> u64 { } async fn new_ganache_instance() -> Result { - GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), DEFAULT_CHAIN_ID.into()).await + GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()).await } mod eth1_cache { @@ -117,7 +117,10 @@ mod eth1_cache { let initial_block_number = get_block_number(&web3).await; let config = Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: initial_block_number, follow_distance, @@ -146,7 +149,7 @@ mod eth1_cache { eth1.ganache.evm_mine().await.expect("should mine block"); } - let endpoints = service.init_endpoints(); + let endpoints = service.init_endpoints().unwrap(); service .update_deposit_cache(None, &endpoints) @@ -198,7 +201,10 @@ mod eth1_cache { let service = Service::new( Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: get_block_number(&web3).await, follow_distance: 0, @@ -215,7 +221,7 @@ mod eth1_cache { 
eth1.ganache.evm_mine().await.expect("should mine block") } - let endpoints = service.init_endpoints(); + let endpoints = service.init_endpoints().unwrap(); service .update_deposit_cache(None, &endpoints) @@ -252,7 +258,10 @@ mod eth1_cache { let service = Service::new( Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: get_block_number(&web3).await, follow_distance: 0, @@ -267,7 +276,7 @@ mod eth1_cache { for _ in 0..cache_len / 2 { eth1.ganache.evm_mine().await.expect("should mine block") } - let endpoints = service.init_endpoints(); + let endpoints = service.init_endpoints().unwrap(); service .update_deposit_cache(None, &endpoints) .await @@ -302,7 +311,10 @@ mod eth1_cache { let service = Service::new( Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: get_block_number(&web3).await, follow_distance: 0, @@ -316,7 +328,7 @@ mod eth1_cache { eth1.ganache.evm_mine().await.expect("should mine block") } - let endpoints = service.init_endpoints(); + let endpoints = service.init_endpoints().unwrap(); futures::try_join!( service.update_deposit_cache(None, &endpoints), service.update_deposit_cache(None, &endpoints) @@ -354,7 +366,10 @@ mod deposit_tree { let service = Service::new( Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: start_block, follow_distance: 0, @@ -374,7 +389,7 @@ mod deposit_tree { 
.expect("should perform a deposit"); } - let endpoints = service.init_endpoints(); + let endpoints = service.init_endpoints().unwrap(); service .update_deposit_cache(None, &endpoints) @@ -434,7 +449,10 @@ mod deposit_tree { let service = Service::new( Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: start_block, lowest_cached_block_number: start_block, @@ -454,7 +472,7 @@ mod deposit_tree { .expect("should perform a deposit"); } - let endpoints = service.init_endpoints(); + let endpoints = service.init_endpoints().unwrap(); futures::try_join!( service.update_deposit_cache(None, &endpoints), service.update_deposit_cache(None, &endpoints) @@ -484,6 +502,8 @@ mod deposit_tree { let mut deposit_roots = vec![]; let mut deposit_counts = vec![]; + let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap()).unwrap(); + // Perform deposits to the smart contract, recording it's state along the way. for deposit in &deposits { deposit_contract @@ -492,12 +512,12 @@ mod deposit_tree { .expect("should perform a deposit"); let block_number = get_block_number(&web3).await; deposit_roots.push( - blocking_deposit_root(ð1, block_number) + blocking_deposit_root(&client, ð1, block_number) .await .expect("should get root if contract exists"), ); deposit_counts.push( - blocking_deposit_count(ð1, block_number) + blocking_deposit_count(&client, ð1, block_number) .await .expect("should get count if contract exists"), ); @@ -507,7 +527,7 @@ mod deposit_tree { // Pull all the deposit logs from the contract. 
let block_number = get_block_number(&web3).await; - let logs: Vec<_> = blocking_deposit_logs(ð1, 0..block_number) + let logs: Vec<_> = blocking_deposit_logs(&client, ð1, 0..block_number) .await .iter() .map(|raw| raw.to_deposit_log(spec).expect("should parse deposit log")) @@ -570,16 +590,12 @@ mod deposit_tree { /// Tests for the base HTTP requests and response handlers. mod http { use super::*; - use eth1::http::BlockQuery; - async fn get_block(eth1: &GanacheEth1Instance, block_number: u64) -> Block { - eth1::http::get_block( - &SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - BlockQuery::Number(block_number), - timeout(), - ) - .await - .expect("should get block number") + async fn get_block(client: &HttpJsonRpc, block_number: u64) -> Block { + client + .get_block(BlockQuery::Number(block_number), timeout()) + .await + .expect("should get block number") } #[tokio::test] @@ -590,17 +606,18 @@ mod http { .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); + let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap()).unwrap(); let block_number = get_block_number(&web3).await; - let logs = blocking_deposit_logs(ð1, 0..block_number).await; + let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; assert_eq!(logs.len(), 0); - let mut old_root = blocking_deposit_root(ð1, block_number).await; - let mut old_block = get_block(ð1, block_number).await; + let mut old_root = blocking_deposit_root(&client, ð1, block_number).await; + let mut old_block = get_block(&client, block_number).await; let mut old_block_number = block_number; assert_eq!( - blocking_deposit_count(ð1, block_number).await, + blocking_deposit_count(&client, ð1, block_number).await, Some(0), "should have deposit count zero" ); @@ -618,18 +635,18 @@ mod http { // Check the logs. 
let block_number = get_block_number(&web3).await; - let logs = blocking_deposit_logs(ð1, 0..block_number).await; + let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; assert_eq!(logs.len(), i, "the number of logs should be as expected"); // Check the deposit count. assert_eq!( - blocking_deposit_count(ð1, block_number).await, + blocking_deposit_count(&client, ð1, block_number).await, Some(i as u64), "should have a correct deposit count" ); // Check the deposit root. - let new_root = blocking_deposit_root(ð1, block_number).await; + let new_root = blocking_deposit_root(&client, ð1, block_number).await; assert_ne!( new_root, old_root, "deposit root should change with each deposit" @@ -637,7 +654,7 @@ mod http { old_root = new_root; // Check the block hash. - let new_block = get_block(ð1, block_number).await; + let new_block = get_block(&client, block_number).await; assert_ne!( new_block.hash, old_block.hash, "block hash should change with each deposit" @@ -689,7 +706,10 @@ mod fast { let now = get_block_number(&web3).await; let service = Service::new( Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: now, lowest_cached_block_number: now, @@ -700,6 +720,7 @@ mod fast { log, MainnetEthSpec::default_spec(), ); + let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap()).unwrap(); let n = 10; let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); for deposit in &deposits { @@ -711,7 +732,7 @@ mod fast { eth1.ganache.evm_mine().await.expect("should mine block"); } - let endpoints = service.init_endpoints(); + let endpoints = service.init_endpoints().unwrap(); service .update_deposit_cache(None, &endpoints) .await @@ -723,8 +744,9 @@ mod fast { ); for block_num in 0..=get_block_number(&web3).await { - 
let expected_deposit_count = blocking_deposit_count(ð1, block_num).await; - let expected_deposit_root = blocking_deposit_root(ð1, block_num).await; + let expected_deposit_count = + blocking_deposit_count(&client, ð1, block_num).await; + let expected_deposit_root = blocking_deposit_root(&client, ð1, block_num).await; let deposit_count = service .deposits() @@ -765,7 +787,10 @@ mod persist { let now = get_block_number(&web3).await; let config = Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: now, lowest_cached_block_number: now, @@ -783,7 +808,7 @@ mod persist { .expect("should perform a deposit"); } - let endpoints = service.init_endpoints(); + let endpoints = service.init_endpoints().unwrap(); service .update_deposit_cache(None, &endpoints) .await @@ -874,10 +899,10 @@ mod fallbacks { let service = Service::new( Config { - endpoints: vec![ + endpoints: Eth1Endpoint::NoAuth(vec![ SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), - ], + ]), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: initial_block_number, follow_distance: 0, @@ -909,82 +934,13 @@ mod fallbacks { .await; } - #[tokio::test] - async fn test_fallback_when_wrong_network_id() { - async { - let log = null_logger(); - let correct_network_id: u64 = DEFAULT_NETWORK_ID.into(); - let wrong_network_id = correct_network_id + 1; - let endpoint1 = GanacheEth1Instance::new(wrong_network_id, DEFAULT_CHAIN_ID.into()) - .await - .expect("should start eth1 environment"); - let endpoint2 = new_ganache_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = &endpoint2.deposit_contract; - - let initial_block_number = get_block_number(&endpoint2.web3()).await; 
- - // Create some blocks and then consume them, performing the test `rounds` times. - let new_blocks = 4; - - for _ in 0..new_blocks { - endpoint1 - .ganache - .evm_mine() - .await - .expect("should mine block"); - endpoint2 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - //additional blocks for endpoint1 to be able to distinguish - for _ in 0..new_blocks { - endpoint1 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - let service = Service::new( - Config { - endpoints: vec![ - SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), - SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), - ], - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: initial_block_number, - follow_distance: 0, - ..Config::default() - }, - log.clone(), - MainnetEthSpec::default_spec(), - ); - - let endpoint1_block_number = get_block_number(&endpoint1.web3()).await; - let endpoint2_block_number = get_block_number(&endpoint2.web3()).await; - assert!(endpoint2_block_number < endpoint1_block_number); - //the call will fallback to endpoint2 - service.update().await.expect("should update deposit cache"); - assert_eq!( - service.deposits().read().last_processed_block.unwrap(), - endpoint2_block_number - ); - } - .await; - } - #[tokio::test] async fn test_fallback_when_wrong_chain_id() { async { let log = null_logger(); let correct_chain_id: u64 = DEFAULT_CHAIN_ID.into(); let wrong_chain_id = correct_chain_id + 1; - let endpoint1 = GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), wrong_chain_id) + let endpoint1 = GanacheEth1Instance::new(wrong_chain_id) .await .expect("should start eth1 environment"); let endpoint2 = new_ganache_instance() @@ -1021,10 +977,10 @@ mod fallbacks { let service = Service::new( Config { - endpoints: vec![ + endpoints: Eth1Endpoint::NoAuth(vec![ SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), - ], + 
]), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: initial_block_number, follow_distance: 0, @@ -1076,10 +1032,10 @@ mod fallbacks { let service = Service::new( Config { - endpoints: vec![ + endpoints: Eth1Endpoint::NoAuth(vec![ SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), - ], + ]), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: initial_block_number, follow_distance: 0, diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 0351b5e433..dbd6324680 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -16,14 +16,16 @@ reqwest = { version = "0.11.0", features = ["json","stream"] } eth2_serde_utils = "0.1.1" serde_json = "1.0.58" serde = { version = "1.0.116", features = ["derive"] } -eth1 = { path = "../eth1" } warp = { version = "0.3.2", features = ["tls"] } jsonwebtoken = "8" environment = { path = "../../lighthouse/environment" } bytes = "1.1.0" task_executor = { path = "../../common/task_executor" } hex = "0.4.2" +eth2_ssz = "0.4.1" eth2_ssz_types = "0.2.2" +eth2 = { path = "../../common/eth2" } +state_processing = { path = "../../consensus/state_processing" } lru = "0.7.1" exit-future = "0.2.0" tree_hash = "0.4.1" diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 9eb98cecb9..5f3edb78bf 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,7 +1,7 @@ use crate::engines::ForkChoiceState; use async_trait::async_trait; -use eth1::http::RpcError; pub use ethers_core::types::Transaction; +use http::deposit_methods::RpcError; pub use json_structures::TransitionConfigurationV1; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; diff --git a/beacon_node/execution_layer/src/engine_api/auth.rs 
b/beacon_node/execution_layer/src/engine_api/auth.rs index a4050a25c0..560e43585b 100644 --- a/beacon_node/execution_layer/src/engine_api/auth.rs +++ b/beacon_node/execution_layer/src/engine_api/auth.rs @@ -1,3 +1,5 @@ +use std::path::PathBuf; + use jsonwebtoken::{encode, get_current_timestamp, Algorithm, EncodingKey, Header}; use rand::Rng; use serde::{Deserialize, Serialize}; @@ -13,6 +15,7 @@ pub const JWT_SECRET_LENGTH: usize = 32; pub enum Error { JWT(jsonwebtoken::errors::Error), InvalidToken, + InvalidKey(String), } impl From for Error { @@ -57,6 +60,14 @@ impl JwtKey { } } +pub fn strip_prefix(s: &str) -> &str { + if let Some(stripped) = s.strip_prefix("0x") { + stripped + } else { + s + } +} + /// Contains the JWT secret and claims parameters. pub struct Auth { key: EncodingKey, @@ -73,6 +84,28 @@ impl Auth { } } + /// Create a new `Auth` struct given the path to the file containing the hex + /// encoded jwt key. + pub fn new_with_path( + jwt_path: PathBuf, + id: Option, + clv: Option, + ) -> Result { + std::fs::read_to_string(&jwt_path) + .map_err(|e| { + Error::InvalidKey(format!( + "Failed to read JWT secret file {:?}, error: {:?}", + jwt_path, e + )) + }) + .and_then(|ref s| { + let secret_bytes = hex::decode(strip_prefix(s.trim_end())) + .map_err(|e| Error::InvalidKey(format!("Invalid hex string: {:?}", e)))?; + let secret = JwtKey::from_slice(&secret_bytes).map_err(Error::InvalidKey)?; + Ok(Self::new(secret, id, clv)) + }) + } + /// Generate a JWT token with `claims.iat` set to current time. 
pub fn generate_token(&self) -> Result { let claims = self.generate_claims_at_timestamp(); diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 179045ccf8..157f9a3054 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -3,15 +3,16 @@ use super::*; use crate::auth::Auth; use crate::json_structures::*; -use eth1::http::EIP155_ERROR_STR; use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde_json::json; use std::marker::PhantomData; + use std::time::Duration; use types::{BlindedPayload, EthSpec, ExecutionPayloadHeader, SignedBeaconBlock}; +pub use deposit_log::{DepositLog, Log}; pub use reqwest::Client; const STATIC_ID: u32 = 1; @@ -48,6 +49,480 @@ pub const BUILDER_GET_PAYLOAD_HEADER_TIMEOUT: Duration = Duration::from_secs(2); pub const BUILDER_PROPOSE_BLINDED_BLOCK_V1: &str = "builder_proposeBlindedBlockV1"; pub const BUILDER_PROPOSE_BLINDED_BLOCK_TIMEOUT: Duration = Duration::from_secs(2); +/// This error is returned during a `chainId` call by Geth. +pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; + +/// Contains methods to convert arbitary bytes to an ETH2 deposit contract object. +pub mod deposit_log { + use ssz::Decode; + use state_processing::per_block_processing::signature_sets::deposit_pubkey_signature_message; + use types::{ChainSpec, DepositData, Hash256, PublicKeyBytes, SignatureBytes}; + + pub use eth2::lighthouse::DepositLog; + + /// The following constants define the layout of bytes in the deposit contract `DepositEvent`. The + /// event bytes are formatted according to the Ethereum ABI. 
+ const PUBKEY_START: usize = 192; + const PUBKEY_LEN: usize = 48; + const CREDS_START: usize = PUBKEY_START + 64 + 32; + const CREDS_LEN: usize = 32; + const AMOUNT_START: usize = CREDS_START + 32 + 32; + const AMOUNT_LEN: usize = 8; + const SIG_START: usize = AMOUNT_START + 32 + 32; + const SIG_LEN: usize = 96; + const INDEX_START: usize = SIG_START + 96 + 32; + const INDEX_LEN: usize = 8; + + /// A reduced set of fields from an Eth1 contract log. + #[derive(Debug, PartialEq, Clone)] + pub struct Log { + pub block_number: u64, + pub data: Vec, + } + + impl Log { + /// Attempts to parse a raw `Log` from the deposit contract into a `DepositLog`. + pub fn to_deposit_log(&self, spec: &ChainSpec) -> Result { + let bytes = &self.data; + + let pubkey = bytes + .get(PUBKEY_START..PUBKEY_START + PUBKEY_LEN) + .ok_or("Insufficient bytes for pubkey")?; + let withdrawal_credentials = bytes + .get(CREDS_START..CREDS_START + CREDS_LEN) + .ok_or("Insufficient bytes for withdrawal credential")?; + let amount = bytes + .get(AMOUNT_START..AMOUNT_START + AMOUNT_LEN) + .ok_or("Insufficient bytes for amount")?; + let signature = bytes + .get(SIG_START..SIG_START + SIG_LEN) + .ok_or("Insufficient bytes for signature")?; + let index = bytes + .get(INDEX_START..INDEX_START + INDEX_LEN) + .ok_or("Insufficient bytes for index")?; + + let deposit_data = DepositData { + pubkey: PublicKeyBytes::from_ssz_bytes(pubkey) + .map_err(|e| format!("Invalid pubkey ssz: {:?}", e))?, + withdrawal_credentials: Hash256::from_ssz_bytes(withdrawal_credentials) + .map_err(|e| format!("Invalid withdrawal_credentials ssz: {:?}", e))?, + amount: u64::from_ssz_bytes(amount) + .map_err(|e| format!("Invalid amount ssz: {:?}", e))?, + signature: SignatureBytes::from_ssz_bytes(signature) + .map_err(|e| format!("Invalid signature ssz: {:?}", e))?, + }; + + let signature_is_valid = deposit_pubkey_signature_message(&deposit_data, spec) + .map_or(false, |(public_key, signature, msg)| { + signature.verify(&public_key, 
msg) + }); + + Ok(DepositLog { + deposit_data, + block_number: self.block_number, + index: u64::from_ssz_bytes(index) + .map_err(|e| format!("Invalid index ssz: {:?}", e))?, + signature_is_valid, + }) + } + } + + #[cfg(test)] + pub mod tests { + use super::*; + use types::{EthSpec, MainnetEthSpec}; + + /// The data from a deposit event, using the v0.8.3 version of the deposit contract. + pub const EXAMPLE_LOG: &[u8] = &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 167, 108, 6, 69, 88, 17, + 3, 51, 6, 4, 158, 232, 82, 248, 218, 2, 71, 219, 55, 102, 86, 125, 136, 203, 36, 77, + 64, 213, 43, 52, 175, 154, 239, 50, 142, 52, 201, 77, 54, 239, 0, 229, 22, 46, 139, + 120, 62, 240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 8, 0, 64, 89, 115, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 96, 140, 74, 175, 158, 209, 20, 206, 30, 63, 215, 238, 113, 60, + 132, 216, 211, 100, 186, 202, 71, 34, 200, 160, 225, 212, 213, 119, 88, 51, 80, 101, + 74, 2, 45, 78, 153, 12, 192, 44, 51, 77, 40, 10, 72, 246, 34, 193, 
187, 22, 95, 4, 211, + 245, 224, 13, 162, 21, 163, 54, 225, 22, 124, 3, 56, 14, 81, 122, 189, 149, 250, 251, + 159, 22, 77, 94, 157, 197, 196, 253, 110, 201, 88, 193, 246, 136, 226, 221, 18, 113, + 232, 105, 100, 114, 103, 237, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + + #[test] + fn can_parse_example_log() { + let log = Log { + block_number: 42, + data: EXAMPLE_LOG.to_vec(), + }; + log.to_deposit_log(&MainnetEthSpec::default_spec()) + .expect("should decode log"); + } + } +} + +/// Contains subset of the HTTP JSON-RPC methods used to query an execution node for +/// state of the deposit contract. +pub mod deposit_methods { + use super::Log; + use crate::{EngineApi, HttpJsonRpc}; + use serde::{Deserialize, Serialize}; + use serde_json::{json, Value}; + use std::fmt; + use std::ops::Range; + use std::str::FromStr; + use std::time::Duration; + use types::Hash256; + + /// `keccak("DepositEvent(bytes,bytes,bytes,bytes,bytes)")` + pub const DEPOSIT_EVENT_TOPIC: &str = + "0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"; + /// `keccak("get_deposit_root()")[0..4]` + pub const DEPOSIT_ROOT_FN_SIGNATURE: &str = "0xc5f2892f"; + /// `keccak("get_deposit_count()")[0..4]` + pub const DEPOSIT_COUNT_FN_SIGNATURE: &str = "0x621fd130"; + + /// Number of bytes in deposit contract deposit root response. + pub const DEPOSIT_COUNT_RESPONSE_BYTES: usize = 96; + /// Number of bytes in deposit contract deposit root (value only). + pub const DEPOSIT_ROOT_BYTES: usize = 32; + + /// Represents an eth1 chain/network id. 
+ #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] + pub enum Eth1Id { + Goerli, + Mainnet, + Custom(u64), + } + + #[derive(Debug, PartialEq, Clone)] + pub struct Block { + pub hash: Hash256, + pub timestamp: u64, + pub number: u64, + } + + /// Used to identify a block when querying the Eth1 node. + #[derive(Clone, Copy)] + pub enum BlockQuery { + Number(u64), + Latest, + } + + impl Into for Eth1Id { + fn into(self) -> u64 { + match self { + Eth1Id::Mainnet => 1, + Eth1Id::Goerli => 5, + Eth1Id::Custom(id) => id, + } + } + } + + impl From for Eth1Id { + fn from(id: u64) -> Self { + let into = |x: Eth1Id| -> u64 { x.into() }; + match id { + id if id == into(Eth1Id::Mainnet) => Eth1Id::Mainnet, + id if id == into(Eth1Id::Goerli) => Eth1Id::Goerli, + id => Eth1Id::Custom(id), + } + } + } + + impl FromStr for Eth1Id { + type Err = String; + + fn from_str(s: &str) -> Result { + s.parse::() + .map(Into::into) + .map_err(|e| format!("Failed to parse eth1 network id {}", e)) + } + } + + /// Represents an error received from a remote procecdure call. + #[derive(Debug, Serialize, Deserialize)] + pub enum RpcError { + NoResultField, + Eip155Error, + InvalidJson(String), + Error(String), + } + + impl fmt::Display for RpcError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + RpcError::NoResultField => write!(f, "No result field in response"), + RpcError::Eip155Error => write!(f, "Not synced past EIP-155"), + RpcError::InvalidJson(e) => write!(f, "Malformed JSON received: {}", e), + RpcError::Error(s) => write!(f, "{}", s), + } + } + } + + impl From for String { + fn from(e: RpcError) -> String { + e.to_string() + } + } + + /// Parses a `0x`-prefixed, **big-endian** hex string as a u64. + /// + /// Note: the JSON-RPC encodes integers as big-endian. The deposit contract uses little-endian. + /// Therefore, this function is only useful for numbers encoded by the JSON RPC. 
+ /// + /// E.g., `0x01 == 1` + fn hex_to_u64_be(hex: &str) -> Result { + u64::from_str_radix(strip_prefix(hex)?, 16) + .map_err(|e| format!("Failed to parse hex as u64: {:?}", e)) + } + + /// Parses a `0x`-prefixed, big-endian hex string as bytes. + /// + /// E.g., `0x0102 == vec![1, 2]` + fn hex_to_bytes(hex: &str) -> Result, String> { + hex::decode(strip_prefix(hex)?) + .map_err(|e| format!("Failed to parse hex as bytes: {:?}", e)) + } + + /// Removes the `0x` prefix from some bytes. Returns an error if the prefix is not present. + fn strip_prefix(hex: &str) -> Result<&str, String> { + if let Some(stripped) = hex.strip_prefix("0x") { + Ok(stripped) + } else { + Err("Hex string did not start with `0x`".to_string()) + } + } + + impl HttpJsonRpc { + /// Get the eth1 chain id of the given endpoint. + pub async fn get_chain_id(&self, timeout: Duration) -> Result { + let chain_id: String = self + .rpc_request("eth_chainId", json!([]), timeout) + .await + .map_err(|e| format!("eth_chainId call failed {:?}", e))?; + hex_to_u64_be(chain_id.as_str()).map(|id| id.into()) + } + + /// Returns the current block number. + pub async fn get_block_number(&self, timeout: Duration) -> Result { + let response: String = self + .rpc_request("eth_blockNumber", json!([]), timeout) + .await + .map_err(|e| format!("eth_blockNumber call failed {:?}", e))?; + hex_to_u64_be(response.as_str()) + .map_err(|e| format!("Failed to get block number: {}", e)) + } + + /// Gets a block hash by block number. + pub async fn get_block( + &self, + query: BlockQuery, + timeout: Duration, + ) -> Result { + let query_param = match query { + BlockQuery::Number(block_number) => format!("0x{:x}", block_number), + BlockQuery::Latest => "latest".to_string(), + }; + let params = json!([ + query_param, + false // do not return full tx objects. 
+ ]); + + let response: Value = self + .rpc_request("eth_getBlockByNumber", params, timeout) + .await + .map_err(|e| format!("eth_getBlockByNumber call failed {:?}", e))?; + + let hash: Vec = hex_to_bytes( + response + .get("hash") + .ok_or("No hash for block")? + .as_str() + .ok_or("Block hash was not string")?, + )?; + let hash: Hash256 = if hash.len() == 32 { + Hash256::from_slice(&hash) + } else { + return Err(format!("Block hash was not 32 bytes: {:?}", hash)); + }; + + let timestamp = hex_to_u64_be( + response + .get("timestamp") + .ok_or("No timestamp for block")? + .as_str() + .ok_or("Block timestamp was not string")?, + )?; + + let number = hex_to_u64_be( + response + .get("number") + .ok_or("No number for block")? + .as_str() + .ok_or("Block number was not string")?, + )?; + + if number <= usize::max_value() as u64 { + Ok(Block { + hash, + timestamp, + number, + }) + } else { + Err(format!("Block number {} is larger than a usize", number)) + } + .map_err(|e| format!("Failed to get block number: {}", e)) + } + + /// Returns the value of the `get_deposit_count()` call at the given `address` for the given + /// `block_number`. + /// + /// Assumes that the `address` has the same ABI as the eth2 deposit contract. + pub async fn get_deposit_count( + &self, + address: &str, + block_number: u64, + timeout: Duration, + ) -> Result, String> { + let result = self + .call(address, DEPOSIT_COUNT_FN_SIGNATURE, block_number, timeout) + .await?; + match result { + None => Err("Deposit root response was none".to_string()), + Some(bytes) => { + if bytes.is_empty() { + Ok(None) + } else if bytes.len() == DEPOSIT_COUNT_RESPONSE_BYTES { + let mut array = [0; 8]; + array.copy_from_slice(&bytes[32 + 32..32 + 32 + 8]); + Ok(Some(u64::from_le_bytes(array))) + } else { + Err(format!( + "Deposit count response was not {} bytes: {:?}", + DEPOSIT_COUNT_RESPONSE_BYTES, bytes + )) + } + } + } + } + + /// Returns the value of the `get_hash_tree_root()` call at the given `block_number`. 
+ /// + /// Assumes that the `address` has the same ABI as the eth2 deposit contract. + pub async fn get_deposit_root( + &self, + address: &str, + block_number: u64, + timeout: Duration, + ) -> Result, String> { + let result = self + .call(address, DEPOSIT_ROOT_FN_SIGNATURE, block_number, timeout) + .await?; + match result { + None => Err("Deposit root response was none".to_string()), + Some(bytes) => { + if bytes.is_empty() { + Ok(None) + } else if bytes.len() == DEPOSIT_ROOT_BYTES { + Ok(Some(Hash256::from_slice(&bytes))) + } else { + Err(format!( + "Deposit root response was not {} bytes: {:?}", + DEPOSIT_ROOT_BYTES, bytes + )) + } + } + } + } + + /// Performs a instant, no-transaction call to the contract `address` with the given `0x`-prefixed + /// `hex_data`. + /// + /// Returns bytes, if any. + async fn call( + &self, + address: &str, + hex_data: &str, + block_number: u64, + timeout: Duration, + ) -> Result>, String> { + let params = json! ([ + { + "to": address, + "data": hex_data, + }, + format!("0x{:x}", block_number) + ]); + + let response: Option = self + .rpc_request("eth_call", params, timeout) + .await + .map_err(|e| format!("eth_call call failed {:?}", e))?; + + response.map(|s| hex_to_bytes(&s)).transpose() + } + + /// Returns logs for the `DEPOSIT_EVENT_TOPIC`, for the given `address` in the given + /// `block_height_range`. + /// + /// It's not clear from the Ethereum JSON-RPC docs if this range is inclusive or not. + pub async fn get_deposit_logs_in_range( + &self, + address: &str, + block_height_range: Range, + timeout: Duration, + ) -> Result, String> { + let params = json! 
([{ + "address": address, + "topics": [DEPOSIT_EVENT_TOPIC], + "fromBlock": format!("0x{:x}", block_height_range.start), + "toBlock": format!("0x{:x}", block_height_range.end), + }]); + + let response: Value = self + .rpc_request("eth_getLogs", params, timeout) + .await + .map_err(|e| format!("eth_getLogs call failed {:?}", e))?; + response + .as_array() + .cloned() + .ok_or("'result' value was not an array")? + .into_iter() + .map(|value| { + let block_number = value + .get("blockNumber") + .ok_or("No block number field in log")? + .as_str() + .ok_or("Block number was not string")?; + + let data = value + .get("data") + .ok_or("No block number field in log")? + .as_str() + .ok_or("Data was not string")?; + + Ok(Log { + block_number: hex_to_u64_be(block_number)?, + data: hex_to_bytes(data)?, + }) + }) + .collect::, String>>() + .map_err(|e| format!("Failed to get logs in range: {}", e)) + } + } +} + pub struct HttpJsonRpc { pub client: Client, pub url: SensitiveUrl, @@ -117,6 +592,12 @@ impl HttpJsonRpc { } } +impl std::fmt::Display for HttpJsonRpc { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}, auth={}", self.url, self.auth.is_some()) + } +} + impl HttpJsonRpc { pub async fn upcheck(&self) -> Result<(), Error> { let result: serde_json::Value = self @@ -289,6 +770,7 @@ impl HttpJsonRpc { Ok(response.into()) } } + #[cfg(test)] mod test { use super::auth::JwtKey; diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index cff2190272..4b29887675 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -6,10 +6,10 @@ use crate::engine_api::Builder; use crate::engines::Builders; -use auth::{Auth, JwtKey}; +use auth::{strip_prefix, Auth, JwtKey}; use engine_api::Error as ApiError; pub use engine_api::*; -pub use engine_api::{http, http::HttpJsonRpc}; +pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; pub use 
engines::ForkChoiceState; use engines::{Engine, EngineError, Engines, Logging}; use lru::LruCache; @@ -42,6 +42,9 @@ mod metrics; mod payload_status; pub mod test_utils; +/// Indicates the default jwt authenticated execution endpoint. +pub const DEFAULT_EXECUTION_ENDPOINT: &str = "http://localhost:8551/"; + /// Name for the default file used for the jwt secret. pub const DEFAULT_JWT_FILE: &str = "jwt.hex"; @@ -130,14 +133,6 @@ pub struct Config { pub default_datadir: PathBuf, } -fn strip_prefix(s: &str) -> &str { - if let Some(stripped) = s.strip_prefix("0x") { - stripped - } else { - s - } -} - /// Provides access to one or more execution engines and provides a neat interface for consumption /// by the `BeaconChain`. /// diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index aac13a324f..089f79aa11 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -112,7 +112,7 @@ impl Eth1GenesisService { "Importing eth1 deposit logs"; ); - let endpoints = eth1_service.init_endpoints(); + let endpoints = eth1_service.init_endpoints()?; loop { let update_result = eth1_service diff --git a/beacon_node/genesis/src/lib.rs b/beacon_node/genesis/src/lib.rs index ccf8fe10c9..1233d99fd3 100644 --- a/beacon_node/genesis/src/lib.rs +++ b/beacon_node/genesis/src/lib.rs @@ -3,6 +3,7 @@ mod eth1_genesis_service; mod interop; pub use eth1::Config as Eth1Config; +pub use eth1::Eth1Endpoint; pub use eth1_genesis_service::{Eth1GenesisService, Statistics}; pub use interop::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; pub use types::test_utils::generate_deterministic_keypairs; diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index 8b77c89471..74a054fcc0 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -4,7 +4,7 @@ //! dir in the root of the `lighthouse` repo. 
#![cfg(test)] use environment::{Environment, EnvironmentBuilder}; -use eth1::{DEFAULT_CHAIN_ID, DEFAULT_NETWORK_ID}; +use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; use eth1_test_rig::{DelayThenDeposit, GanacheEth1Instance}; use genesis::{Eth1Config, Eth1GenesisService}; use sensitive_url::SensitiveUrl; @@ -29,7 +29,7 @@ fn basic() { let mut spec = env.eth2_config().spec.clone(); env.runtime().block_on(async { - let eth1 = GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), DEFAULT_CHAIN_ID.into()) + let eth1 = GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()) .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; @@ -44,7 +44,10 @@ fn basic() { let service = Eth1GenesisService::new( Eth1Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: now, lowest_cached_block_number: now, diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 3102018e3e..a0cc124d47 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -409,45 +409,46 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("merge") .long("merge") - .help("Enable the features necessary to run merge testnets. This feature \ - is unstable and is for developers only.") - .takes_value(false), + .help("Deprecated. The feature activates automatically when --execution-endpoint \ + is supplied.") + .takes_value(false) ) .arg( - Arg::with_name("execution-endpoints") - .long("execution-endpoints") - .value_name("EXECUTION-ENDPOINTS") - .help("One or more comma-delimited server endpoints for HTTP JSON-RPC connection. \ - If multiple endpoints are given the endpoints are used as fallback in the \ - given order. Also enables the --merge flag. 
\ - If this flag is omitted and the --eth1-endpoints is supplied, those values \ - will be used. Defaults to http://127.0.0.1:8545.") + Arg::with_name("execution-endpoint") + .long("execution-endpoint") + .value_name("EXECUTION-ENDPOINT") + .alias("execution-endpoints") + .help("Server endpoint for an execution layer jwt authenticated HTTP \ + JSON-RPC connection. Uses the same endpoint to populate the \ + deposit cache. Also enables the --merge flag.\ + If not provided, uses the default value of http://127.0.0.1:8551") + .takes_value(true) + .requires("execution-jwt") + ) + .arg( + Arg::with_name("execution-jwt") + .long("execution-jwt") + .value_name("EXECUTION-JWT") + .alias("jwt-secrets") + .help("File path which contains the hex-encoded JWT secret for the \ + execution endpoint provided in the --execution-endpoint flag.") .takes_value(true) ) .arg( - Arg::with_name("jwt-secrets") - .long("jwt-secrets") - .value_name("JWT-SECRETS") - .help("One or more comma-delimited file paths which contain the corresponding hex-encoded \ - JWT secrets for each execution endpoint provided in the --execution-endpoints flag. \ - The number of paths should be in the same order and strictly equal to the number \ - of execution endpoints provided.") - .takes_value(true) - .requires("execution-endpoints") - ) - .arg( - Arg::with_name("jwt-id") - .long("jwt-id") - .value_name("JWT-ID") + Arg::with_name("execution-jwt-id") + .long("execution-jwt-id") + .value_name("EXECUTION-JWT-ID") + .alias("jwt-id") .help("Used by the beacon node to communicate a unique identifier to execution nodes \ during JWT authentication. 
It corresponds to the 'id' field in the JWT claims object.\ Set to empty by deafult") .takes_value(true) ) .arg( - Arg::with_name("jwt-version") - .long("jwt-version") - .value_name("JWT-VERSION") + Arg::with_name("execution-jwt-version") + .long("execution-jwt-version") + .value_name("EXECUTION-JWT-VERSION") + .alias("jwt-version") .help("Used by the beacon node to communicate a client version to execution nodes \ during JWT authentication. It corresponds to the 'clv' field in the JWT claims object.\ Set to empty by deafult") @@ -461,14 +462,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { collected from any blocks produced by this node. Defaults to a junk \ address whilst the merge is in development stages. THE DEFAULT VALUE \ WILL BE REMOVED BEFORE THE MERGE ENTERS PRODUCTION") - .requires("merge") + .requires("execution-endpoint") .takes_value(true) ) .arg( - Arg::with_name("payload-builders") - .long("payload-builders") + Arg::with_name("payload-builder") + .long("payload-builder") + .alias("payload-builders") .help("The URL of a service compatible with the MEV-boost API.") - .requires("merge") + .requires("execution-endpoint") .takes_value(true) ) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index db765100c3..0421df3429 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -3,12 +3,14 @@ use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; use client::{ClientConfig, ClientGenesis}; use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; use environment::RuntimeContext; +use genesis::Eth1Endpoint; use http_api::TlsConfig; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use sensitive_url::SensitiveUrl; use slog::{info, warn, Logger}; use std::cmp; use std::cmp::max; +use std::fmt::Debug; use std::fs; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; use std::path::{Path, PathBuf}; @@ -215,15 +217,18 @@ pub fn get_config( "msg" => "please use 
--eth1-endpoints instead" ); client_config.sync_eth1_chain = true; - client_config.eth1.endpoints = vec![SensitiveUrl::parse(endpoint) + + let endpoints = vec![SensitiveUrl::parse(endpoint) .map_err(|e| format!("eth1-endpoint was an invalid URL: {:?}", e))?]; + client_config.eth1.endpoints = Eth1Endpoint::NoAuth(endpoints); } else if let Some(endpoints) = cli_args.value_of("eth1-endpoints") { client_config.sync_eth1_chain = true; - client_config.eth1.endpoints = endpoints + let endpoints = endpoints .split(',') .map(SensitiveUrl::parse) .collect::>() .map_err(|e| format!("eth1-endpoints contains an invalid URL {:?}", e))?; + client_config.eth1.endpoints = Eth1Endpoint::NoAuth(endpoints); } if let Some(val) = cli_args.value_of("eth1-blocks-per-log-query") { @@ -242,47 +247,79 @@ pub fn get_config( client_config.eth1.cache_follow_distance = Some(follow_distance); } - if cli_args.is_present("merge") || cli_args.is_present("execution-endpoints") { + if cli_args.is_present("merge") { + if cli_args.is_present("execution-endpoint") { + warn!( + log, + "The --merge flag is deprecated"; + "info" => "the --execution-endpoint flag automatically enables this feature" + ) + } else { + return Err("The --merge flag is deprecated. \ + Supply a value to --execution-endpoint instead." 
+ .into()); + } + } + + if let Some(endpoints) = cli_args.value_of("execution-endpoint") { let mut el_config = execution_layer::Config::default(); - if let Some(endpoints) = cli_args.value_of("execution-endpoints") { - client_config.sync_eth1_chain = true; - el_config.execution_endpoints = endpoints - .split(',') - .map(SensitiveUrl::parse) - .collect::>() - .map_err(|e| format!("execution-endpoints contains an invalid URL {:?}", e))?; - } else if cli_args.is_present("merge") { - el_config.execution_endpoints = client_config.eth1.endpoints.clone(); - } - - if let Some(endpoints) = cli_args.value_of("payload-builders") { - el_config.builder_endpoints = endpoints - .split(',') - .map(SensitiveUrl::parse) - .collect::>() - .map_err(|e| format!("payload-builders contains an invalid URL {:?}", e))?; - } - - if let Some(secrets) = cli_args.value_of("jwt-secrets") { - let secret_files: Vec<_> = secrets.split(',').map(PathBuf::from).collect(); - if !secret_files.is_empty() && secret_files.len() != el_config.execution_endpoints.len() - { - return Err(format!( - "{} execution-endpoints supplied with {} jwt-secrets. Lengths \ - must match or jwt-secrets must be empty.", - el_config.execution_endpoints.len(), - secret_files.len(), - )); - } - el_config.secret_files = secret_files; + // Always follow the deposit contract when there is an execution endpoint. + // + // This is wasteful for non-staking nodes as they have no need to process deposit contract + // logs and build an "eth1" cache. The alternative is to explicitly require the `--eth1` or + // `--staking` flags, however that poses a risk to stakers since they cannot produce blocks + // without "eth1". + // + // The waste for non-staking nodes is relatively small so we err on the side of safety for + // stakers. The merge is already complicated enough. + client_config.sync_eth1_chain = true; + + // Parse a single execution endpoint, logging warnings if multiple endpoints are supplied. 
+ let execution_endpoint = + parse_only_one_value(endpoints, SensitiveUrl::parse, "--execution-endpoint", log)?; + + // Parse a single JWT secret, logging warnings if multiple are supplied. + // + // JWTs are required if `--execution-endpoint` is supplied. + let secret_files: String = clap_utils::parse_required(cli_args, "execution-jwt")?; + let secret_file = + parse_only_one_value(&secret_files, PathBuf::from_str, "--execution-jwt", log)?; + + // Parse and set the payload builder, if any. + if let Some(endpoints) = cli_args.value_of("payload-builder") { + let payload_builder = + parse_only_one_value(endpoints, SensitiveUrl::parse, "--payload-builder", log)?; + el_config.builder_endpoints = vec![payload_builder]; } + // Set config values from parse values. + el_config.secret_files = vec![secret_file.clone()]; + el_config.execution_endpoints = vec![execution_endpoint.clone()]; el_config.suggested_fee_recipient = clap_utils::parse_optional(cli_args, "suggested-fee-recipient")?; - el_config.jwt_id = clap_utils::parse_optional(cli_args, "jwt-id")?; - el_config.jwt_version = clap_utils::parse_optional(cli_args, "jwt-version")?; + el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?; + el_config.jwt_version = clap_utils::parse_optional(cli_args, "execution-jwt-version")?; el_config.default_datadir = client_config.data_dir.clone(); + + // If `--execution-endpoint` is provided, we should ignore any `--eth1-endpoints` values and + // use `--execution-endpoint` instead. Also, log a deprecation warning. + if cli_args.is_present("eth1-endpoints") || cli_args.is_present("eth1-endpoint") { + warn!( + log, + "Ignoring --eth1-endpoints flag"; + "info" => "the value for --execution-endpoint will be used instead. 
\ + --eth1-endpoints has been deprecated for post-merge configurations" + ); + } + client_config.eth1.endpoints = Eth1Endpoint::Auth { + endpoint: execution_endpoint, + jwt_path: secret_file, + jwt_id: el_config.jwt_id.clone(), + jwt_version: el_config.jwt_version.clone(), + }; + + // Store the EL config in the client config. client_config.execution_layer = Some(el_config); } @@ -344,7 +381,6 @@ pub fn get_config( client_config.eth1.follow_distance = spec.eth1_follow_distance; client_config.eth1.node_far_behind_seconds = max(5, spec.eth1_follow_distance / 2) * spec.seconds_per_eth1_block; - client_config.eth1.network_id = spec.deposit_network_id.into(); client_config.eth1.chain_id = spec.deposit_chain_id.into(); client_config.eth1.set_block_cache_truncation::(spec); @@ -844,3 +880,38 @@ pub fn get_slots_per_restore_point( Ok((default, false)) } } + +/// Parses the `cli_value` as a comma-separated string of values to be parsed with `parser`. +/// +/// If there is more than one value, log a warning. If there are no values, return an error. 
+pub fn parse_only_one_value<F, T, E>( + cli_value: &str, + parser: F, + flag_name: &str, + log: &Logger, +) -> Result<T, String> +where + F: Fn(&str) -> Result<T, E>, + E: Debug, +{ + let values = cli_value + .split(',') + .map(parser) + .collect::<Result<Vec<_>, _>>() + .map_err(|e| format!("{} contains an invalid value {:?}", flag_name, e))?; + + if values.len() > 1 { + warn!( + log, + "Multiple values provided"; + "info" => "multiple values are deprecated, only the first value will be used", + "count" => values.len(), + "flag" => flag_name + ); + } + + values + .into_iter() + .next() + .ok_or(format!("Must provide at least one value to {}", flag_name)) +} diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs index 689107228e..1046241953 100644 --- a/lcli/src/eth1_genesis.rs +++ b/lcli/src/eth1_genesis.rs @@ -1,7 +1,7 @@ use clap::ArgMatches; use environment::Environment; use eth2_network_config::Eth2NetworkConfig; -use genesis::{Eth1Config, Eth1GenesisService}; +use genesis::{Eth1Config, Eth1Endpoint, Eth1GenesisService}; use sensitive_url::SensitiveUrl; use ssz::Encode; use std::cmp::max; @@ -35,11 +35,12 @@ pub fn run( let mut config = Eth1Config::default(); if let Some(v) = endpoints.clone() { - config.endpoints = v + let endpoints = v .iter() .map(|s| SensitiveUrl::parse(s)) .collect::<Result<_, _>>() .map_err(|e| format!("Unable to parse eth1 endpoint URL: {:?}", e))?; + config.endpoints = Eth1Endpoint::NoAuth(endpoints); } config.deposit_contract_address = format!("{:?}", spec.deposit_contract_address); config.deposit_contract_deploy_block = eth2_network_config.deposit_contract_deploy_block; diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index d9bd4334cf..f7742ef0b9 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -55,6 +55,7 @@ validator_dir = { path = "../common/validator_dir" } slashing_protection = { path = "../validator_client/slashing_protection" } lighthouse_network = { path = "../beacon_node/lighthouse_network" } sensitive_url = { path = 
"../common/sensitive_url" } +eth1 = { path = "../beacon_node/eth1" } [[test]] name = "lighthouse_tests" diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index effccbbd66..443c442027 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1,6 +1,7 @@ use beacon_node::ClientConfig as Config; use crate::exec::{CommandLineTestExec, CompletedTest}; +use eth1::Eth1Endpoint; use lighthouse_network::PeerId; use std::fs::File; use std::io::Write; @@ -66,7 +67,10 @@ fn staking_flag() { .with_config(|config| { assert!(config.http_api.enabled); assert!(config.sync_eth1_chain); - assert_eq!(config.eth1.endpoints[0].to_string(), DEFAULT_ETH1_ENDPOINT); + assert_eq!( + config.eth1.endpoints.get_endpoints()[0].to_string(), + DEFAULT_ETH1_ENDPOINT + ); }); } @@ -196,18 +200,21 @@ fn eth1_endpoints_flag() { .run_with_zero_port() .with_config(|config| { assert_eq!( - config.eth1.endpoints[0].full.to_string(), + config.eth1.endpoints.get_endpoints()[0].full.to_string(), "http://localhost:9545/" ); assert_eq!( - config.eth1.endpoints[0].to_string(), + config.eth1.endpoints.get_endpoints()[0].to_string(), "http://localhost:9545/" ); assert_eq!( - config.eth1.endpoints[1].full.to_string(), + config.eth1.endpoints.get_endpoints()[1].full.to_string(), "https://infura.io/secret" ); - assert_eq!(config.eth1.endpoints[1].to_string(), "https://infura.io/"); + assert_eq!( + config.eth1.endpoints.get_endpoints()[1].to_string(), + "https://infura.io/" + ); assert!(config.sync_eth1_chain); }); } @@ -246,45 +253,107 @@ fn eth1_cache_follow_distance_manual() { } // Tests for Bellatrix flags. 
-#[test] -fn merge_flag() { - CommandLineTest::new() - .flag("merge", None) - .run_with_zero_port() - .with_config(|config| assert!(config.execution_layer.is_some())); -} -#[test] -fn merge_execution_endpoints_flag() { +fn run_merge_execution_endpoints_flag_test(flag: &str) { use sensitive_url::SensitiveUrl; let urls = vec!["http://sigp.io/no-way:1337", "http://infura.not_real:4242"]; - let endpoints = urls - .iter() - .map(|s| SensitiveUrl::parse(s).unwrap()) - .collect::>(); + // we don't support redundancy for execution-endpoints + // only the first provided endpoint is parsed. + let mut endpoint_arg = urls[0].to_string(); - for url in urls.into_iter().skip(1) { + for url in urls.iter().skip(1) { endpoint_arg.push(','); endpoint_arg.push_str(url); } + + let (_dirs, jwts): (Vec<_>, Vec<_>) = (0..2) + .map(|i| { + let dir = TempDir::new().expect("Unable to create temporary directory"); + let path = dir.path().join(format!("jwt-{}", i)); + (dir, path) + }) + .unzip(); + + let mut jwts_arg = jwts[0].as_os_str().to_str().unwrap().to_string(); + for jwt in jwts.iter().skip(1) { + jwts_arg.push(','); + jwts_arg.push_str(jwt.as_os_str().to_str().unwrap()); + } + // this is way better but intersperse is still a nightly feature :/ // let endpoint_arg: String = urls.into_iter().intersperse(",").collect(); CommandLineTest::new() - .flag("merge", None) - .flag("execution-endpoints", Some(&endpoint_arg)) + .flag(flag, Some(&endpoint_arg)) + .flag("execution-jwt", Some(&jwts_arg)) .run_with_zero_port() .with_config(|config| { let config = config.execution_layer.as_ref().unwrap(); - assert_eq!(config.execution_endpoints, endpoints) + assert_eq!(config.execution_endpoints.len(), 1); + assert_eq!( + config.execution_endpoints[0], + SensitiveUrl::parse(&urls[0]).unwrap() + ); + // Only the first secret file should be used. 
+ assert_eq!(config.secret_files, vec![jwts[0].clone()]); }); } #[test] +fn merge_execution_endpoints_flag() { + run_merge_execution_endpoints_flag_test("execution-endpoints") +} +#[test] +fn merge_execution_endpoint_flag() { + run_merge_execution_endpoints_flag_test("execution-endpoint") +} +fn run_execution_endpoints_overrides_eth1_endpoints_test(eth1_flag: &str, execution_flag: &str) { + use sensitive_url::SensitiveUrl; + + let eth1_endpoint = "http://bad.bad"; + let execution_endpoint = "http://good.good"; + + assert!(eth1_endpoint != execution_endpoint); + + let dir = TempDir::new().expect("Unable to create temporary directory"); + let jwt_path = dir.path().join("jwt-file"); + + CommandLineTest::new() + .flag(eth1_flag, Some(ð1_endpoint)) + .flag(execution_flag, Some(&execution_endpoint)) + .flag("execution-jwt", jwt_path.as_os_str().to_str()) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.execution_layer.as_ref().unwrap().execution_endpoints, + vec![SensitiveUrl::parse(execution_endpoint).unwrap()] + ); + + // The eth1 endpoint should have been set to the --execution-endpoint value in defiance + // of --eth1-endpoints. 
+ assert_eq!( + config.eth1.endpoints, + Eth1Endpoint::Auth { + endpoint: SensitiveUrl::parse(execution_endpoint).unwrap(), + jwt_path: jwt_path.clone(), + jwt_id: None, + jwt_version: None, + } + ); + }); +} +#[test] +fn execution_endpoints_overrides_eth1_endpoints() { + run_execution_endpoints_overrides_eth1_endpoints_test("eth1-endpoints", "execution-endpoints"); +} +#[test] +fn execution_endpoint_overrides_eth1_endpoint() { + run_execution_endpoints_overrides_eth1_endpoints_test("eth1-endpoint", "execution-endpoint"); +} +#[test] fn merge_jwt_secrets_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); let mut file = File::create(dir.path().join("jwtsecrets")).expect("Unable to create file"); file.write_all(b"0x3cbc11b0d8fa16f3344eacfd6ff6430b9d30734450e8adcf5400f88d327dcb33") .expect("Unable to write to file"); CommandLineTest::new() - .flag("merge", None) .flag("execution-endpoints", Some("http://localhost:8551/")) .flag( "jwt-secrets", @@ -302,8 +371,13 @@ fn merge_jwt_secrets_flag() { } #[test] fn merge_fee_recipient_flag() { + let dir = TempDir::new().expect("Unable to create temporary directory"); CommandLineTest::new() - .flag("merge", None) + .flag("execution-endpoint", Some("http://meow.cats")) + .flag( + "execution-jwt", + dir.path().join("jwt-file").as_os_str().to_str(), + ) .flag( "suggested-fee-recipient", Some("0x00000000219ab540356cbb839cbe05303d7705fa"), @@ -317,19 +391,74 @@ fn merge_fee_recipient_flag() { ); }); } -#[test] -fn jwt_optional_flags() { +fn run_payload_builder_flag_test(flag: &str, builders: &str) { + use sensitive_url::SensitiveUrl; + + let dir = TempDir::new().expect("Unable to create temporary directory"); + let all_builders: Vec<_> = builders + .split(",") + .map(|builder| SensitiveUrl::parse(builder).expect("valid builder url")) + .collect(); CommandLineTest::new() - .flag("merge", None) - .flag("jwt-id", Some("bn-1")) - .flag("jwt-version", Some("Lighthouse-v2.1.3")) + .flag("execution-endpoint", 
Some("http://meow.cats")) + .flag( + "execution-jwt", + dir.path().join("jwt-file").as_os_str().to_str(), + ) + .flag(flag, Some(builders)) .run_with_zero_port() .with_config(|config| { let config = config.execution_layer.as_ref().unwrap(); - assert_eq!(config.jwt_id, Some("bn-1".to_string())); - assert_eq!(config.jwt_version, Some("Lighthouse-v2.1.3".to_string())); + // Only first provided endpoint is parsed as we don't support + // redundancy. + assert_eq!(&config.builder_endpoints, &all_builders[..1]); }); } + +#[test] +fn payload_builder_flags() { + run_payload_builder_flag_test("payload-builder", "http://meow.cats"); + run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs"); + run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs"); +} + +fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_flag: &str) { + use sensitive_url::SensitiveUrl; + + let dir = TempDir::new().expect("Unable to create temporary directory"); + let execution_endpoint = "http://meow.cats"; + let jwt_file = "jwt-file"; + let id = "bn-1"; + let version = "Lighthouse-v2.1.3"; + CommandLineTest::new() + .flag("execution-endpoint", Some(execution_endpoint.clone())) + .flag(jwt_flag, dir.path().join(jwt_file).as_os_str().to_str()) + .flag(jwt_id_flag, Some(id)) + .flag(jwt_version_flag, Some(version)) + .run_with_zero_port() + .with_config(|config| { + let el_config = config.execution_layer.as_ref().unwrap(); + assert_eq!(el_config.jwt_id, Some(id.to_string())); + assert_eq!(el_config.jwt_version, Some(version.to_string())); + assert_eq!( + config.eth1.endpoints, + Eth1Endpoint::Auth { + endpoint: SensitiveUrl::parse(execution_endpoint).unwrap(), + jwt_path: dir.path().join(jwt_file), + jwt_id: Some(id.to_string()), + jwt_version: Some(version.to_string()), + } + ); + }); +} +#[test] +fn jwt_optional_flags() { + run_jwt_optional_flags_test("execution-jwt", "execution-jwt-id", "execution-jwt-version"); +} 
+#[test] +fn jwt_optional_alias_flags() { + run_jwt_optional_flags_test("jwt-secrets", "jwt-id", "jwt-version"); +} #[test] fn terminal_total_difficulty_override_flag() { use beacon_node::beacon_chain::types::Uint256; diff --git a/scripts/local_testnet/ganache_test_node.sh b/scripts/local_testnet/ganache_test_node.sh index 7d97f2196a..a489c33224 100755 --- a/scripts/local_testnet/ganache_test_node.sh +++ b/scripts/local_testnet/ganache_test_node.sh @@ -11,5 +11,4 @@ exec ganache \ --mnemonic "$ETH1_NETWORK_MNEMONIC" \ --port 8545 \ --blockTime $SECONDS_PER_ETH1_BLOCK \ - --networkId "$NETWORK_ID" \ - --chain.chainId "$NETWORK_ID" + --chain.chainId "$CHAIN_ID" diff --git a/scripts/local_testnet/setup.sh b/scripts/local_testnet/setup.sh index 6f0b070915..a1348363a9 100755 --- a/scripts/local_testnet/setup.sh +++ b/scripts/local_testnet/setup.sh @@ -32,7 +32,7 @@ lcli \ --genesis-delay $GENESIS_DELAY \ --genesis-fork-version $GENESIS_FORK_VERSION \ --altair-fork-epoch $ALTAIR_FORK_EPOCH \ - --eth1-id $NETWORK_ID \ + --eth1-id $CHAIN_ID \ --eth1-follow-distance 1 \ --seconds-per-slot $SECONDS_PER_SLOT \ --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \ diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index 208fbb6d85..efb1046452 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -30,7 +30,7 @@ GENESIS_DELAY=0 BOOTNODE_PORT=4242 # Network ID and Chain ID of local eth1 test network -NETWORK_ID=4242 +CHAIN_ID=4242 # Hard fork configuration ALTAIR_FORK_EPOCH=18446744073709551615 diff --git a/scripts/tests/vars.env b/scripts/tests/vars.env index 6cc0dd3b8a..d51fe2aef2 100644 --- a/scripts/tests/vars.env +++ b/scripts/tests/vars.env @@ -30,7 +30,7 @@ GENESIS_DELAY=0 BOOTNODE_PORT=4242 # Network ID and Chain ID of local eth1 test network -NETWORK_ID=4242 +CHAIN_ID=4242 # Hard fork configuration ALTAIR_FORK_EPOCH=18446744073709551615 diff --git a/testing/eth1_test_rig/src/ganache.rs 
b/testing/eth1_test_rig/src/ganache.rs index c82277dc75..9b6a33ff59 100644 --- a/testing/eth1_test_rig/src/ganache.rs +++ b/testing/eth1_test_rig/src/ganache.rs @@ -16,17 +16,11 @@ pub struct GanacheInstance { pub port: u16, child: Child, pub web3: Web3, - network_id: u64, chain_id: u64, } impl GanacheInstance { - fn new_from_child( - mut child: Child, - port: u16, - network_id: u64, - chain_id: u64, - ) -> Result { + fn new_from_child(mut child: Child, port: u16, chain_id: u64) -> Result { let stdout = child .stdout .ok_or("Unable to get stdout for ganache child process")?; @@ -64,14 +58,13 @@ impl GanacheInstance { port, child, web3, - network_id, chain_id, }) } /// Start a new `ganache` process, waiting until it indicates that it is ready to accept /// RPC connections. - pub fn new(network_id: u64, chain_id: u64) -> Result { + pub fn new(chain_id: u64) -> Result { let port = unused_tcp_port()?; let binary = match cfg!(windows) { true => "ganache.cmd", @@ -89,8 +82,6 @@ impl GanacheInstance { .arg(format!("{}", port)) .arg("--mnemonic") .arg("\"vast thought differ pull jewel broom cook wrist tribe word before omit\"") - .arg("--networkId") - .arg(format!("{}", network_id)) .arg("--chain.chainId") .arg(format!("{}", chain_id)) .spawn() @@ -102,7 +93,7 @@ impl GanacheInstance { ) })?; - Self::new_from_child(child, port, network_id, chain_id) + Self::new_from_child(child, port, chain_id) } pub fn fork(&self) -> Result { @@ -128,7 +119,7 @@ impl GanacheInstance { ) })?; - Self::new_from_child(child, port, self.network_id, self.chain_id) + Self::new_from_child(child, port, self.chain_id) } /// Returns the endpoint that this instance is listening on. 
@@ -136,11 +127,6 @@ impl GanacheInstance { endpoint(self.port) } - /// Returns the network id of the ganache instance - pub fn network_id(&self) -> u64 { - self.network_id - } - /// Returns the chain id of the ganache instance pub fn chain_id(&self) -> u64 { self.chain_id diff --git a/testing/eth1_test_rig/src/lib.rs b/testing/eth1_test_rig/src/lib.rs index 52ae3922bc..42081a60e7 100644 --- a/testing/eth1_test_rig/src/lib.rs +++ b/testing/eth1_test_rig/src/lib.rs @@ -30,8 +30,8 @@ pub struct GanacheEth1Instance { } impl GanacheEth1Instance { - pub async fn new(network_id: u64, chain_id: u64) -> Result { - let ganache = GanacheInstance::new(network_id, chain_id)?; + pub async fn new(chain_id: u64) -> Result { + let ganache = GanacheInstance::new(chain_id)?; DepositContract::deploy(ganache.web3.clone(), 0, None) .await .map(|deposit_contract| Self { diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 6770508435..a01c133fd9 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -9,6 +9,7 @@ edition = "2021" [dependencies] node_test_rig = { path = "../node_test_rig" } eth1 = {path = "../../beacon_node/eth1"} +execution_layer = {path = "../../beacon_node/execution_layer"} types = { path = "../../consensus/types" } parking_lot = "0.12.0" futures = "0.3.7" diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 80fc755d52..4c773c70bf 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -1,9 +1,10 @@ use crate::local_network::INVALID_ADDRESS; use crate::{checks, LocalNetwork, E}; use clap::ArgMatches; -use eth1::http::Eth1Id; -use eth1::{DEFAULT_CHAIN_ID, DEFAULT_NETWORK_ID}; +use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; use eth1_test_rig::GanacheEth1Instance; + +use execution_layer::http::deposit_methods::Eth1Id; use futures::prelude::*; use node_test_rig::{ environment::{EnvironmentBuilder, LoggerConfig}, @@ -92,10 +93,8 @@ pub fn 
run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit * validators. */ - let ganache_eth1_instance = - GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), DEFAULT_CHAIN_ID.into()).await?; + let ganache_eth1_instance = GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?; let deposit_contract = ganache_eth1_instance.deposit_contract; - let network_id = ganache_eth1_instance.ganache.network_id(); let chain_id = ganache_eth1_instance.ganache.chain_id(); let ganache = ganache_eth1_instance.ganache; let eth1_endpoint = SensitiveUrl::parse(ganache.endpoint().as_str()) @@ -124,7 +123,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let mut beacon_config = testing_client_config(); beacon_config.genesis = ClientGenesis::DepositContract; - beacon_config.eth1.endpoints = vec![eth1_endpoint]; + beacon_config.eth1.endpoints = Eth1Endpoint::NoAuth(vec![eth1_endpoint]); beacon_config.eth1.deposit_contract_address = deposit_contract_address; beacon_config.eth1.deposit_contract_deploy_block = 0; beacon_config.eth1.lowest_cached_block_number = 0; @@ -133,7 +132,6 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { beacon_config.dummy_eth1_backend = false; beacon_config.sync_eth1_chain = true; beacon_config.eth1.auto_update_interval_millis = eth1_block_time.as_millis() as u64; - beacon_config.eth1.network_id = Eth1Id::from(network_id); beacon_config.eth1.chain_id = Eth1Id::from(chain_id); beacon_config.network.target_peers = node_count - 1; @@ -150,10 +148,13 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { for i in 0..node_count - 1 { let mut config = beacon_config.clone(); if i % 2 == 0 { - config.eth1.endpoints.insert( - 0, - SensitiveUrl::parse(INVALID_ADDRESS).expect("Unable to parse invalid address"), - ); + if let Eth1Endpoint::NoAuth(endpoints) = &mut config.eth1.endpoints { + endpoints.insert( + 0, + 
SensitiveUrl::parse(INVALID_ADDRESS) + .expect("Unable to parse invalid address"), + ) + } } network.add_beacon_node(config).await?; } From f6ec44f0dd38ff86309ace4a4e246c0ea42f4e86 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Thu, 30 Jun 2022 00:49:21 +0000 Subject: [PATCH 046/184] Register validator api (#3194) ## Issue Addressed Lays the groundwork for builder API changes by implementing the beacon-API's new `register_validator` endpoint ## Proposed Changes - Add a routine in the VC that runs on startup (re-try until success), once per epoch or whenever `suggested_fee_recipient` is updated, signing `ValidatorRegistrationData` and sending it to the BN. - TODO: `gas_limit` config options https://github.com/ethereum/builder-specs/issues/17 - BN only sends VC registration data to builders on demand, but VC registration data *does update* the BN's prepare proposer cache and send an updated fcU to a local EE. This is necessary for fee recipient consistency between the blinded and full block flow in the event of fallback. Having the BN only send registration data to builders on demand gives feedback directly to the VC about relay status. Also, since the BN has no ability to sign these messages anyways (so couldn't refresh them if it wanted), and validator registration is independent of the BN head, I think this approach makes sense. - Adds upcoming consensus spec changes for this PR https://github.com/ethereum/consensus-specs/pull/2884 - I initially applied the bit mask based on a configured application domain.. but I ended up just hard coding it here instead because that's how it's spec'd in the builder repo. - Should application mask appear in the api? 
Co-authored-by: realbigsean --- Cargo.lock | 1 + beacon_node/http_api/Cargo.toml | 1 + beacon_node/http_api/src/lib.rs | 86 ++++++- beacon_node/http_api/tests/tests.rs | 76 ++++++ book/src/api-vc-endpoints.md | 1 + common/eth2/src/lib.rs | 17 ++ consensus/types/src/application_domain.rs | 16 ++ consensus/types/src/chain_spec.rs | 50 ++++ consensus/types/src/config_and_preset.rs | 4 + consensus/types/src/lib.rs | 3 + .../types/src/validator_registration_data.rs | 23 ++ testing/web3signer_tests/src/lib.rs | 33 +++ validator_client/src/http_metrics/metrics.rs | 5 + validator_client/src/lib.rs | 5 +- validator_client/src/preparation_service.rs | 232 +++++++++++++++++- validator_client/src/signing_method.rs | 39 ++- .../src/signing_method/web3signer.rs | 3 + validator_client/src/validator_store.rs | 35 ++- 18 files changed, 603 insertions(+), 27 deletions(-) create mode 100644 consensus/types/src/application_domain.rs create mode 100644 consensus/types/src/validator_registration_data.rs diff --git a/Cargo.lock b/Cargo.lock index 3dbe005658..3bdce9138e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2530,6 +2530,7 @@ dependencies = [ "safe_arith", "sensitive_url", "serde", + "serde_json", "slog", "slot_clock", "state_processing", diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 9dd2af7d17..07fb992393 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -39,6 +39,7 @@ environment = { path = "../../lighthouse/environment" } tree_hash = "0.4.1" sensitive_url = { path = "../../common/sensitive_url" } logging = { path = "../../common/logging" } +serde_json = "1.0.58" [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 379033a113..06dc968764 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -49,8 +49,8 @@ use types::{ BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, 
ProposerPreparationData, ProposerSlashing, RelativeEpoch, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockMerge, SignedBlindedBeaconBlock, - SignedContributionAndProof, SignedVoluntaryExit, Slot, SyncCommitteeMessage, - SyncContributionData, + SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, + SyncCommitteeMessage, SyncContributionData, }; use version::{ add_consensus_version_header, fork_versioned_response, inconsistent_fork_rejection, @@ -2408,12 +2408,10 @@ pub fn serve( .and(warp::path::end()) .and(not_while_syncing_filter.clone()) .and(chain_filter.clone()) - .and(warp::addr::remote()) .and(log_filter.clone()) .and(warp::body::json()) .and_then( |chain: Arc>, - client_addr: Option, log: Logger, preparation_data: Vec| { blocking_json_task(move || { @@ -2430,9 +2428,6 @@ pub fn serve( log, "Received proposer preparation data"; "count" => preparation_data.len(), - "client" => client_addr - .map(|a| a.to_string()) - .unwrap_or_else(|| "unknown".to_string()), ); execution_layer @@ -2455,6 +2450,82 @@ pub fn serve( }, ); + // POST validator/register_validator + let post_validator_register_validator = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("register_validator")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and(log_filter.clone()) + .and(warp::body::json()) + .and_then( + |chain: Arc>, + log: Logger, + register_val_data: Vec| { + blocking_json_task(move || { + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = chain + .slot_clock + .now_or_genesis() + .ok_or(BeaconChainError::UnableToReadSlot) + .map_err(warp_utils::reject::beacon_chain_error)? 
+ .epoch(T::EthSpec::slots_per_epoch()); + + debug!( + log, + "Received register validator request"; + "count" => register_val_data.len(), + ); + + let preparation_data = register_val_data + .iter() + .filter_map(|register_data| { + chain + .validator_index(&register_data.message.pubkey) + .ok() + .flatten() + .map(|validator_index| ProposerPreparationData { + validator_index: validator_index as u64, + fee_recipient: register_data.message.fee_recipient, + }) + }) + .collect::<Vec<_>>(); + + debug!( + log, + "Resolved validator request pubkeys"; + "count" => preparation_data.len() + ); + + // Update the prepare beacon proposer cache based on this request. + execution_layer + .update_proposer_preparation_blocking(current_epoch, &preparation_data) + .map_err(|_e| { + warp_utils::reject::custom_bad_request( + "error processing proposer preparations".to_string(), + ) + })?; + + // Call prepare beacon proposer blocking with the latest update in order to make + // sure we have a local payload to fall back to in the event of the blinded block + // flow failing. 
+ chain.prepare_beacon_proposer_blocking().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "error updating proposer preparations: {:?}", + e + )) + })?; + + //TODO(sean): In the MEV-boost PR, add a call here to send the update request to the builder + + Ok(()) + }) + }, + ); // POST validator/sync_committee_subscriptions let post_validator_sync_committee_subscriptions = eth1_v1 .and(warp::path("validator")) @@ -3008,6 +3079,7 @@ pub fn serve( .or(post_validator_beacon_committee_subscriptions.boxed()) .or(post_validator_sync_committee_subscriptions.boxed()) .or(post_validator_prepare_beacon_proposer.boxed()) + .or(post_validator_register_validator.boxed()) .or(post_lighthouse_liveness.boxed()) .or(post_lighthouse_database_reconstruct.boxed()) .or(post_lighthouse_database_historical_blocks.boxed()) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 5f53a96156..2b0cfd7c41 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -11,6 +11,7 @@ use eth2::{ types::*, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; +use execution_layer::test_utils::MockExecutionLayer; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; use lighthouse_network::{Enr, EnrExt, PeerId}; @@ -24,6 +25,7 @@ use task_executor::test_utils::TestRuntime; use tokio::sync::{mpsc, oneshot}; use tokio::time::Duration; use tree_hash::TreeHash; +use types::application_domain::ApplicationDomain; use types::{ AggregateSignature, BeaconState, BitList, Domain, EthSpec, Hash256, Keypair, MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, Slot, @@ -64,6 +66,9 @@ struct ApiTester { network_rx: mpsc::UnboundedReceiver>, local_enr: Enr, external_peer_id: PeerId, + // This is never directly accessed, but adding it creates a payload cache, which we use in tests here. 
+ #[allow(dead_code)] + mock_el: Option>, _runtime: TestRuntime, } @@ -80,6 +85,7 @@ impl ApiTester { .spec(spec.clone()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() + .mock_execution_layer() .build(); harness.advance_slot(); @@ -214,6 +220,7 @@ impl ApiTester { network_rx, local_enr, external_peer_id, + mock_el: harness.mock_execution_layer, _runtime: harness.runtime, } } @@ -293,6 +300,7 @@ impl ApiTester { network_rx, local_enr, external_peer_id, + mock_el: None, _runtime: harness.runtime, } } @@ -2226,6 +2234,66 @@ impl ApiTester { self } + pub async fn test_post_validator_register_validator(self) -> Self { + let mut registrations = vec![]; + let mut fee_recipients = vec![]; + + let fork = self.chain.head().unwrap().beacon_state.fork(); + + for (val_index, keypair) in self.validator_keypairs.iter().enumerate() { + let pubkey = keypair.pk.compress(); + let fee_recipient = Address::from_low_u64_be(val_index as u64); + + let data = ValidatorRegistrationData { + fee_recipient, + gas_limit: 0, + timestamp: 0, + pubkey, + }; + let domain = self.chain.spec.get_domain( + Epoch::new(0), + Domain::ApplicationMask(ApplicationDomain::Builder), + &fork, + Hash256::zero(), + ); + let message = data.signing_root(domain); + let signature = keypair.sk.sign(message); + + fee_recipients.push(fee_recipient); + registrations.push(SignedValidatorRegistrationData { + message: data, + signature, + }); + } + + self.client + .post_validator_register_validator(®istrations) + .await + .unwrap(); + + for (val_index, (_, fee_recipient)) in self + .chain + .head() + .unwrap() + .beacon_state + .validators() + .into_iter() + .zip(fee_recipients.into_iter()) + .enumerate() + { + let actual = self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_suggested_fee_recipient(val_index as u64) + .await; + assert_eq!(actual, fee_recipient); + } + + self + } + #[cfg(target_os = "linux")] pub async fn test_get_lighthouse_health(self) -> Self { 
self.client.get_lighthouse_health().await.unwrap(); @@ -2973,6 +3041,14 @@ async fn get_validator_beacon_committee_subscriptions() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_validator() { + ApiTester::new() + .await + .test_post_validator_register_validator() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index ae091130f3..69cd83db5c 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -134,6 +134,7 @@ Typical Responses | 200 "DOMAIN_VOLUNTARY_EXIT": "0x04000000", "DOMAIN_SELECTION_PROOF": "0x05000000", "DOMAIN_AGGREGATE_AND_PROOF": "0x06000000", + "DOMAIN_APPLICATION_MASK": "0x00000001", "MAX_VALIDATORS_PER_COMMITTEE": "2048", "SLOTS_PER_EPOCH": "32", "EPOCHS_PER_ETH1_VOTING_PERIOD": "32", diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 3e965a2bf8..529bad1d85 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -929,6 +929,23 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST validator/register_validator` + pub async fn post_validator_register_validator( + &self, + registration_data: &[SignedValidatorRegistrationData], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("validator") + .push("register_validator"); + + self.post(path, ®istration_data).await?; + + Ok(()) + } + /// `GET config/fork_schedule` pub async fn get_config_fork_schedule(&self) -> Result>, Error> { let mut path = self.eth_path(V1)?; diff --git a/consensus/types/src/application_domain.rs b/consensus/types/src/application_domain.rs new file mode 100644 index 0000000000..5e33f2dfd5 --- /dev/null +++ b/consensus/types/src/application_domain.rs @@ -0,0 +1,16 @@ +/// This value is an application index of 0 with the bitmask applied (so it's equivalent to the bit mask). +/// Little endian hex: 0x00000001, Binary: 1000000000000000000000000 +pub const APPLICATION_DOMAIN_BUILDER: u32 = 16777216; + +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum ApplicationDomain { + Builder, +} + +impl ApplicationDomain { + pub fn get_domain_constant(&self) -> u32 { + match self { + ApplicationDomain::Builder => APPLICATION_DOMAIN_BUILDER, + } + } +} diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index c283d4cb48..8a69505a51 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1,3 +1,4 @@ +use crate::application_domain::{ApplicationDomain, APPLICATION_DOMAIN_BUILDER}; use crate::*; use eth2_serde_utils::quoted_u64::MaybeQuoted; use int_to_bytes::int_to_bytes4; @@ -20,6 +21,7 @@ pub enum Domain { SyncCommittee, ContributionAndProof, SyncCommitteeSelectionProof, + ApplicationMask(ApplicationDomain), } /// Lighthouse's internal configuration struct. 
@@ -159,6 +161,11 @@ pub struct ChainSpec { pub attestation_subnet_count: u64, pub random_subnets_per_validator: u64, pub epochs_per_random_subnet_subscription: u64, + + /* + * Application params + */ + pub(crate) domain_application_mask: u32, } impl ChainSpec { @@ -326,6 +333,7 @@ impl ChainSpec { Domain::SyncCommittee => self.domain_sync_committee, Domain::ContributionAndProof => self.domain_contribution_and_proof, Domain::SyncCommitteeSelectionProof => self.domain_sync_committee_selection_proof, + Domain::ApplicationMask(application_domain) => application_domain.get_domain_constant(), } } @@ -353,6 +361,17 @@ impl ChainSpec { self.compute_domain(Domain::Deposit, self.genesis_fork_version, Hash256::zero()) } + // This should be updated to include the current fork and the genesis validators root, but discussion is ongoing: + // + // https://github.com/ethereum/builder-specs/issues/14 + pub fn get_builder_domain(&self) -> Hash256 { + self.compute_domain( + Domain::ApplicationMask(ApplicationDomain::Builder), + self.genesis_fork_version, + Hash256::zero(), + ) + } + /// Return the 32-byte fork data root for the `current_version` and `genesis_validators_root`. /// /// This is used primarily in signature domains to avoid collisions across forks/chains. 
@@ -565,6 +584,11 @@ impl ChainSpec { maximum_gossip_clock_disparity_millis: 500, target_aggregators_per_committee: 16, epochs_per_random_subnet_subscription: 256, + + /* + * Application specific + */ + domain_application_mask: APPLICATION_DOMAIN_BUILDER, } } @@ -763,6 +787,11 @@ impl ChainSpec { maximum_gossip_clock_disparity_millis: 500, target_aggregators_per_committee: 16, epochs_per_random_subnet_subscription: 256, + + /* + * Application specific + */ + domain_application_mask: APPLICATION_DOMAIN_BUILDER, } } } @@ -1119,6 +1148,27 @@ mod tests { &spec, ); test_domain(Domain::SyncCommittee, spec.domain_sync_committee, &spec); + + // The builder domain index is zero + let builder_domain_pre_mask = [0; 4]; + test_domain( + Domain::ApplicationMask(ApplicationDomain::Builder), + apply_bit_mask(builder_domain_pre_mask, &spec), + &spec, + ); + } + + fn apply_bit_mask(domain_bytes: [u8; 4], spec: &ChainSpec) -> u32 { + let mut domain = [0; 4]; + let mask_bytes = int_to_bytes4(spec.domain_application_mask); + + // Apply application bit mask + for (i, (domain_byte, mask_byte)) in domain_bytes.iter().zip(mask_bytes.iter()).enumerate() + { + domain[i] = domain_byte | mask_byte; + } + + u32::from_le_bytes(domain) } // Test that `fork_name_at_epoch` and `fork_epoch` are consistent. 
diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index f721e6c3bb..8b3a753bd5 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -69,6 +69,10 @@ impl ConfigAndPreset { "domain_aggregate_and_proof", u32_hex(spec.domain_aggregate_and_proof), ), + ( + "domain_application_mask", + u32_hex(spec.domain_application_mask), + ), ( "target_aggregators_per_committee", spec.target_aggregators_per_committee.to_string(), diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 22e429a58c..ecfd77d7a4 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -18,6 +18,7 @@ extern crate lazy_static; pub mod test_utils; pub mod aggregate_and_proof; +pub mod application_domain; pub mod attestation; pub mod attestation_data; pub mod attestation_duty; @@ -82,6 +83,7 @@ pub mod sync_committee_message; pub mod sync_selection_proof; pub mod sync_subnet_id; mod tree_hash_impls; +pub mod validator_registration_data; pub mod slot_data; #[cfg(feature = "sqlite")] @@ -157,6 +159,7 @@ pub use crate::sync_duty::SyncDuty; pub use crate::sync_selection_proof::SyncSelectionProof; pub use crate::sync_subnet_id::SyncSubnetId; pub use crate::validator::Validator; +pub use crate::validator_registration_data::*; pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; diff --git a/consensus/types/src/validator_registration_data.rs b/consensus/types/src/validator_registration_data.rs new file mode 100644 index 0000000000..5a3450df08 --- /dev/null +++ b/consensus/types/src/validator_registration_data.rs @@ -0,0 +1,23 @@ +use crate::*; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use tree_hash_derive::TreeHash; + +/// Validator registration, for use in interacting with servers implementing the builder API. 
+#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct SignedValidatorRegistrationData { + pub message: ValidatorRegistrationData, + pub signature: Signature, +} + +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode, TreeHash)] +pub struct ValidatorRegistrationData { + pub fee_recipient: Address, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub gas_limit: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub timestamp: u64, + pub pubkey: PublicKeyBytes, +} + +impl SignedRoot for ValidatorRegistrationData {} diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 800f988654..e39e6515fc 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -67,6 +67,7 @@ mod tests { impl SignedObject for SyncSelectionProof {} impl SignedObject for SyncCommitteeMessage {} impl SignedObject for SignedContributionAndProof {} + impl SignedObject for SignedValidatorRegistrationData {} /// A file format used by Web3Signer to discover and unlock keystores. #[derive(Serialize)] @@ -448,6 +449,18 @@ mod tests { } } + //TODO: remove this once the consensys web3signer includes the `validator_registration` method + #[allow(dead_code)] + fn get_validator_registration(pubkey: PublicKeyBytes) -> ValidatorRegistrationData { + let fee_recipient = Address::repeat_byte(42); + ValidatorRegistrationData { + fee_recipient, + gas_limit: 30_000_000, + timestamp: 100, + pubkey, + } + } + /// Test all the "base" (phase 0) types. 
async fn test_base_types(network: &str, listen_port: u16) { let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); @@ -499,6 +512,16 @@ mod tests { .await .unwrap() }) + //TODO: uncomment this once the consensys web3signer includes the `validator_registration` method + // + // .await + // .assert_signatures_match("validator_registration", |pubkey, validator_store| async move { + // let val_reg_data = get_validator_registration(pubkey); + // validator_store + // .sign_validator_registration_data(val_reg_data) + // .await + // .unwrap() + // }) .await; } @@ -575,6 +598,16 @@ mod tests { .unwrap() }, ) + //TODO: uncomment this once the consensys web3signer includes the `validator_registration` method + // + // .await + // .assert_signatures_match("validator_registration", |pubkey, validator_store| async move { + // let val_reg_data = get_validator_registration(pubkey); + // validator_store + // .sign_validator_registration_data(val_reg_data) + // .await + // .unwrap() + // }) .await; } diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index f405f1a2b3..836aab4c1f 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -85,6 +85,11 @@ lazy_static::lazy_static! 
{ "Total count of attempted SyncSelectionProof signings", &["status"] ); + pub static ref SIGNED_VALIDATOR_REGISTRATIONS_TOTAL: Result = try_create_int_counter_vec( + "builder_validator_registrations_total", + "Total count of ValidatorRegistrationData signings", + &["status"] + ); pub static ref DUTIES_SERVICE_TIMES: Result = try_create_histogram_vec( "vc_duties_service_task_times_seconds", "Duration to perform duties service tasks", diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index ce35a00351..5e45847598 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -485,7 +485,10 @@ impl ProductionValidatorClient { self.preparation_service .clone() - .start_update_service(&self.context.eth2_config.spec) + .start_update_service( + self.config.private_tx_proposals, + &self.context.eth2_config.spec, + ) .map_err(|e| format!("Unable to start preparation service: {}", e))?; if let Some(doppelganger_service) = self.doppelganger_service.clone() { diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index b4b6caa05d..34201180c0 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -3,17 +3,28 @@ use crate::{ fee_recipient_file::FeeRecipientFile, validator_store::{DoppelgangerStatus, ValidatorStore}, }; +use bls::PublicKeyBytes; use environment::RuntimeContext; +use parking_lot::RwLock; use slog::{debug, error, info}; use slot_clock::SlotClock; +use std::collections::HashMap; +use std::hash::Hash; use std::ops::Deref; use std::sync::Arc; +use std::time::{SystemTime, UNIX_EPOCH}; use tokio::time::{sleep, Duration}; -use types::{Address, ChainSpec, EthSpec, ProposerPreparationData}; +use types::{ + Address, ChainSpec, EthSpec, ProposerPreparationData, SignedValidatorRegistrationData, + ValidatorRegistrationData, +}; /// Number of epochs before the Bellatrix hard fork to begin posting proposer preparations. 
const PROPOSER_PREPARATION_LOOKAHEAD_EPOCHS: u64 = 2; +/// Number of epochs to wait before re-submitting validator registration. +const EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION: u64 = 1; + /// Builds an `PreparationService`. pub struct PreparationServiceBuilder { validator_store: Option>>, @@ -83,6 +94,7 @@ impl PreparationServiceBuilder { .ok_or("Cannot build PreparationService without runtime_context")?, fee_recipient: self.fee_recipient, fee_recipient_file: self.fee_recipient_file, + validator_registration_cache: RwLock::new(HashMap::new()), }), }) } @@ -96,6 +108,32 @@ pub struct Inner { context: RuntimeContext, fee_recipient: Option
, fee_recipient_file: Option, + // Used to track unpublished validator registration changes. + validator_registration_cache: + RwLock>, +} + +#[derive(Hash, Eq, PartialEq, Debug, Clone)] +pub struct ValidatorRegistrationKey { + pub fee_recipient: Address, + pub gas_limit: u64, + pub pubkey: PublicKeyBytes, +} + +impl From for ValidatorRegistrationKey { + fn from(data: ValidatorRegistrationData) -> Self { + let ValidatorRegistrationData { + fee_recipient, + gas_limit, + timestamp: _, + pubkey, + } = data; + Self { + fee_recipient, + gas_limit, + pubkey, + } + } } /// Attempts to produce proposer preparations for all known validators at the beginning of each epoch. @@ -120,8 +158,19 @@ impl Deref for PreparationService { } impl PreparationService { + pub fn start_update_service( + self, + start_registration_service: bool, + spec: &ChainSpec, + ) -> Result<(), String> { + if start_registration_service { + self.clone().start_validator_registration_service(spec)?; + } + self.start_proposer_prepare_service(spec) + } + /// Starts the service which periodically produces proposer preparations. - pub fn start_update_service(self, spec: &ChainSpec) -> Result<(), String> { + pub fn start_proposer_prepare_service(self, spec: &ChainSpec) -> Result<(), String> { let log = self.context.log().clone(); let slot_duration = Duration::from_secs(spec.seconds_per_slot); @@ -163,6 +212,41 @@ impl PreparationService { Ok(()) } + /// Starts the service which periodically sends connected beacon nodes validator registration information. 
+ pub fn start_validator_registration_service(self, spec: &ChainSpec) -> Result<(), String> { + let log = self.context.log().clone(); + + info!( + log, + "Validator registration service started"; + ); + + let spec = spec.clone(); + let slot_duration = Duration::from_secs(spec.seconds_per_slot); + + let executor = self.context.executor.clone(); + + let validator_registration_fut = async move { + loop { + // Poll the endpoint immediately to ensure fee recipients are received. + if let Err(e) = self.register_validators(&spec).await { + error!(log,"Error during validator registration";"error" => ?e); + } + + // Wait one slot if the register validator request fails or if we should not publish at the current slot. + if let Some(duration_to_next_slot) = self.slot_clock.duration_to_next_slot() { + sleep(duration_to_next_slot).await; + } else { + error!(log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. + sleep(slot_duration).await; + } + } + }; + executor.spawn(validator_registration_fut, "validator_registration_service"); + Ok(()) + } + /// Return `true` if the current slot is close to or past the Bellatrix fork epoch. /// /// This avoids spamming the BN with preparations before the Bellatrix fork epoch, which may @@ -188,6 +272,33 @@ impl PreparationService { } fn collect_preparation_data(&self, spec: &ChainSpec) -> Vec { + self.collect_data(spec, |_, validator_index, fee_recipient| { + ProposerPreparationData { + validator_index, + fee_recipient, + } + }) + } + + fn collect_validator_registration_keys( + &self, + spec: &ChainSpec, + ) -> Vec { + self.collect_data(spec, |pubkey, _, fee_recipient| { + ValidatorRegistrationKey { + fee_recipient, + //TODO(sean) this is geth's default, we should make this configurable and maybe have the default be dynamic. 
+ // Discussion here: https://github.com/ethereum/builder-specs/issues/17 + gas_limit: 30_000_000, + pubkey, + } + }) + } + + fn collect_data(&self, spec: &ChainSpec, map_fn: G) -> Vec + where + G: Fn(PublicKeyBytes, u64, Address) -> U, + { let log = self.context.log(); let fee_recipient_file = self @@ -234,10 +345,7 @@ impl PreparationService { .or(self.fee_recipient); if let Some(fee_recipient) = fee_recipient { - Some(ProposerPreparationData { - validator_index, - fee_recipient, - }) + Some(map_fn(pubkey, validator_index, fee_recipient)) } else { if spec.bellatrix_fork_epoch.is_some() { error!( @@ -284,4 +392,116 @@ impl PreparationService { } Ok(()) } + + /// Register validators with builders, used in the blinded block proposal flow. + async fn register_validators(&self, spec: &ChainSpec) -> Result<(), String> { + let registration_keys = self.collect_validator_registration_keys(spec); + + let mut changed_keys = vec![]; + + // Need to scope this so the read lock is not held across an await point (I don't know why + // but the explicit `drop` is not enough). + { + let guard = self.validator_registration_cache.read(); + for key in registration_keys.iter() { + if !guard.contains_key(key) { + changed_keys.push(key.clone()); + } + } + drop(guard); + } + + // Check if any have changed or it's been `EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION`. 
+ if let Some(slot) = self.slot_clock.now() { + if slot % (E::slots_per_epoch() * EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION) == 0 { + self.publish_validator_registration_data(registration_keys) + .await?; + } else if !changed_keys.is_empty() { + self.publish_validator_registration_data(changed_keys) + .await?; + } + } + + Ok(()) + } + + async fn publish_validator_registration_data( + &self, + registration_keys: Vec, + ) -> Result<(), String> { + let log = self.context.log(); + + let registration_data_len = registration_keys.len(); + let mut signed = Vec::with_capacity(registration_data_len); + + for key in registration_keys { + let cached_registration_opt = + self.validator_registration_cache.read().get(&key).cloned(); + + let signed_data = if let Some(signed_data) = cached_registration_opt { + signed_data + } else { + let timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("{e:?}"))? + .as_secs(); + + let ValidatorRegistrationKey { + fee_recipient, + gas_limit, + pubkey, + } = key.clone(); + + let signed_data = match self + .validator_store + .sign_validator_registration_data(ValidatorRegistrationData { + fee_recipient, + gas_limit, + timestamp, + pubkey, + }) + .await + { + Ok(data) => data, + Err(e) => { + error!(log, "Unable to sign validator registration data"; "error" => ?e, "pubkey" => ?pubkey); + continue; + } + }; + + self.validator_registration_cache + .write() + .insert(key, signed_data.clone()); + + signed_data + }; + signed.push(signed_data); + } + + if !signed.is_empty() { + let signed_ref = signed.as_slice(); + + match self + .beacon_nodes + .first_success(RequireSynced::Yes, |beacon_node| async move { + beacon_node + .post_validator_register_validator(signed_ref) + .await + }) + .await + { + Ok(()) => debug!( + log, + "Published validator registration"; + "count" => registration_data_len, + ), + Err(e) => error!( + log, + "Unable to publish validator registration"; + "error" => %e, + ), + } + } + Ok(()) + } } diff 
--git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index 0daefc43c4..de69d99003 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -30,6 +30,7 @@ pub enum Error { ShuttingDown, TokioJoin(String), MergeForkNotSupported, + GenesisForkVersionRequired, } /// Enumerates all messages that can be signed by a validator. @@ -45,6 +46,7 @@ pub enum SignableMessage<'a, T: EthSpec, Payload: ExecPayload = FullPayload), + ValidatorRegistration(&'a ValidatorRegistrationData), } impl<'a, T: EthSpec, Payload: ExecPayload> SignableMessage<'a, T, Payload> { @@ -64,6 +66,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> SignableMessage<'a, T, Payload> { beacon_block_root, .. } => beacon_block_root.signing_root(domain), SignableMessage::SignedContributionAndProof(c) => c.signing_root(domain), + SignableMessage::ValidatorRegistration(v) => v.signing_root(domain), } } } @@ -129,6 +132,22 @@ impl SigningMethod { let signing_root = signable_message.signing_root(domain_hash); + let fork_info = Some(ForkInfo { + fork, + genesis_validators_root, + }); + + self.get_signature_from_root(signable_message, signing_root, executor, fork_info) + .await + } + + pub async fn get_signature_from_root>( + &self, + signable_message: SignableMessage<'_, T, Payload>, + signing_root: Hash256, + executor: &TaskExecutor, + fork_info: Option, + ) -> Result { match self { SigningMethod::LocalKeystore { voting_keypair, .. } => { let _timer = @@ -181,21 +200,21 @@ impl SigningMethod { SignableMessage::SignedContributionAndProof(c) => { Web3SignerObject::ContributionAndProof(c) } + SignableMessage::ValidatorRegistration(v) => { + Web3SignerObject::ValidatorRegistration(v) + } }; // Determine the Web3Signer message type. let message_type = object.message_type(); - // The `fork_info` field is not required for deposits since they sign across the - // genesis fork version. - let fork_info = if let Web3SignerObject::Deposit { .. 
} = &object { - None - } else { - Some(ForkInfo { - fork, - genesis_validators_root, - }) - }; + if matches!( + object, + Web3SignerObject::Deposit { .. } | Web3SignerObject::ValidatorRegistration(_) + ) && fork_info.is_some() + { + return Err(Error::GenesisForkVersionRequired); + } let request = SigningRequest { message_type, diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 9ac1655cce..0ab37484ba 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -17,6 +17,7 @@ pub enum MessageType { SyncCommitteeMessage, SyncCommitteeSelectionProof, SyncCommitteeContributionAndProof, + ValidatorRegistration, } #[derive(Debug, PartialEq, Copy, Clone, Serialize)] @@ -64,6 +65,7 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: ExecPayload> { }, SyncAggregatorSelectionData(&'a SyncAggregatorSelectionData), ContributionAndProof(&'a ContributionAndProof), + ValidatorRegistration(&'a ValidatorRegistrationData), } impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { @@ -93,6 +95,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { Web3SignerObject::ContributionAndProof(_) => { MessageType::SyncCommitteeContributionAndProof } + Web3SignerObject::ValidatorRegistration(_) => MessageType::ValidatorRegistration, } } } diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index b39ef9ef83..36ec5e8955 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -20,9 +20,9 @@ use types::{ attestation::Error as AttestationError, graffiti::GraffitiString, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, ExecPayload, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, - Signature, SignedAggregateAndProof, SignedBeaconBlock, 
SignedContributionAndProof, Slot, - SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, - SyncSelectionProof, SyncSubnetId, + Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot, + SignedValidatorRegistrationData, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, + SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, }; use validator_dir::ValidatorDir; @@ -524,6 +524,35 @@ impl ValidatorStore { } } + pub async fn sign_validator_registration_data( + &self, + validator_registration_data: ValidatorRegistrationData, + ) -> Result { + let domain_hash = self.spec.get_builder_domain(); + let signing_root = validator_registration_data.signing_root(domain_hash); + + let signing_method = + self.doppelganger_bypassed_signing_method(validator_registration_data.pubkey)?; + let signature = signing_method + .get_signature_from_root::>( + SignableMessage::ValidatorRegistration(&validator_registration_data), + signing_root, + &self.task_executor, + None, + ) + .await?; + + metrics::inc_counter_vec( + &metrics::SIGNED_VALIDATOR_REGISTRATIONS_TOTAL, + &[metrics::SUCCESS], + ); + + Ok(SignedValidatorRegistrationData { + message: validator_registration_data, + signature, + }) + } + /// Signs an `AggregateAndProof` for a given validator. 
/// /// The resulting `SignedAggregateAndProof` is sent on the aggregation channel and cannot be From d40c76e667dcc2db66478d96c8ce76aafdbb747d Mon Sep 17 00:00:00 2001 From: Divma Date: Thu, 30 Jun 2022 22:51:49 +0000 Subject: [PATCH 047/184] Fix clippy lints for rust 1.62 (#3300) ## Issue Addressed Fixes some new clippy lints after the last rust release ### Lints fixed for the curious: - [cast_abs_to_unsigned](https://rust-lang.github.io/rust-clippy/master/index.html#cast_abs_to_unsigned) - [map_identity](https://rust-lang.github.io/rust-clippy/master/index.html#map_identity) - [let_unit_value](https://rust-lang.github.io/rust-clippy/master/index.html#let_unit_value) - [crate_in_macro_def](https://rust-lang.github.io/rust-clippy/master/index.html#crate_in_macro_def) - [extra_unused_lifetimes](https://rust-lang.github.io/rust-clippy/master/index.html#extra_unused_lifetimes) - [format_push_string](https://rust-lang.github.io/rust-clippy/master/index.html#format_push_string) --- beacon_node/beacon_chain/src/beacon_chain.rs | 6 ++---- beacon_node/beacon_chain/src/block_verification.rs | 7 +++---- .../network/src/beacon_processor/worker/gossip_methods.rs | 4 ++-- .../network/src/subnet_service/attestation_subnets.rs | 2 +- beacon_node/src/config.rs | 4 +++- boot_node/src/lib.rs | 2 +- consensus/proto_array/src/fork_choice_test_definition.rs | 2 -- consensus/proto_array/src/proto_array.rs | 2 +- consensus/types/src/test_utils/macros.rs | 4 ++-- 9 files changed, 15 insertions(+), 18 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5d2b35727f..568179a062 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1079,12 +1079,10 @@ impl BeaconChain { } /// Apply a function to the canonical head without cloning it. 
- pub fn with_head( - &self, - f: impl FnOnce(&BeaconSnapshot) -> Result, - ) -> Result + pub fn with_head(&self, f: F) -> Result where E: From, + F: FnOnce(&BeaconSnapshot) -> Result, { let head_lock = self .canonical_head diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index c791a35f68..a6cd98c253 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1717,14 +1717,13 @@ fn verify_header_signature( .get(header.message.proposer_index as usize) .cloned() .ok_or(BlockError::UnknownValidator(header.message.proposer_index))?; - let (fork, genesis_validators_root) = chain - .with_head(|head| { + let (fork, genesis_validators_root) = + chain.with_head::<_, BlockError, _>(|head| { Ok(( head.beacon_state.fork(), head.beacon_state.genesis_validators_root(), )) - }) - .map_err(|e: BlockError| e)?; + })?; if header.verify_signature::( &proposer_pubkey, diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index aa01841106..f014af4c55 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -45,7 +45,7 @@ struct VerifiedUnaggregate { /// This implementation allows `Self` to be imported to fork choice and other functions on the /// `BeaconChain`. -impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedUnaggregate { +impl VerifiedAttestation for VerifiedUnaggregate { fn attestation(&self) -> &Attestation { &self.attestation } @@ -72,7 +72,7 @@ struct VerifiedAggregate { /// This implementation allows `Self` to be imported to fork choice and other functions on the /// `BeaconChain`. 
-impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedAggregate { +impl VerifiedAttestation for VerifiedAggregate { fn attestation(&self) -> &Attestation { &self.signed_aggregate.message.aggregate } diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index 2b0fe6f55a..475bd7f17d 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -623,7 +623,7 @@ impl Stream for AttestationService { // process any known validator expiries match self.known_validators.poll_next_unpin(cx) { Poll::Ready(Some(Ok(_validator_index))) => { - let _ = self.handle_known_validator_expiry(); + self.handle_known_validator_expiry(); } Poll::Ready(Some(Err(e))) => { error!(self.log, "Failed to check for random subnet cycles"; "error"=> e); diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 0421df3429..63cc9214ff 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -11,6 +11,7 @@ use slog::{info, warn, Logger}; use std::cmp; use std::cmp::max; use std::fmt::Debug; +use std::fmt::Write; use std::fs; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; use std::path::{Path, PathBuf}; @@ -784,7 +785,8 @@ pub fn set_network_config( None }) { - addr.push_str(&format!(":{}", enr_udp_port)); + write!(addr, ":{}", enr_udp_port) + .map_err(|e| format!("Failed to write enr address {}", e))?; } else { return Err( "enr-udp-port must be set for node to be discoverable with dns address" diff --git a/boot_node/src/lib.rs b/boot_node/src/lib.rs index f4391f987a..3d9dada0fd 100644 --- a/boot_node/src/lib.rs +++ b/boot_node/src/lib.rs @@ -50,7 +50,7 @@ pub fn run( let logger = Logger::root(drain.fuse(), o!()); let _scope_guard = slog_scope::set_global_logger(logger); - let _log_guard = slog_stdlog::init_with_level(debug_level).unwrap(); + slog_stdlog::init_with_level(debug_level).unwrap(); 
let log = slog_scope::logger(); // Run the main function emitting any errors diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 2980c019e8..2be46cc590 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -105,7 +105,6 @@ impl ForkChoiceTestDefinition { Hash256::zero(), &spec, ) - .map_err(|e| e) .unwrap_or_else(|e| { panic!("find_head op at index {} returned error {}", op_index, e) }); @@ -132,7 +131,6 @@ impl ForkChoiceTestDefinition { proposer_boost_root, &spec, ) - .map_err(|e| e) .unwrap_or_else(|e| { panic!("find_head op at index {} returned error {}", op_index, e) }); diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 3f7909553b..acdb42897a 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -240,7 +240,7 @@ impl ProtoArray { // not exist. node.weight = node .weight - .checked_sub(node_delta.abs() as u64) + .checked_sub(node_delta.unsigned_abs()) .ok_or(Error::DeltaOverflow(node_index))?; } else { node.weight = node diff --git a/consensus/types/src/test_utils/macros.rs b/consensus/types/src/test_utils/macros.rs index df449c712d..1e275a5760 100644 --- a/consensus/types/src/test_utils/macros.rs +++ b/consensus/types/src/test_utils/macros.rs @@ -13,8 +13,8 @@ macro_rules! ssz_tests { ($type: ty) => { #[test] pub fn test_ssz_round_trip() { - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use ssz::{ssz_encode, Decode}; + use $crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; let mut rng = XorShiftRng::from_seed([42; 16]); let original = <$type>::random_for_test(&mut rng); @@ -33,8 +33,8 @@ macro_rules! 
tree_hash_tests { ($type: ty) => { #[test] pub fn test_tree_hash_root() { - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use tree_hash::TreeHash; + use $crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; let mut rng = XorShiftRng::from_seed([42; 16]); let original = <$type>::random_for_test(&mut rng); From a7da0677d5db0bfa747f4a1d8d6135218a0983bf Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 1 Jul 2022 01:15:19 +0000 Subject: [PATCH 048/184] Remove builder redundancy (#3294) ## Issue Addressed This PR is a subset of the changes in #3134. Unstable will still not function correctly with the new builder spec once this is merged, #3134 should be used on testnets ## Proposed Changes - Removes redundancy in "builders" (servers implementing the builder spec) - Renames `payload-builder` flag to `builder` - Moves from old builder RPC API to new HTTP API, but does not implement the validator registration API (implemented in https://github.com/sigp/lighthouse/pull/3194) Co-authored-by: sean Co-authored-by: realbigsean --- Cargo.lock | 35 ++ Cargo.toml | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 9 +- beacon_node/beacon_chain/src/builder.rs | 4 +- .../beacon_chain/src/execution_payload.rs | 11 +- beacon_node/beacon_chain/src/test_utils.rs | 3 +- .../tests/payload_invalidation.rs | 2 +- beacon_node/builder_client/Cargo.toml | 12 + beacon_node/builder_client/src/lib.rs | 192 ++++++++++ beacon_node/client/src/builder.rs | 2 +- beacon_node/execution_layer/Cargo.toml | 1 + beacon_node/execution_layer/src/engine_api.rs | 32 +- .../execution_layer/src/engine_api/http.rs | 65 +--- beacon_node/execution_layer/src/engines.rs | 106 +---- beacon_node/execution_layer/src/lib.rs | 361 ++++++++++-------- beacon_node/execution_layer/src/metrics.rs | 1 + .../src/test_utils/mock_execution_layer.rs | 11 +- beacon_node/src/cli.rs | 5 +- beacon_node/src/config.rs | 6 +- common/eth2/src/lib.rs | 2 +- consensus/types/Cargo.toml | 1 + 
consensus/types/src/builder_bid.rs | 52 +++ consensus/types/src/lib.rs | 1 + lighthouse/tests/beacon_node.rs | 3 +- .../src/test_rig.rs | 20 +- 25 files changed, 564 insertions(+), 374 deletions(-) create mode 100644 beacon_node/builder_client/Cargo.toml create mode 100644 beacon_node/builder_client/src/lib.rs create mode 100644 consensus/types/src/builder_bid.rs diff --git a/Cargo.lock b/Cargo.lock index 3bdce9138e..1e9b5b4239 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -537,6 +537,17 @@ dependencies = [ "safemem", ] +[[package]] +name = "builder_client" +version = "0.1.0" +dependencies = [ + "eth2", + "reqwest", + "sensitive_url", + "serde", + "serde_json", +] + [[package]] name = "bumpalo" version = "3.10.0" @@ -1876,6 +1887,7 @@ name = "execution_layer" version = "0.1.0" dependencies = [ "async-trait", + "builder_client", "bytes", "environment", "eth2", @@ -5505,6 +5517,28 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_with" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" +dependencies = [ + "serde", + "serde_with_macros", +] + +[[package]] +name = "serde_with_macros" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "serde_yaml" version = "0.8.24" @@ -6655,6 +6689,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", + "serde_with", "serde_yaml", "slog", "smallvec", diff --git a/Cargo.toml b/Cargo.toml index c79859d0a7..819f92d99e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ members = [ "beacon_node", "beacon_node/beacon_chain", + "beacon_node/builder_client", "beacon_node/client", "beacon_node/eth1", "beacon_node/lighthouse_network", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs 
b/beacon_node/beacon_chain/src/beacon_chain.rs index 568179a062..a64c971875 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -334,7 +334,7 @@ pub struct BeaconChain { /// Provides information from the Ethereum 1 (PoW) chain. pub eth1_chain: Option>, /// Interfaces with the execution client. - pub execution_layer: Option, + pub execution_layer: Option>, /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. pub(crate) canonical_head: TimeoutRwLock>, /// The root of the genesis block. @@ -3216,6 +3216,11 @@ impl BeaconChain { let slot = state.slot(); let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64; + let pubkey_opt = state + .validators() + .get(proposer_index as usize) + .map(|v| v.pubkey); + // Closure to fetch a sync aggregate in cases where it is required. let get_sync_aggregate = || -> Result, BlockProductionError> { Ok(self @@ -3274,7 +3279,7 @@ impl BeaconChain { BeaconState::Merge(_) => { let sync_aggregate = get_sync_aggregate()?; let execution_payload = - get_execution_payload::(self, &state, proposer_index)?; + get_execution_payload::(self, &state, proposer_index, pubkey_opt)?; BeaconBlock::Merge(BeaconBlockMerge { slot, proposer_index, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 361246b4d3..87f9416158 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -77,7 +77,7 @@ pub struct BeaconChainBuilder { >, op_pool: Option>, eth1_chain: Option>, - execution_layer: Option, + execution_layer: Option>, event_handler: Option>, slot_clock: Option, shutdown_sender: Option>, @@ -481,7 +481,7 @@ where } /// Sets the `BeaconChain` execution layer. 
- pub fn execution_layer(mut self, execution_layer: Option) -> Self { + pub fn execution_layer(mut self, execution_layer: Option>) -> Self { self.execution_layer = execution_layer; self } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 08e4cd41ef..7085fc6500 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -247,9 +247,10 @@ pub fn get_execution_payload, state: &BeaconState, proposer_index: u64, + pubkey: Option, ) -> Result { Ok( - prepare_execution_payload_blocking::(chain, state, proposer_index)? + prepare_execution_payload_blocking::(chain, state, proposer_index, pubkey)? .unwrap_or_default(), ) } @@ -259,6 +260,7 @@ pub fn prepare_execution_payload_blocking, state: &BeaconState, proposer_index: u64, + pubkey: Option, ) -> Result, BlockProductionError> { let execution_layer = chain .execution_layer @@ -267,7 +269,7 @@ pub fn prepare_execution_payload_blocking(chain, state, proposer_index).await + prepare_execution_payload::(chain, state, proposer_index, pubkey).await }) .map_err(BlockProductionError::BlockingFailed)? 
} @@ -290,6 +292,7 @@ pub async fn prepare_execution_payload, state: &BeaconState, proposer_index: u64, + pubkey: Option, ) -> Result, BlockProductionError> { let spec = &chain.spec; let execution_layer = chain @@ -345,12 +348,14 @@ pub async fn prepare_execution_payload( + .get_payload::( parent_hash, timestamp, random, finalized_block_hash.unwrap_or_else(ExecutionBlockHash::zero), proposer_index, + pubkey, + state.slot(), ) .await .map_err(BlockProductionError::GetPayloadFailed)?; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 69ed413fd4..980de25cf3 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -147,7 +147,7 @@ pub struct Builder { store: Option>>, initial_mutator: Option>, store_mutator: Option>, - execution_layer: Option, + execution_layer: Option>, mock_execution_layer: Option>, runtime: TestRuntime, log: Logger, @@ -361,6 +361,7 @@ where DEFAULT_TERMINAL_BLOCK, spec.terminal_block_hash, spec.terminal_block_hash_activation_epoch, + None, ); self.execution_layer = Some(mock.el.clone()); self.mock_execution_layer = Some(mock); diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 1aa9844a35..2a48a4b691 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -64,7 +64,7 @@ impl InvalidPayloadRig { self } - fn execution_layer(&self) -> ExecutionLayer { + fn execution_layer(&self) -> ExecutionLayer { self.harness.chain.execution_layer.clone().unwrap() } diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml new file mode 100644 index 0000000000..c4d21c59ab --- /dev/null +++ b/beacon_node/builder_client/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "builder_client" +version = "0.1.0" +edition = "2021" +authors = ["Sean Anderson "] + +[dependencies] +reqwest = 
{ version = "0.11.0", features = ["json","stream"] } +sensitive_url = { path = "../../common/sensitive_url" } +eth2 = { path = "../../common/eth2" } +serde = { version = "1.0.116", features = ["derive"] } +serde_json = "1.0.58" \ No newline at end of file diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs new file mode 100644 index 0000000000..500f5aa9ff --- /dev/null +++ b/beacon_node/builder_client/src/lib.rs @@ -0,0 +1,192 @@ +use eth2::ok_or_error; +use eth2::types::builder_bid::SignedBuilderBid; +use eth2::types::{ + BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, ExecutionPayload, + ForkVersionedResponse, PublicKeyBytes, SignedBeaconBlock, SignedValidatorRegistrationData, + Slot, +}; +pub use eth2::Error; +use reqwest::{IntoUrl, Response}; +use sensitive_url::SensitiveUrl; +use serde::de::DeserializeOwned; +use serde::Serialize; +use std::time::Duration; + +pub const DEFAULT_GET_HEADER_TIMEOUT_MILLIS: u64 = 500; + +#[derive(Clone)] +pub struct Timeouts { + get_header: Duration, +} + +impl Default for Timeouts { + fn default() -> Self { + Self { + get_header: Duration::from_millis(DEFAULT_GET_HEADER_TIMEOUT_MILLIS), + } + } +} + +#[derive(Clone)] +pub struct BuilderHttpClient { + client: reqwest::Client, + server: SensitiveUrl, + timeouts: Timeouts, +} + +impl BuilderHttpClient { + pub fn new(server: SensitiveUrl) -> Result { + Ok(Self { + client: reqwest::Client::new(), + server, + timeouts: Timeouts::default(), + }) + } + + pub fn new_with_timeouts(server: SensitiveUrl, timeouts: Timeouts) -> Result { + Ok(Self { + client: reqwest::Client::new(), + server, + timeouts, + }) + } + + async fn get(&self, url: U) -> Result { + self.get_response_with_timeout(url, None) + .await? + .json() + .await + .map_err(Error::Reqwest) + } + + async fn get_with_timeout( + &self, + url: U, + timeout: Duration, + ) -> Result { + self.get_response_with_timeout(url, Some(timeout)) + .await? 
+ .json() + .await + .map_err(Error::Reqwest) + } + + /// Perform a HTTP GET request, returning the `Response` for further processing. + async fn get_response_with_timeout( + &self, + url: U, + timeout: Option, + ) -> Result { + let mut builder = self.client.get(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let response = builder.send().await.map_err(Error::Reqwest)?; + ok_or_error(response).await + } + + /// Generic POST function supporting arbitrary responses and timeouts. + async fn post_generic( + &self, + url: U, + body: &T, + timeout: Option, + ) -> Result { + let mut builder = self.client.post(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let response = builder.json(body).send().await?; + ok_or_error(response).await + } + + async fn post_with_raw_response( + &self, + url: U, + body: &T, + ) -> Result { + let response = self + .client + .post(url) + .json(body) + .send() + .await + .map_err(Error::Reqwest)?; + ok_or_error(response).await + } + + /// `POST /eth/v1/builder/validators` + pub async fn post_builder_validators( + &self, + validator: &[SignedValidatorRegistrationData], + ) -> Result<(), Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("builder") + .push("validators"); + + self.post_generic(path, &validator, None).await?; + Ok(()) + } + + /// `POST /eth/v1/builder/blinded_blocks` + pub async fn post_builder_blinded_blocks( + &self, + blinded_block: &SignedBeaconBlock>, + ) -> Result>, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("builder") + .push("blinded_blocks"); + + Ok(self + .post_with_raw_response(path, &blinded_block) + .await? + .json() + .await?) 
+ } + + /// `GET /eth/v1/builder/header` + pub async fn get_builder_header>( + &self, + slot: Slot, + parent_hash: ExecutionBlockHash, + pubkey: &PublicKeyBytes, + ) -> Result>, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("builder") + .push("header") + .push(slot.to_string().as_str()) + .push(format!("{parent_hash:?}").as_str()) + .push(pubkey.as_hex_string().as_str()); + + self.get_with_timeout(path, self.timeouts.get_header).await + } + + /// `GET /eth/v1/builder/status` + pub async fn get_builder_status(&self) -> Result<(), Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("builder") + .push("status"); + + self.get(path).await + } +} diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index a6124bdfad..95ba1b5657 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -724,7 +724,7 @@ where execution_layer.spawn_watchdog_routine(beacon_chain.slot_clock.clone()); // Spawn a routine that removes expired proposer preparations. 
- execution_layer.spawn_clean_proposer_caches_routine::( + execution_layer.spawn_clean_proposer_caches_routine::( beacon_chain.slot_clock.clone(), ); diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index dbd6324680..c181c19050 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -38,3 +38,4 @@ zeroize = { version = "1.4.2", features = ["zeroize_derive"] } lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lazy_static = "1.4.0" ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } +builder_client = { path = "../builder_client" } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 5f3edb78bf..a1e769e3e3 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,11 +1,9 @@ use crate::engines::ForkChoiceState; -use async_trait::async_trait; pub use ethers_core::types::Transaction; use http::deposit_methods::RpcError; pub use json_structures::TransitionConfigurationV1; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; -use slog::Logger; pub use types::{ Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, Hash256, Uint256, VariableList, @@ -28,10 +26,7 @@ pub enum Error { InvalidExecutePayloadResponse(&'static str), JsonRpc(RpcError), Json(serde_json::Error), - ServerMessage { - code: i64, - message: String, - }, + ServerMessage { code: i64, message: String }, Eip155Failure, IsSyncing, ExecutionBlockNotFound(ExecutionBlockHash), @@ -40,15 +35,9 @@ pub enum Error { PayloadIdUnavailable, TransitionConfigurationMismatch, PayloadConversionLogicFlaw, - InvalidBuilderQuery, - MissingPayloadId { - parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, - }, 
DeserializeTransaction(ssz_types::Error), DeserializeTransactions(ssz_types::Error), + BuilderApi(builder_client::Error), } impl From for Error { @@ -76,19 +65,14 @@ impl From for Error { } } -pub struct EngineApi; -pub struct BuilderApi; - -#[async_trait] -pub trait Builder { - async fn notify_forkchoice_updated( - &self, - forkchoice_state: ForkChoiceState, - payload_attributes: Option, - log: &Logger, - ) -> Result; +impl From for Error { + fn from(e: builder_client::Error) -> Self { + Error::BuilderApi(e) + } } +pub struct EngineApi; + #[derive(Clone, Copy, Debug, PartialEq)] pub enum PayloadStatusV1Status { Valid, diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 157f9a3054..832771460e 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -10,7 +10,7 @@ use serde_json::json; use std::marker::PhantomData; use std::time::Duration; -use types::{BlindedPayload, EthSpec, ExecutionPayloadHeader, SignedBeaconBlock}; +use types::EthSpec; pub use deposit_log::{DepositLog, Log}; pub use reqwest::Client; @@ -43,12 +43,6 @@ pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_millis(500); -pub const BUILDER_GET_PAYLOAD_HEADER_V1: &str = "builder_getPayloadHeaderV1"; -pub const BUILDER_GET_PAYLOAD_HEADER_TIMEOUT: Duration = Duration::from_secs(2); - -pub const BUILDER_PROPOSE_BLINDED_BLOCK_V1: &str = "builder_proposeBlindedBlockV1"; -pub const BUILDER_PROPOSE_BLINDED_BLOCK_TIMEOUT: Duration = Duration::from_secs(2); - /// This error is returned during a `chainId` call by Geth. 
pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; @@ -714,63 +708,6 @@ impl HttpJsonRpc { } } -impl HttpJsonRpc { - pub async fn get_payload_header_v1( - &self, - payload_id: PayloadId, - ) -> Result, Error> { - let params = json!([JsonPayloadIdRequest::from(payload_id)]); - - let response: JsonExecutionPayloadHeaderV1 = self - .rpc_request( - BUILDER_GET_PAYLOAD_HEADER_V1, - params, - BUILDER_GET_PAYLOAD_HEADER_TIMEOUT, - ) - .await?; - - Ok(response.into()) - } - - pub async fn forkchoice_updated_v1( - &self, - forkchoice_state: ForkChoiceState, - payload_attributes: Option, - ) -> Result { - let params = json!([ - JsonForkChoiceStateV1::from(forkchoice_state), - payload_attributes.map(JsonPayloadAttributesV1::from) - ]); - - let response: JsonForkchoiceUpdatedV1Response = self - .rpc_request( - ENGINE_FORKCHOICE_UPDATED_V1, - params, - ENGINE_FORKCHOICE_UPDATED_TIMEOUT, - ) - .await?; - - Ok(response.into()) - } - - pub async fn propose_blinded_block_v1( - &self, - block: SignedBeaconBlock>, - ) -> Result, Error> { - let params = json!([block]); - - let response: JsonExecutionPayloadV1 = self - .rpc_request( - BUILDER_PROPOSE_BLINDED_BLOCK_V1, - params, - BUILDER_PROPOSE_BLINDED_BLOCK_TIMEOUT, - ) - .await?; - - Ok(response.into()) - } -} - #[cfg(test)] mod test { use super::auth::JwtKey; diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index d3c4d0e421..88c94162f8 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -1,12 +1,9 @@ //! Provides generic behaviour for multiple execution engines, specifically fallback behaviour. 
use crate::engine_api::{ - Builder, EngineApi, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, - PayloadId, + EngineApi, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, PayloadId, }; -use crate::{BuilderApi, HttpJsonRpc}; -use async_trait::async_trait; -use futures::future::join_all; +use crate::HttpJsonRpc; use lru::LruCache; use slog::{crit, debug, info, warn, Logger}; use std::future::Future; @@ -97,9 +94,8 @@ impl Engine { } } -#[async_trait] -impl Builder for Engine { - async fn notify_forkchoice_updated( +impl Engine { + pub async fn notify_forkchoice_updated( &self, forkchoice_state: ForkChoiceState, payload_attributes: Option, @@ -128,34 +124,6 @@ impl Builder for Engine { } } -#[async_trait] -impl Builder for Engine { - async fn notify_forkchoice_updated( - &self, - forkchoice_state: ForkChoiceState, - pa: Option, - log: &Logger, - ) -> Result { - let payload_attributes = pa.ok_or(EngineApiError::InvalidBuilderQuery)?; - let response = self - .api - .forkchoice_updated_v1(forkchoice_state, Some(payload_attributes)) - .await?; - - if let Some(payload_id) = response.payload_id { - let key = PayloadIdCacheKey::new(&forkchoice_state, &payload_attributes); - self.payload_id_cache.lock().await.put(key, payload_id); - } else { - warn!( - log, - "Builder should have returned a payload_id for attributes {:?}", payload_attributes - ); - } - - Ok(response) - } -} - // This structure used to hold multiple execution engines managed in a fallback manner. This // functionality has been removed following https://github.com/sigp/lighthouse/issues/3118 and this // struct will likely be removed in the future. 
@@ -165,15 +133,11 @@ pub struct Engines { pub log: Logger, } -pub struct Builders { - pub builders: Vec>, - pub log: Logger, -} - #[derive(Debug)] pub enum EngineError { Offline { id: String }, Api { id: String, error: EngineApiError }, + BuilderApi { error: EngineApiError }, Auth { id: String }, } @@ -422,66 +386,6 @@ impl Engines { } } -impl Builders { - pub async fn first_success_without_retry<'a, F, G, H>( - &'a self, - func: F, - ) -> Result> - where - F: Fn(&'a Engine) -> G, - G: Future>, - { - let mut errors = vec![]; - - for builder in &self.builders { - match func(builder).await { - Ok(result) => return Ok(result), - Err(error) => { - debug!( - self.log, - "Builder call failed"; - "error" => ?error, - "id" => &builder.id - ); - errors.push(EngineError::Api { - id: builder.id.clone(), - error, - }) - } - } - } - - Err(errors) - } - - pub async fn broadcast_without_retry<'a, F, G, H>( - &'a self, - func: F, - ) -> Vec> - where - F: Fn(&'a Engine) -> G, - G: Future>, - { - let func = &func; - let futures = self.builders.iter().map(|engine| async move { - func(engine).await.map_err(|error| { - debug!( - self.log, - "Builder call failed"; - "error" => ?error, - "id" => &engine.id - ); - EngineError::Api { - id: engine.id.clone(), - error, - } - }) - }); - - join_all(futures).await - } -} - impl PayloadIdCacheKey { fn new(state: &ForkChoiceState, attributes: &PayloadAttributes) -> Self { Self { diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 4b29887675..156382c481 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -4,9 +4,8 @@ //! This crate only provides useful functionality for "The Merge", it does not provide any of the //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. 
-use crate::engine_api::Builder; -use crate::engines::Builders; use auth::{strip_prefix, Auth, JwtKey}; +use builder_client::BuilderHttpClient; use engine_api::Error as ApiError; pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; @@ -20,7 +19,6 @@ use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::HashMap; -use std::convert::TryInto; use std::future::Future; use std::io::Write; use std::path::PathBuf; @@ -33,7 +31,7 @@ use tokio::{ }; use types::{ BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, - ProposerPreparationData, SignedBeaconBlock, Slot, + ProposerPreparationData, PublicKeyBytes, SignedBeaconBlock, Slot, }; mod engine_api; @@ -69,6 +67,7 @@ pub enum Error { NoEngines, NoPayloadBuilder, ApiError(ApiError), + Builder(builder_client::Error), EngineErrors(Vec), NotSynced, ShuttingDown, @@ -102,15 +101,16 @@ pub struct Proposer { payload_attributes: PayloadAttributes, } -struct Inner { +struct Inner { engines: Engines, - builders: Builders, + builder: Option, execution_engine_forkchoice_lock: Mutex<()>, suggested_fee_recipient: Option
, proposer_preparation_data: Mutex>, execution_blocks: Mutex>, proposers: RwLock>, executor: TaskExecutor, + phantom: std::marker::PhantomData, log: Logger, } @@ -119,7 +119,7 @@ pub struct Config { /// Endpoint urls for EL nodes that are running the engine api. pub execution_endpoints: Vec, /// Endpoint urls for services providing the builder api. - pub builder_endpoints: Vec, + pub builder_url: Option, /// JWT secrets for the above endpoints running the engine api. pub secret_files: Vec, /// The default fee recipient to use on the beacon node if none if provided from @@ -143,16 +143,16 @@ pub struct Config { /// /// The fallback nodes have an ordering. The first supplied will be the first contacted, and so on. #[derive(Clone)] -pub struct ExecutionLayer { - inner: Arc, +pub struct ExecutionLayer { + inner: Arc>, } -impl ExecutionLayer { +impl ExecutionLayer { /// Instantiate `Self` with Execution engines specified using `Config`, all using the JSON-RPC via HTTP. pub fn from_config(config: Config, executor: TaskExecutor, log: Logger) -> Result { let Config { execution_endpoints: urls, - builder_endpoints: builder_urls, + builder_url, secret_files, suggested_fee_recipient, jwt_id, @@ -208,14 +208,9 @@ impl ExecutionLayer { Engine::::new(id, api) }; - let builders: Vec> = builder_urls - .into_iter() - .map(|url| { - let id = url.to_string(); - let api = HttpJsonRpc::::new(url)?; - Ok(Engine::::new(id, api)) - }) - .collect::>()?; + let builder = builder_url + .map(|url| BuilderHttpClient::new(url).map_err(Error::Builder)) + .transpose()?; let inner = Inner { engines: Engines { @@ -223,16 +218,14 @@ impl ExecutionLayer { latest_forkchoice_state: <_>::default(), log: log.clone(), }, - builders: Builders { - builders, - log: log.clone(), - }, + builder, execution_engine_forkchoice_lock: <_>::default(), suggested_fee_recipient, proposer_preparation_data: Mutex::new(HashMap::new()), proposers: RwLock::new(HashMap::new()), execution_blocks: 
Mutex::new(LruCache::new(EXECUTION_BLOCKS_LRU_CACHE_SIZE)), executor, + phantom: std::marker::PhantomData, log, }; @@ -242,13 +235,13 @@ impl ExecutionLayer { } } -impl ExecutionLayer { +impl ExecutionLayer { fn engines(&self) -> &Engines { &self.inner.engines } - fn builders(&self) -> &Builders { - &self.inner.builders + pub fn builder(&self) -> &Option { + &self.inner.builder } pub fn executor(&self) -> &TaskExecutor { @@ -282,9 +275,9 @@ impl ExecutionLayer { } /// Convenience function to allow calling async functions in a non-async context. - pub fn block_on<'a, T, U, V>(&'a self, generate_future: T) -> Result + pub fn block_on<'a, F, U, V>(&'a self, generate_future: F) -> Result where - T: Fn(&'a Self) -> U, + F: Fn(&'a Self) -> U, U: Future>, { let runtime = self.executor().handle().ok_or(Error::ShuttingDown)?; @@ -296,9 +289,9 @@ impl ExecutionLayer { /// /// The function is "generic" since it does not enforce a particular return type on /// `generate_future`. - pub fn block_on_generic<'a, T, U, V>(&'a self, generate_future: T) -> Result + pub fn block_on_generic<'a, F, U, V>(&'a self, generate_future: F) -> Result where - T: Fn(&'a Self) -> U, + F: Fn(&'a Self) -> U, U: Future, { let runtime = self.executor().handle().ok_or(Error::ShuttingDown)?; @@ -307,9 +300,9 @@ impl ExecutionLayer { } /// Convenience function to allow spawning a task without waiting for the result. - pub fn spawn(&self, generate_future: T, name: &'static str) + pub fn spawn(&self, generate_future: F, name: &'static str) where - T: FnOnce(Self) -> U, + F: FnOnce(Self) -> U, U: Future + Send + 'static, { self.executor().spawn(generate_future(self.clone()), name); @@ -317,12 +310,12 @@ impl ExecutionLayer { /// Spawns a routine which attempts to keep the execution engines online. pub fn spawn_watchdog_routine(&self, slot_clock: S) { - let watchdog = |el: ExecutionLayer| async move { + let watchdog = |el: ExecutionLayer| async move { // Run one task immediately. 
el.watchdog_task().await; let recurring_task = - |el: ExecutionLayer, now: Instant, duration_to_next_slot: Duration| async move { + |el: ExecutionLayer, now: Instant, duration_to_next_slot: Duration| async move { // We run the task three times per slot. // // The interval between each task is 1/3rd of the slot duration. This matches nicely @@ -377,11 +370,8 @@ impl ExecutionLayer { } /// Spawns a routine which cleans the cached proposer data periodically. - pub fn spawn_clean_proposer_caches_routine( - &self, - slot_clock: S, - ) { - let preparation_cleaner = |el: ExecutionLayer| async move { + pub fn spawn_clean_proposer_caches_routine(&self, slot_clock: S) { + let preparation_cleaner = |el: ExecutionLayer| async move { // Start the loop to periodically clean proposer preparation cache. loop { if let Some(duration_to_next_epoch) = @@ -395,7 +385,7 @@ impl ExecutionLayer { .map(|slot| slot.epoch(T::slots_per_epoch())) { Some(current_epoch) => el - .clean_proposer_caches::(current_epoch) + .clean_proposer_caches(current_epoch) .await .map_err(|e| { error!( @@ -420,7 +410,7 @@ impl ExecutionLayer { /// Spawns a routine that polls the `exchange_transition_configuration` endpoint. 
pub fn spawn_transition_configuration_poll(&self, spec: ChainSpec) { - let routine = |el: ExecutionLayer| async move { + let routine = |el: ExecutionLayer| async move { loop { if let Err(e) = el.exchange_transition_configuration(&spec).await { error!( @@ -454,7 +444,7 @@ impl ExecutionLayer { } /// Updates the proposer preparation data provided by validators - async fn update_proposer_preparation( + pub async fn update_proposer_preparation( &self, update_epoch: Epoch, preparation_data: &[ProposerPreparationData], @@ -476,7 +466,7 @@ impl ExecutionLayer { } /// Removes expired entries from proposer_preparation_data and proposers caches - async fn clean_proposer_caches(&self, current_epoch: Epoch) -> Result<(), Error> { + async fn clean_proposer_caches(&self, current_epoch: Epoch) -> Result<(), Error> { let mut proposer_preparation_data = self.proposer_preparation_data().await; // Keep all entries that have been updated in the last 2 epochs @@ -561,104 +551,164 @@ impl ExecutionLayer { /// /// The result will be returned from the first node that returns successfully. No more nodes /// will be contacted. 
- pub async fn get_payload>( + #[allow(clippy::too_many_arguments)] + pub async fn get_payload>( &self, parent_hash: ExecutionBlockHash, timestamp: u64, prev_randao: Hash256, finalized_block_hash: ExecutionBlockHash, proposer_index: u64, + pubkey: Option, + slot: Slot, ) -> Result { - let _timer = metrics::start_timer_vec( - &metrics::EXECUTION_LAYER_REQUEST_TIMES, - &[metrics::GET_PAYLOAD], - ); - let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; match Payload::block_type() { BlockType::Blinded => { - debug!( - self.log(), - "Issuing builder_getPayloadHeader"; - "suggested_fee_recipient" => ?suggested_fee_recipient, - "prev_randao" => ?prev_randao, - "timestamp" => timestamp, - "parent_hash" => ?parent_hash, + let _timer = metrics::start_timer_vec( + &metrics::EXECUTION_LAYER_REQUEST_TIMES, + &[metrics::GET_BLINDED_PAYLOAD], ); - self.builders() - .first_success_without_retry(|engine| async move { - let payload_id = engine - .get_payload_id( - parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, - ) - .await - .ok_or(ApiError::MissingPayloadId { - parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, - })?; - engine - .api - .get_payload_header_v1::(payload_id) - .await? 
- .try_into() - .map_err(|_| ApiError::PayloadConversionLogicFlaw) - }) - .await - .map_err(Error::EngineErrors) + self.get_blinded_payload( + parent_hash, + timestamp, + prev_randao, + finalized_block_hash, + suggested_fee_recipient, + pubkey, + slot, + ) + .await } BlockType::Full => { - debug!( + let _timer = metrics::start_timer_vec( + &metrics::EXECUTION_LAYER_REQUEST_TIMES, + &[metrics::GET_PAYLOAD], + ); + self.get_full_payload( + parent_hash, + timestamp, + prev_randao, + finalized_block_hash, + suggested_fee_recipient, + ) + .await + } + } + } + + #[allow(clippy::too_many_arguments)] + async fn get_blinded_payload>( + &self, + parent_hash: ExecutionBlockHash, + timestamp: u64, + prev_randao: Hash256, + finalized_block_hash: ExecutionBlockHash, + suggested_fee_recipient: Address, + pubkey_opt: Option, + slot: Slot, + ) -> Result { + //FIXME(sean) fallback logic included in PR #3134 + + // Don't attempt to outsource payload construction until after the merge transition has been + // finalized. We want to be conservative with payload construction until then. + if let (Some(builder), Some(pubkey)) = (self.builder(), pubkey_opt) { + if finalized_block_hash != ExecutionBlockHash::zero() { + info!( self.log(), - "Issuing engine_getPayload"; - "suggested_fee_recipient" => ?suggested_fee_recipient, - "prev_randao" => ?prev_randao, - "timestamp" => timestamp, + "Requesting blinded header from connected builder"; + "slot" => ?slot, + "pubkey" => ?pubkey, "parent_hash" => ?parent_hash, ); - self.engines() - .first_success(|engine| async move { - let payload_id = if let Some(id) = engine - .get_payload_id( - parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, - ) - .await - { - // The payload id has been cached for this engine. - metrics::inc_counter_vec( - &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, - &[metrics::HIT], - ); - id - } else { - // The payload id has *not* been cached for this engine. 
Trigger an artificial - // fork choice update to retrieve a payload ID. - // - // TODO(merge): a better algorithm might try to favour a node that already had a - // cached payload id, since a payload that has had more time to produce is - // likely to be more profitable. - metrics::inc_counter_vec( - &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, - &[metrics::MISS], - ); - let fork_choice_state = ForkChoiceState { - head_block_hash: parent_hash, - safe_block_hash: parent_hash, - finalized_block_hash, - }; - let payload_attributes = PayloadAttributes { - timestamp, - prev_randao, - suggested_fee_recipient, - }; + return builder + .get_builder_header::(slot, parent_hash, &pubkey) + .await + .map(|d| d.data.message.header) + .map_err(Error::Builder); + } + } + self.get_full_payload::( + parent_hash, + timestamp, + prev_randao, + finalized_block_hash, + suggested_fee_recipient, + ) + .await + } + + /// Get a full payload without caching its result in the execution layer's payload cache. 
+ async fn get_full_payload>( + &self, + parent_hash: ExecutionBlockHash, + timestamp: u64, + prev_randao: Hash256, + finalized_block_hash: ExecutionBlockHash, + suggested_fee_recipient: Address, + ) -> Result { + self.get_full_payload_with( + parent_hash, + timestamp, + prev_randao, + finalized_block_hash, + suggested_fee_recipient, + noop, + ) + .await + } + + async fn get_full_payload_with>( + &self, + parent_hash: ExecutionBlockHash, + timestamp: u64, + prev_randao: Hash256, + finalized_block_hash: ExecutionBlockHash, + suggested_fee_recipient: Address, + f: fn(&ExecutionLayer, &ExecutionPayload) -> Option>, + ) -> Result { + debug!( + self.log(), + "Issuing engine_getPayload"; + "suggested_fee_recipient" => ?suggested_fee_recipient, + "prev_randao" => ?prev_randao, + "timestamp" => timestamp, + "parent_hash" => ?parent_hash, + ); + self.engines() + .first_success(|engine| async move { + let payload_id = if let Some(id) = engine + .get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient) + .await + { + // The payload id has been cached for this engine. + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, + &[metrics::HIT], + ); + id + } else { + // The payload id has *not* been cached for this engine. Trigger an artificial + // fork choice update to retrieve a payload ID. + // + // TODO(merge): a better algorithm might try to favour a node that already had a + // cached payload id, since a payload that has had more time to produce is + // likely to be more profitable. 
+ metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, + &[metrics::MISS], + ); + let fork_choice_state = ForkChoiceState { + head_block_hash: parent_hash, + safe_block_hash: parent_hash, + finalized_block_hash, + }; + let payload_attributes = PayloadAttributes { + timestamp, + prev_randao, + suggested_fee_recipient, + }; let response = engine .notify_forkchoice_updated( @@ -684,16 +734,19 @@ impl ExecutionLayer { } }; - engine - .api - .get_payload_v1::(payload_id) - .await - .map(Into::into) - }) + engine + .api + .get_payload_v1::(payload_id) .await - .map_err(Error::EngineErrors) - } - } + .map(|full_payload| { + if f(self, &full_payload).is_some() { + warn!(self.log(), "Duplicate payload cached, this might indicate redundant proposal attempts."); + } + full_payload.into() + }) + }) + .await + .map_err(Error::EngineErrors) } /// Maps to the `engine_newPayload` JSON-RPC call. @@ -709,7 +762,7 @@ impl ExecutionLayer { /// - Invalid, if any nodes return invalid. /// - Syncing, if any nodes return syncing. /// - An error, if all nodes return an error. - pub async fn notify_new_payload( + pub async fn notify_new_payload( &self, execution_payload: &ExecutionPayload, ) -> Result { @@ -872,23 +925,10 @@ impl ExecutionLayer { }) .await; - // Only query builders with payload attributes populated. 
- let builder_broadcast_results = if payload_attributes.is_some() { - self.builders() - .broadcast_without_retry(|engine| async move { - engine - .notify_forkchoice_updated(forkchoice_state, payload_attributes, self.log()) - .await - }) - .await - } else { - vec![] - }; process_multiple_payload_statuses( head_block_hash, Some(broadcast_results) .into_iter() - .chain(builder_broadcast_results.into_iter()) .map(|result| result.map(|response| response.payload_status)), self.log(), ) @@ -1147,7 +1187,7 @@ impl ExecutionLayer { } } - pub async fn get_payload_by_block_hash( + pub async fn get_payload_by_block_hash( &self, hash: ExecutionBlockHash, ) -> Result>, Error> { @@ -1160,7 +1200,7 @@ impl ExecutionLayer { .map_err(Error::EngineErrors) } - async fn get_payload_by_block_hash_from_engine( + async fn get_payload_by_block_hash_from_engine( &self, engine: &Engine, hash: ExecutionBlockHash, @@ -1205,21 +1245,24 @@ impl ExecutionLayer { })) } - pub async fn propose_blinded_beacon_block( + pub async fn propose_blinded_beacon_block( &self, block: &SignedBeaconBlock>, ) -> Result, Error> { debug!( self.log(), - "Issuing builder_proposeBlindedBlock"; + "Sending block to builder"; "root" => ?block.canonical_root(), ); - self.builders() - .first_success_without_retry(|engine| async move { - engine.api.propose_blinded_block_v1(block.clone()).await - }) - .await - .map_err(Error::EngineErrors) + if let Some(builder) = self.builder() { + builder + .post_builder_blinded_blocks(block) + .await + .map_err(Error::Builder) + .map(|d| d.data) + } else { + Err(Error::NoPayloadBuilder) + } } } @@ -1320,3 +1363,7 @@ mod test { .await; } } + +fn noop(_: &ExecutionLayer, _: &ExecutionPayload) -> Option> { + None +} diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 356c5a46dd..e28a81fd87 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -3,6 +3,7 @@ pub use 
lighthouse_metrics::*; pub const HIT: &str = "hit"; pub const MISS: &str = "miss"; pub const GET_PAYLOAD: &str = "get_payload"; +pub const GET_BLINDED_PAYLOAD: &str = "get_blinded_payload"; pub const NEW_PAYLOAD: &str = "new_payload"; pub const FORKCHOICE_UPDATED: &str = "forkchoice_updated"; pub const GET_TERMINAL_POW_BLOCK_HASH: &str = "get_terminal_pow_block_hash"; diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 5770a8a382..707a7c0c3e 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -9,7 +9,7 @@ use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, Uint256}; pub struct MockExecutionLayer { pub server: MockServer, - pub el: ExecutionLayer, + pub el: ExecutionLayer, pub executor: TaskExecutor, pub spec: ChainSpec, } @@ -22,6 +22,7 @@ impl MockExecutionLayer { DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), Epoch::new(0), + None, ) } @@ -31,6 +32,7 @@ impl MockExecutionLayer { terminal_block: u64, terminal_block_hash: ExecutionBlockHash, terminal_block_hash_activation_epoch: Epoch, + builder_url: Option, ) -> Self { let handle = executor.handle().unwrap(); @@ -54,6 +56,7 @@ impl MockExecutionLayer { let config = Config { execution_endpoints: vec![url], + builder_url, secret_files: vec![path], suggested_fee_recipient: Some(Address::repeat_byte(42)), ..Default::default() @@ -111,12 +114,14 @@ impl MockExecutionLayer { let validator_index = 0; let payload = self .el - .get_payload::>( + .get_payload::>( parent_hash, timestamp, prev_randao, finalized_block_hash, validator_index, + None, + slot, ) .await .unwrap() @@ -173,7 +178,7 @@ impl MockExecutionLayer { pub async fn with_terminal_block<'a, U, V>(self, func: U) -> Self where - U: Fn(ChainSpec, ExecutionLayer, Option) -> V, + U: Fn(ChainSpec, ExecutionLayer, Option) -> V, V: Future, { 
let terminal_block_number = self diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index a0cc124d47..964873a949 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -466,8 +466,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) ) .arg( - Arg::with_name("payload-builder") - .long("payload-builder") + Arg::with_name("builder") + .long("builder") + .alias("payload-builder") .alias("payload-builders") .help("The URL of a service compatible with the MEV-boost API.") .requires("execution-endpoint") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 63cc9214ff..c91bd711e5 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -288,10 +288,10 @@ pub fn get_config( parse_only_one_value(&secret_files, PathBuf::from_str, "--execution-jwt", log)?; // Parse and set the payload builder, if any. - if let Some(endpoints) = cli_args.value_of("payload-builder") { + if let Some(endpoint) = cli_args.value_of("builder") { let payload_builder = - parse_only_one_value(endpoints, SensitiveUrl::parse, "--payload-builder", log)?; - el_config.builder_endpoints = vec![payload_builder]; + parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder", log)?; + el_config.builder_url = Some(payload_builder); } // Set config values from parse values. diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 529bad1d85..d374101308 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1508,7 +1508,7 @@ impl BeaconNodeHttpClient { /// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an /// appropriate error message. 
-async fn ok_or_error(response: Response) -> Result { +pub async fn ok_or_error(response: Response) -> Result { let status = response.status(); if status == StatusCode::OK { diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 881d17a330..96018230f0 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -46,6 +46,7 @@ itertools = "0.10.0" superstruct = "0.5.0" serde_json = "1.0.74" smallvec = "1.8.0" +serde_with = "1.13.0" [dev-dependencies] criterion = "0.3.3" diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs new file mode 100644 index 0000000000..1726f2ad07 --- /dev/null +++ b/consensus/types/src/builder_bid.rs @@ -0,0 +1,52 @@ +use crate::{EthSpec, ExecPayload, ExecutionPayloadHeader, Uint256}; +use bls::blst_implementations::PublicKeyBytes; +use bls::Signature; +use serde::{Deserialize as De, Deserializer, Serialize as Ser, Serializer}; +use serde_derive::{Deserialize, Serialize}; +use serde_with::{serde_as, DeserializeAs, SerializeAs}; +use std::marker::PhantomData; + +#[serde_as] +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +#[serde(bound = "E: EthSpec, Payload: ExecPayload")] +pub struct BuilderBid> { + #[serde_as(as = "BlindedPayloadAsHeader")] + pub header: Payload, + #[serde(with = "eth2_serde_utils::quoted_u256")] + pub value: Uint256, + pub pubkey: PublicKeyBytes, + #[serde(skip)] + _phantom_data: PhantomData, +} + +/// Validator registration, for use in interacting with servers implementing the builder API. 
+#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +#[serde(bound = "E: EthSpec, Payload: ExecPayload")] +pub struct SignedBuilderBid> { + pub message: BuilderBid, + pub signature: Signature, +} + +struct BlindedPayloadAsHeader(PhantomData); + +impl> SerializeAs for BlindedPayloadAsHeader { + fn serialize_as(source: &Payload, serializer: S) -> Result + where + S: Serializer, + { + source.to_execution_payload_header().serialize(serializer) + } +} + +impl<'de, E: EthSpec, Payload: ExecPayload> DeserializeAs<'de, Payload> + for BlindedPayloadAsHeader +{ + fn deserialize_as(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let payload_header = ExecutionPayloadHeader::deserialize(deserializer)?; + Payload::try_from(payload_header) + .map_err(|_| serde::de::Error::custom("unable to convert payload header to payload")) + } +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index ecfd77d7a4..7823ec223c 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -28,6 +28,7 @@ pub mod beacon_block_body; pub mod beacon_block_header; pub mod beacon_committee; pub mod beacon_state; +pub mod builder_bid; pub mod chain_spec; pub mod checkpoint; pub mod consts; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 443c442027..a9f8900d0c 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -411,12 +411,13 @@ fn run_payload_builder_flag_test(flag: &str, builders: &str) { let config = config.execution_layer.as_ref().unwrap(); // Only first provided endpoint is parsed as we don't support // redundancy. 
- assert_eq!(&config.builder_endpoints, &all_builders[..1]); + assert_eq!(config.builder_url, all_builders.get(0).cloned()); }); } #[test] fn payload_builder_flags() { + run_payload_builder_flag_test("builder", "http://meow.cats"); run_payload_builder_flag_test("payload-builder", "http://meow.cats"); run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs"); run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs"); diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 21162fea56..a5bab4ed78 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -11,9 +11,9 @@ use types::{ const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(20); -struct ExecutionPair { +struct ExecutionPair { /// The Lighthouse `ExecutionLayer` struct, connected to the `execution_engine` via HTTP. - execution_layer: ExecutionLayer, + execution_layer: ExecutionLayer, /// A handle to external EE process, once this is dropped the process will be killed. #[allow(dead_code)] execution_engine: ExecutionEngine, @@ -23,11 +23,11 @@ struct ExecutionPair { /// /// There are two EEs held here so that we can test out-of-order application of payloads, and other /// edge-cases. 
-pub struct TestRig { +pub struct TestRig { #[allow(dead_code)] runtime: Arc, - ee_a: ExecutionPair, - ee_b: ExecutionPair, + ee_a: ExecutionPair, + ee_b: ExecutionPair, spec: ChainSpec, _runtime_shutdown: exit_future::Signal, } @@ -172,12 +172,14 @@ impl TestRig { let valid_payload = self .ee_a .execution_layer - .get_payload::>( + .get_payload::>( parent_hash, timestamp, prev_randao, finalized_block_hash, proposer_index, + None, + Slot::new(0), ) .await .unwrap() @@ -265,12 +267,14 @@ impl TestRig { let second_payload = self .ee_a .execution_layer - .get_payload::>( + .get_payload::>( parent_hash, timestamp, prev_randao, finalized_block_hash, proposer_index, + None, + Slot::new(0), ) .await .unwrap() @@ -400,7 +404,7 @@ impl TestRig { /// /// Panic if payload reconstruction fails. async fn check_payload_reconstruction( - ee: &ExecutionPair, + ee: &ExecutionPair, payload: &ExecutionPayload, ) { let reconstructed = ee From e5212f132021d58de9637355729096ccb511886e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 1 Jul 2022 03:44:37 +0000 Subject: [PATCH 049/184] Avoid growing Vec for sync committee indices (#3301) ## Issue Addressed NA ## Proposed Changes This is a fairly simple micro-optimization to avoid using `Vec::grow`. I don't believe this will have a substantial effect on block processing times, however it was showing up in flamegraphs. I think it's worth making this change for general memory-hygiene. 
## Additional Info NA --- consensus/types/src/beacon_state.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 3a0f7d02e8..66656d3589 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -779,14 +779,14 @@ impl BeaconState { &mut self, sync_committee: &SyncCommittee, ) -> Result, Error> { - sync_committee - .pubkeys - .iter() - .map(|pubkey| { + let mut indices = Vec::with_capacity(sync_committee.pubkeys.len()); + for pubkey in sync_committee.pubkeys.iter() { + indices.push( self.get_validator_index(pubkey)? - .ok_or(Error::PubkeyCacheInconsistent) - }) - .collect() + .ok_or(Error::PubkeyCacheInconsistent)?, + ) + } + Ok(indices) } /// Compute the sync committee indices for the next sync committee. From be4e261e7433e02983648f7d7d8f21f74d3fa9d8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 3 Jul 2022 05:36:50 +0000 Subject: [PATCH 050/184] Use async code when interacting with EL (#3244) ## Overview This rather extensive PR achieves two primary goals: 1. Uses the finalized/justified checkpoints of fork choice (FC), rather than that of the head state. 2. Refactors fork choice, block production and block processing to `async` functions. Additionally, it achieves: - Concurrent forkchoice updates to the EL and cache pruning after a new head is selected. - Concurrent "block packing" (attestations, etc) and execution payload retrieval during block production. - Concurrent per-block-processing and execution payload verification during block processing. - The `Arc`-ification of `SignedBeaconBlock` during block processing (it's never mutated, so why not?): - I had to do this to deal with sending blocks into spawned tasks. - Previously we were cloning the beacon block at least 2 times during each block processing, these clones are either removed or turned into cheaper `Arc` clones. 
- We were also `Box`-ing and un-`Box`-ing beacon blocks as they moved throughout the networking crate. This is not a big deal, but it's nice to avoid shifting things between the stack and heap. - Avoids cloning *all the blocks* in *every chain segment* during sync. - It also has the potential to clean up our code where we need to pass an *owned* block around so we can send it back in the case of an error (I didn't do much of this, my PR is already big enough :sweat_smile:) - The `BeaconChain::HeadSafetyStatus` struct was removed. It was an old relic from prior merge specs. For motivation for this change, see https://github.com/sigp/lighthouse/pull/3244#issuecomment-1160963273 ## Changes to `canonical_head` and `fork_choice` Previously, the `BeaconChain` had two separate fields: ``` canonical_head: RwLock, fork_choice: RwLock ``` Now, we have grouped these values under a single struct: ``` canonical_head: CanonicalHead { cached_head: RwLock>, fork_choice: RwLock } ``` Apart from ergonomics, the only *actual* change here is wrapping the canonical head snapshot in an `Arc`. This means that we no longer need to hold the `cached_head` (`canonical_head`, in old terms) lock when we want to pull some values from it. This was done to avoid deadlock risks by preventing functions from acquiring (and holding) the `cached_head` and `fork_choice` locks simultaneously. ## Breaking Changes ### The `state` (root) field in the `finalized_checkpoint` SSE event Consider the scenario where epoch `n` is just finalized, but `start_slot(n)` is skipped. There are two state roots we might include in the `finalized_checkpoint` SSE event: 1. The state root of the finalized block, which is `get_block(finalized_checkpoint.root).state_root`. 2. The state root at slot of `start_slot(n)`, which would be the state from (1), but "skipped forward" through any skip slots. Previously, Lighthouse would choose (2).
However, we can see that when [Teku generates that event](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/beaconrestapi/src/main/java/tech/pegasys/teku/beaconrestapi/handlers/v1/events/EventSubscriptionManager.java#L171-L182) it uses [`getStateRootFromBlockRoot`](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/provider/src/main/java/tech/pegasys/teku/api/ChainDataProvider.java#L336-L341) which uses (1). I have switched Lighthouse from (2) to (1). I think it's a somewhat arbitrary choice between the two, where (1) is easier to compute and is consistent with Teku. ## Notes for Reviewers I've renamed `BeaconChain::fork_choice` to `BeaconChain::recompute_head`. Doing this helped ensure I broke all previous uses of fork choice and I also find it more descriptive. It describes an action and can't be confused with trying to get a reference to the `ForkChoice` struct. I've changed the ordering of SSE events when a block is received. It used to be `[block, finalized, head]` and now it's `[block, head, finalized]`. It was easier this way and I don't think we were making any promises about SSE event ordering so it's not "breaking". I've made it so fork choice will run when it's first constructed. I did this because I wanted to have a cached version of the last call to `get_head`. Ensuring `get_head` has been run *at least once* means that the cached value doesn't need to be wrapped in an `Option`. This was fairly simple, it just involved passing a `slot` to the constructor so it knows *when* it's being run. When loading a fork choice from the store and a slot clock isn't handy I've just used the `slot` that was saved in the `fork_choice_store`. That seems like it would be a faithful representation of the slot when we saved it. I added the `genesis_time: u64` to the `BeaconChain`. It's small, constant and nice to have around.
Since we're using FC for the fin/just checkpoints, we no longer get the `0x00..00` roots at genesis. You can see I had to remove a work-around in `ef-tests` here: b56be3bc2. I can't find any reason why this would be an issue, if anything I think it'll be better since the genesis-alias has caught us out a few times (0x00..00 isn't actually a real root). Edit: I did find a case where the `network` expected the 0x00..00 alias and patched it here: 3f26ac3e2. You'll notice a lot of changes in tests. Generally, tests should be functionally equivalent. Here are the things creating the most diff-noise in tests: - Changing tests to be `tokio::async` tests. - Adding `.await` to fork choice, block processing and block production functions. - Refactor of the `canonical_head` "API" provided by the `BeaconChain`. E.g., `chain.canonical_head.cached_head()` instead of `chain.canonical_head.read()`. - Wrapping `SignedBeaconBlock` in an `Arc`. - In the `beacon_chain/tests/block_verification`, we can't use the `lazy_static` `CHAIN_SEGMENT` variable anymore since it's generated with an async function. We just generate it in each test, not so efficient but hopefully insignificant. I had to disable `rayon` concurrent tests in the `fork_choice` tests. This is because the use of `rayon` and `block_on` was causing a panic. 
Co-authored-by: Mac L --- Cargo.lock | 5 + .../src/attestation_verification.rs | 9 +- .../src/attestation_verification/batch.rs | 6 +- beacon_node/beacon_chain/src/beacon_chain.rs | 1947 +++++++---------- .../src/beacon_fork_choice_store.rs | 6 +- .../beacon_chain/src/beacon_proposer_cache.rs | 31 +- .../beacon_chain/src/beacon_snapshot.rs | 7 +- .../beacon_chain/src/block_verification.rs | 328 +-- beacon_node/beacon_chain/src/builder.rs | 72 +- .../beacon_chain/src/canonical_head.rs | 1307 +++++++++++ .../beacon_chain/src/early_attester_cache.rs | 7 +- beacon_node/beacon_chain/src/errors.rs | 19 +- .../beacon_chain/src/execution_payload.rs | 296 ++- beacon_node/beacon_chain/src/fork_revert.rs | 8 +- .../beacon_chain/src/historical_blocks.rs | 3 +- beacon_node/beacon_chain/src/lib.rs | 7 +- .../beacon_chain/src/proposer_prep_service.rs | 4 +- beacon_node/beacon_chain/src/schema_change.rs | 7 +- .../src/schema_change/migration_schema_v7.rs | 10 +- .../beacon_chain/src/shuffling_cache.rs | 6 + .../beacon_chain/src/snapshot_cache.rs | 15 +- .../beacon_chain/src/state_advance_timer.rs | 43 +- beacon_node/beacon_chain/src/test_utils.rs | 154 +- .../tests/attestation_production.rs | 53 +- .../tests/attestation_verification.rs | 104 +- .../beacon_chain/tests/block_verification.rs | 491 +++-- beacon_node/beacon_chain/tests/merge.rs | 33 +- .../beacon_chain/tests/op_verification.rs | 16 +- .../tests/payload_invalidation.rs | 398 ++-- beacon_node/beacon_chain/tests/store_tests.rs | 868 ++++---- .../tests/sync_committee_verification.rs | 64 +- beacon_node/beacon_chain/tests/tests.rs | 372 ++-- beacon_node/client/src/builder.rs | 34 +- beacon_node/client/src/notifier.rs | 157 +- beacon_node/execution_layer/src/lib.rs | 37 - beacon_node/http_api/src/attester_duties.rs | 8 +- beacon_node/http_api/src/block_id.rs | 41 +- beacon_node/http_api/src/database.rs | 2 +- beacon_node/http_api/src/lib.rs | 695 +++--- beacon_node/http_api/src/proposer_duties.rs | 26 +- 
beacon_node/http_api/src/state_id.rs | 40 +- beacon_node/http_api/tests/fork_tests.rs | 4 + .../http_api/tests/interactive_tests.rs | 29 +- beacon_node/http_api/tests/tests.rs | 219 +- .../lighthouse_network/src/behaviour/mod.rs | 4 +- .../src/rpc/codec/ssz_snappy.rs | 72 +- .../lighthouse_network/src/rpc/methods.rs | 5 +- .../lighthouse_network/src/types/pubsub.rs | 5 +- .../lighthouse_network/tests/rpc_tests.rs | 20 +- .../network/src/beacon_processor/mod.rs | 561 ++--- .../network/src/beacon_processor/tests.rs | 329 ++- .../beacon_processor/worker/gossip_methods.rs | 72 +- .../beacon_processor/worker/rpc_methods.rs | 7 +- .../beacon_processor/worker/sync_methods.rs | 52 +- beacon_node/network/src/metrics.rs | 4 - beacon_node/network/src/router/processor.rs | 46 +- beacon_node/network/src/service.rs | 41 +- beacon_node/network/src/status.rs | 40 +- .../network/src/subnet_service/tests/mod.rs | 10 +- .../network/src/sync/backfill_sync/mod.rs | 4 +- .../network/src/sync/block_lookups/mod.rs | 13 +- .../src/sync/block_lookups/parent_lookup.rs | 15 +- .../sync/block_lookups/single_block_lookup.rs | 7 +- .../network/src/sync/block_lookups/tests.rs | 38 +- beacon_node/network/src/sync/manager.rs | 35 +- .../network/src/sync/network_context.rs | 39 +- .../network/src/sync/peer_sync_info.rs | 4 +- .../network/src/sync/range_sync/batch.rs | 18 +- .../src/sync/range_sync/block_storage.rs | 2 +- .../network/src/sync/range_sync/chain.rs | 3 +- .../network/src/sync/range_sync/range.rs | 25 +- beacon_node/operation_pool/Cargo.toml | 1 + beacon_node/operation_pool/src/lib.rs | 40 +- beacon_node/store/src/hot_cold_store.rs | 6 +- beacon_node/store/src/lib.rs | 3 +- beacon_node/timer/src/lib.rs | 30 +- common/task_executor/Cargo.toml | 2 +- common/task_executor/src/lib.rs | 57 + common/task_executor/src/metrics.rs | 10 + consensus/fork_choice/Cargo.toml | 1 + consensus/fork_choice/src/fork_choice.rs | 130 +- .../fork_choice/src/fork_choice_store.rs | 4 +- 
consensus/fork_choice/src/lib.rs | 5 +- consensus/fork_choice/tests/tests.rs | 403 ++-- consensus/proto_array/src/proto_array.rs | 1 + consensus/state_processing/Cargo.toml | 1 + .../src/per_block_processing/tests.rs | 322 ++- .../src/per_epoch_processing/tests.rs | 60 +- .../examples/flamegraph_beacon_state.rs | 2 +- consensus/types/Cargo.toml | 1 + consensus/types/src/beacon_block.rs | 46 +- consensus/types/src/beacon_block_body.rs | 47 + .../src/beacon_state/committee_cache/tests.rs | 40 +- consensus/types/src/beacon_state/tests.rs | 99 +- consensus/types/src/payload.rs | 2 + consensus/types/src/signed_beacon_block.rs | 8 + database_manager/src/lib.rs | 5 +- slasher/service/src/service.rs | 9 +- testing/ef_tests/src/cases.rs | 24 +- testing/ef_tests/src/cases/fork_choice.rs | 142 +- testing/ef_tests/src/handler.rs | 11 +- .../src/test_rig.rs | 7 +- testing/state_transition_vectors/Cargo.toml | 1 + testing/state_transition_vectors/src/exit.rs | 33 +- .../state_transition_vectors/src/macros.rs | 10 +- testing/state_transition_vectors/src/main.rs | 31 +- 106 files changed, 6515 insertions(+), 4538 deletions(-) create mode 100644 beacon_node/beacon_chain/src/canonical_head.rs diff --git a/Cargo.lock b/Cargo.lock index 1e9b5b4239..bb7308b938 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2062,6 +2062,7 @@ dependencies = [ "eth2_ssz_derive", "proto_array", "store", + "tokio", "types", ] @@ -4173,6 +4174,7 @@ dependencies = [ "serde_derive", "state_processing", "store", + "tokio", "types", ] @@ -5972,6 +5974,7 @@ dependencies = [ "rayon", "safe_arith", "smallvec", + "tokio", "tree_hash", "types", ] @@ -5984,6 +5987,7 @@ dependencies = [ "eth2_ssz", "lazy_static", "state_processing", + "tokio", "types", ] @@ -6698,6 +6702,7 @@ dependencies = [ "swap_or_not_shuffle", "tempfile", "test_random_derive", + "tokio", "tree_hash", "tree_hash_derive", ] diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs 
index 406c0049aa..63af6ab9e1 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -976,8 +976,8 @@ fn verify_head_block_is_known( max_skip_slots: Option, ) -> Result { let block_opt = chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&attestation.data.beacon_block_root) .or_else(|| { chain @@ -1245,7 +1245,10 @@ where // processing an attestation that does not include our latest finalized block in its chain. // // We do not delay consideration for later, we simply drop the attestation. - if !chain.fork_choice.read().contains_block(&target.root) + if !chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&target.root) && !chain.early_attester_cache.contains_block(target.root) { return Err(Error::UnknownTargetRoot(target.root)); diff --git a/beacon_node/beacon_chain/src/attestation_verification/batch.rs b/beacon_node/beacon_chain/src/attestation_verification/batch.rs index 30f1ae7e5b..6f76cce024 100644 --- a/beacon_node/beacon_chain/src/attestation_verification/batch.rs +++ b/beacon_node/beacon_chain/src/attestation_verification/batch.rs @@ -65,7 +65,7 @@ where .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; - let fork = chain.with_head(|head| Ok::<_, BeaconChainError>(head.beacon_state.fork()))?; + let fork = chain.canonical_head.cached_head().head_fork(); let mut signature_sets = Vec::with_capacity(num_indexed * 3); @@ -169,13 +169,13 @@ where &metrics::ATTESTATION_PROCESSING_BATCH_UNAGG_SIGNATURE_SETUP_TIMES, ); + let fork = chain.canonical_head.cached_head().head_fork(); + let pubkey_cache = chain .validator_pubkey_cache .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; - let fork = chain.with_head(|head| Ok::<_, BeaconChainError>(head.beacon_state.fork()))?; - let mut signature_sets = 
Vec::with_capacity(num_partially_verified); // Iterate, flattening to get only the `Ok` values. diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index a64c971875..9fb895f78f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -9,15 +9,15 @@ use crate::beacon_proposer_cache::BeaconProposerCache; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::{ check_block_is_finalized_descendant, check_block_relevancy, get_block_root, - signature_verify_chain_segment, BlockError, FullyVerifiedBlock, GossipVerifiedBlock, - IntoFullyVerifiedBlock, + signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock, + IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER, }; use crate::chain_config::ChainConfig; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::events::ServerSentEventHandler; -use crate::execution_payload::get_execution_payload; +use crate::execution_payload::{get_execution_payload, PreparePayloadHandle}; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::head_tracker::HeadTracker; use crate::historical_blocks::HistoricalBlockError; @@ -52,17 +52,17 @@ use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::BeaconForkChoiceStore; use crate::BeaconSnapshot; use crate::{metrics, BeaconChainError}; -use eth2::types::{ - EventKind, SseBlock, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead, SyncDuty, -}; +use eth2::types::{EventKind, SseBlock, SyncDuty}; use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatus}; -use fork_choice::{AttestationFromBlock, ForkChoice, InvalidationOperation}; +use fork_choice::{ + AttestationFromBlock, ExecutionStatus, ForkChoice, 
ForkchoiceUpdateParameters, + InvalidationOperation, PayloadVerificationStatus, +}; use futures::channel::mpsc::Sender; use itertools::process_results; use itertools::Itertools; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{Mutex, RwLock}; -use proto_array::ExecutionStatus; use safe_arith::SafeArith; use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; @@ -71,7 +71,7 @@ use ssz::Encode; use state_processing::{ common::get_indexed_attestation, per_block_processing, - per_block_processing::{errors::AttestationValidationError, is_merge_transition_complete}, + per_block_processing::errors::AttestationValidationError, per_slot_processing, state_advance::{complete_state_advance, partial_state_advance}, BlockSignatureStrategy, SigVerifiedOp, VerifyBlockRoot, @@ -87,16 +87,17 @@ use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterato use store::{ DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; -use task_executor::ShutdownReason; +use task_executor::{ShutdownReason, TaskExecutor}; use tree_hash::TreeHash; use types::beacon_state::CloneConfig; use types::*; +pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; + pub type ForkChoiceError = fork_choice::Error; -/// The time-out before failure during an operation to take a read/write RwLock on the canonical -/// head. -pub const HEAD_LOCK_TIMEOUT: Duration = Duration::from_secs(1); +/// Alias to appease clippy. +type HashBlockTuple = (Hash256, Arc>); /// The time-out before failure during an operation to take a read/write RwLock on the block /// processing cache. 
@@ -216,22 +217,6 @@ pub enum StateSkipConfig { WithoutStateRoots, } -#[derive(Debug, PartialEq)] -pub struct HeadInfo { - pub slot: Slot, - pub block_root: Hash256, - pub state_root: Hash256, - pub current_justified_checkpoint: types::Checkpoint, - pub finalized_checkpoint: types::Checkpoint, - pub fork: Fork, - pub genesis_time: u64, - pub genesis_validators_root: Hash256, - pub proposer_shuffling_decision_root: Hash256, - pub is_merge_transition_complete: bool, - pub execution_payload_block_hash: Option, - pub random: Hash256, -} - pub trait BeaconChainTypes: Send + Sync + 'static { type HotStore: store::ItemStore; type ColdStore: store::ItemStore; @@ -240,23 +225,22 @@ pub trait BeaconChainTypes: Send + Sync + 'static { type EthSpec: types::EthSpec; } -/// Indicates the EL payload verification status of the head beacon block. -#[derive(Debug, PartialEq)] -pub enum HeadSafetyStatus { - /// The head block has either been verified by an EL or is does not require EL verification - /// (e.g., it is pre-merge or pre-terminal-block). - /// - /// If the block is post-terminal-block, `Some(execution_payload.block_hash)` is included with - /// the variant. - Safe(Option), - /// The head block execution payload has not yet been verified by an EL. - /// - /// The `execution_payload.block_hash` of the head block is returned. - Unsafe(ExecutionBlockHash), - /// The head block execution payload was deemed to be invalid by an EL. - /// - /// The `execution_payload.block_hash` of the head block is returned. - Invalid(ExecutionBlockHash), +/// Used internally to split block production into discrete functions. 
+struct PartialBeaconBlock { + state: BeaconState, + slot: Slot, + proposer_index: u64, + parent_root: Hash256, + randao_reveal: Signature, + eth1_data: Eth1Data, + graffiti: Graffiti, + proposer_slashings: Vec, + attester_slashings: Vec>, + attestations: Vec>, + deposits: Vec, + voluntary_exits: Vec, + sync_aggregate: Option>, + prepare_payload_handle: Option>, } pub type BeaconForkChoice = ForkChoice< @@ -284,6 +268,8 @@ pub struct BeaconChain { pub config: ChainConfig, /// Persistent storage for blocks, states, etc. Typically an on-disk store, such as LevelDB. pub store: BeaconStore, + /// Used for spawning async and blocking tasks. + pub task_executor: TaskExecutor, /// Database migrator for running background maintenance on the store. pub store_migrator: BackgroundMigrator, /// Reports the current slot, typically based upon the system clock. @@ -335,21 +321,21 @@ pub struct BeaconChain { pub eth1_chain: Option>, /// Interfaces with the execution client. pub execution_layer: Option>, - /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. - pub(crate) canonical_head: TimeoutRwLock>, + /// Stores information about the canonical head and finalized/justified checkpoints of the + /// chain. Also contains the fork choice struct, for computing the canonical head. + pub canonical_head: CanonicalHead, /// The root of the genesis block. pub genesis_block_root: Hash256, /// The root of the genesis state. pub genesis_state_root: Hash256, /// The root of the list of genesis validators, used during syncing. pub genesis_validators_root: Hash256, - /// A state-machine that is updated with information from the network and chooses a canonical - /// head block. - pub fork_choice: RwLock>, /// Transmitter used to indicate that slot-start fork choice has completed running. pub fork_choice_signal_tx: Option, /// Receiver used by block production to wait on slot-start fork choice. 
pub fork_choice_signal_rx: Option, + /// The genesis time of this `BeaconChain` (seconds since UNIX epoch). + pub genesis_time: u64, /// A handler for events generated by the beacon chain. This is only initialized when the /// HTTP server is enabled. pub event_handler: Option>, @@ -358,7 +344,7 @@ pub struct BeaconChain { /// A cache dedicated to block processing. pub(crate) snapshot_cache: TimeoutRwLock>, /// Caches the attester shuffling for a given epoch and shuffling key root. - pub(crate) shuffling_cache: TimeoutRwLock, + pub shuffling_cache: TimeoutRwLock, /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root. pub beacon_proposer_cache: Mutex, /// Caches a map of `validator_index -> validator_pubkey`. @@ -430,25 +416,11 @@ impl BeaconChain { .as_kv_store_op(BEACON_CHAIN_DB_KEY) } - /// Return a database operation for writing fork choice to disk. - pub fn persist_fork_choice_in_batch(&self) -> KeyValueStoreOp { - let fork_choice = self.fork_choice.read(); - Self::persist_fork_choice_in_batch_standalone(&fork_choice) - } - - /// Return a database operation for writing fork choice to disk. - pub fn persist_fork_choice_in_batch_standalone( - fork_choice: &BeaconForkChoice, - ) -> KeyValueStoreOp { - let persisted_fork_choice = PersistedForkChoice { - fork_choice: fork_choice.to_persisted(), - fork_choice_store: fork_choice.fc_store().to_persisted(), - }; - persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY) - } - /// Load fork choice from disk, returning `None` if it isn't found. - pub fn load_fork_choice(store: BeaconStore) -> Result>, Error> { + pub fn load_fork_choice( + store: BeaconStore, + spec: &ChainSpec, + ) -> Result>, Error> { let persisted_fork_choice = match store.get_item::(&FORK_CHOICE_DB_KEY)? 
{ Some(fc) => fc, @@ -461,6 +433,7 @@ impl BeaconChain { Ok(Some(ForkChoice::from_persisted( persisted_fork_choice.fork_choice, fc_store, + spec, )?)) } @@ -538,11 +511,11 @@ impl BeaconChain { )); } - let local_head = self.head()?; + let local_head = self.head_snapshot(); let iter = self.store.forwards_block_roots_iterator( start_slot, - local_head.beacon_state, + local_head.beacon_state.clone_with(CloneConfig::none()), local_head.beacon_block_root, &self.spec, )?; @@ -612,77 +585,6 @@ impl BeaconChain { .map(|result| result.map_err(|e| e.into()))) } - /// Iterate through the current chain to find the slot intersecting with the given beacon state. - /// The maximum depth this will search is `SLOTS_PER_HISTORICAL_ROOT`, and if that depth is reached - /// and no intersection is found, the finalized slot will be returned. - pub fn find_reorg_slot( - &self, - new_state: &BeaconState, - new_block_root: Hash256, - ) -> Result { - self.with_head(|snapshot| { - let old_state = &snapshot.beacon_state; - let old_block_root = snapshot.beacon_block_root; - - // The earliest slot for which the two chains may have a common history. - let lowest_slot = std::cmp::min(new_state.slot(), old_state.slot()); - - // Create an iterator across `$state`, assuming that the block at `$state.slot` has the - // block root of `$block_root`. - // - // The iterator will be skipped until the next value returns `lowest_slot`. - // - // This is a macro instead of a function or closure due to the complex types invloved - // in all the iterator wrapping. - macro_rules! aligned_roots_iter { - ($state: ident, $block_root: ident) => { - std::iter::once(Ok(($state.slot(), $block_root))) - .chain($state.rev_iter_block_roots(&self.spec)) - .skip_while(|result| { - result - .as_ref() - .map_or(false, |(slot, _)| *slot > lowest_slot) - }) - }; - } - - // Create iterators across old/new roots where iterators both start at the same slot. 
- let mut new_roots = aligned_roots_iter!(new_state, new_block_root); - let mut old_roots = aligned_roots_iter!(old_state, old_block_root); - - // Whilst *both* of the iterators are still returning values, try and find a common - // ancestor between them. - while let (Some(old), Some(new)) = (old_roots.next(), new_roots.next()) { - let (old_slot, old_root) = old?; - let (new_slot, new_root) = new?; - - // Sanity check to detect programming errors. - if old_slot != new_slot { - return Err(Error::InvalidReorgSlotIter { new_slot, old_slot }); - } - - if old_root == new_root { - // A common ancestor has been found. - return Ok(old_slot); - } - } - - // If no common ancestor is found, declare that the re-org happened at the previous - // finalized slot. - // - // Sometimes this will result in the return slot being *lower* than the actual reorg - // slot. However, assuming we don't re-org through a finalized slot, it will never be - // *higher*. - // - // We provide this potentially-inaccurate-but-safe information to avoid onerous - // database reads during times of deep reorgs. - Ok(old_state - .finalized_checkpoint() - .epoch - .start_slot(T::EthSpec::slots_per_epoch())) - }) - } - /// Iterates backwards across all `(state_root, slot)` pairs starting from /// an arbitrary `BeaconState` to the earliest reachable ancestor (may or may not be genesis). 
/// @@ -713,12 +615,12 @@ impl BeaconChain { &self, start_slot: Slot, ) -> Result> + '_, Error> { - let local_head = self.head()?; + let local_head = self.head_snapshot(); let iter = self.store.forwards_state_roots_iterator( start_slot, local_head.beacon_state_root(), - local_head.beacon_state, + local_head.beacon_state.clone_with(CloneConfig::none()), &self.spec, )?; @@ -978,11 +880,11 @@ impl BeaconChain { pub async fn get_block_checking_early_attester_cache( &self, block_root: &Hash256, - ) -> Result>, Error> { + ) -> Result>>, Error> { if let Some(block) = self.early_attester_cache.get_block(*block_root) { return Ok(Some(block)); } - self.get_block(block_root).await + Ok(self.get_block(block_root).await?.map(Arc::new)) } /// Returns the block at the given root, if any. @@ -1068,53 +970,6 @@ impl BeaconChain { Ok(self.store.get_state(state_root, slot)?) } - /// Returns a `Checkpoint` representing the head block and state. Contains the "best block"; - /// the head of the canonical `BeaconChain`. - /// - /// It is important to note that the `beacon_state` returned may not match the present slot. It - /// is the state as it was when the head block was received, which could be some slots prior to - /// now. - pub fn head(&self) -> Result, Error> { - self.with_head(|head| Ok(head.clone_with(CloneConfig::committee_caches_only()))) - } - - /// Apply a function to the canonical head without cloning it. - pub fn with_head(&self, f: F) -> Result - where - E: From, - F: FnOnce(&BeaconSnapshot) -> Result, - { - let head_lock = self - .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout)?; - f(&head_lock) - } - - /// Returns the beacon block root at the head of the canonical chain. - /// - /// See `Self::head` for more information. - pub fn head_beacon_block_root(&self) -> Result { - self.with_head(|s| Ok(s.beacon_block_root)) - } - - /// Returns the beacon block at the head of the canonical chain. 
- /// - /// See `Self::head` for more information. - pub fn head_beacon_block(&self) -> Result, Error> { - self.with_head(|s| Ok(s.beacon_block.clone())) - } - - /// Returns the beacon state at the head of the canonical chain. - /// - /// See `Self::head` for more information. - pub fn head_beacon_state(&self) -> Result, Error> { - self.with_head(|s| { - Ok(s.beacon_state - .clone_with(CloneConfig::committee_caches_only())) - }) - } - /// Return the sync committee at `slot + 1` from the canonical chain. /// /// This is useful when dealing with sync committee messages, because messages are signed @@ -1189,42 +1044,6 @@ impl BeaconChain { self.state_at_slot(load_slot, StateSkipConfig::WithoutStateRoots) } - /// Returns info representing the head block and state. - /// - /// A summarized version of `Self::head` that involves less cloning. - pub fn head_info(&self) -> Result { - self.with_head(|head| { - let proposer_shuffling_decision_root = head - .beacon_state - .proposer_shuffling_decision_root(head.beacon_block_root)?; - - // The `random` value is used whilst producing an `ExecutionPayload` atop the head. 
- let current_epoch = head.beacon_state.current_epoch(); - let random = *head.beacon_state.get_randao_mix(current_epoch)?; - - Ok(HeadInfo { - slot: head.beacon_block.slot(), - block_root: head.beacon_block_root, - state_root: head.beacon_state_root(), - current_justified_checkpoint: head.beacon_state.current_justified_checkpoint(), - finalized_checkpoint: head.beacon_state.finalized_checkpoint(), - fork: head.beacon_state.fork(), - genesis_time: head.beacon_state.genesis_time(), - genesis_validators_root: head.beacon_state.genesis_validators_root(), - proposer_shuffling_decision_root, - is_merge_transition_complete: is_merge_transition_complete(&head.beacon_state), - execution_payload_block_hash: head - .beacon_block - .message() - .body() - .execution_payload() - .ok() - .map(|ep| ep.block_hash()), - random, - }) - }) - } - /// Returns the current heads of the `BeaconChain`. For the canonical head, see `Self::head`. /// /// Returns `(block_root, block_slot)`. @@ -1245,7 +1064,7 @@ impl BeaconChain { slot: Slot, config: StateSkipConfig, ) -> Result, Error> { - let head_state = self.head()?.beacon_state; + let head_state = self.head_beacon_state_cloned(); match slot.cmp(&head_state.slot()) { Ordering::Equal => Ok(head_state), @@ -1330,14 +1149,6 @@ impl BeaconChain { self.state_at_slot(self.slot()?, StateSkipConfig::WithStateRoots) } - /// Returns the slot of the highest block in the canonical chain. - pub fn best_slot(&self) -> Result { - self.canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .map(|head| head.beacon_block.slot()) - .ok_or(Error::CanonicalHeadLockTimeout) - } - /// Returns the validator index (if any) for the given public key. 
/// /// ## Notes @@ -1477,7 +1288,7 @@ impl BeaconChain { validator_indices: &[u64], epoch: Epoch, head_block_root: Hash256, - ) -> Result<(Vec>, Hash256), Error> { + ) -> Result<(Vec>, Hash256, ExecutionStatus), Error> { self.with_committee_cache(head_block_root, epoch, |committee_cache, dependent_root| { let duties = validator_indices .iter() @@ -1487,7 +1298,13 @@ impl BeaconChain { }) .collect(); - Ok((duties, dependent_root)) + let execution_status = self + .canonical_head + .fork_choice_read_lock() + .get_block_execution_status(&head_block_root) + .ok_or(Error::AttestationHeadNotInForkChoice(head_block_root))?; + + Ok((duties, dependent_root, execution_status)) }) } @@ -1535,8 +1352,8 @@ impl BeaconChain { ) -> Result, Error> { let beacon_block_root = attestation.data.beacon_block_root; match self - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block_execution_status(&beacon_block_root) { // The attestation references a block that is not in fork choice, it must be @@ -1624,7 +1441,10 @@ impl BeaconChain { let current_epoch_attesting_info: Option<(Checkpoint, usize)>; let attester_cache_key; let head_timer = metrics::start_timer(&metrics::ATTESTATION_PRODUCTION_HEAD_SCRAPE_SECONDS); - if let Some(head) = self.canonical_head.try_read_for(HEAD_LOCK_TIMEOUT) { + // The following braces are to prevent the `cached_head` Arc from being held for longer than + // required. It also helps reduce the diff for a very large PR (#3244). + { + let head = self.head_snapshot(); let head_state = &head.beacon_state; head_state_slot = head_state.slot(); @@ -1699,15 +1519,13 @@ impl BeaconChain { // routine. attester_cache_key = AttesterCacheKey::new(request_epoch, head_state, beacon_block_root)?; - } else { - return Err(Error::CanonicalHeadLockTimeout); } drop(head_timer); // Only attest to a block if it is fully verified (i.e. not optimistic or invalid). 
match self - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block_execution_status(&beacon_block_root) { Some(execution_status) if execution_status.is_valid_or_irrelevant() => (), @@ -1911,8 +1729,8 @@ impl BeaconChain { ) -> Result<(), Error> { let _timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); - self.fork_choice - .write() + self.canonical_head + .fork_choice_write_lock() .on_attestation( self.slot()?, verified.indexed_attestation(), @@ -2047,8 +1865,7 @@ impl BeaconChain { // If there's no eth1 chain then it's impossible to produce blocks and therefore // useless to put things in the op pool. if self.eth1_chain.is_some() { - let fork = - self.with_head(|head| Ok::<_, AttestationError>(head.beacon_state.fork()))?; + let fork = self.canonical_head.cached_head().head_fork(); self.op_pool .insert_attestation( @@ -2153,7 +1970,7 @@ impl BeaconChain { // pivot block is the same as the current state's pivot block. If it is, then the // attestation's shuffling is the same as the current state's. // To account for skipped slots, find the first block at *or before* the pivot slot. - let fork_choice_lock = self.fork_choice.read(); + let fork_choice_lock = self.canonical_head.fork_choice_read_lock(); let pivot_block_root = fork_choice_lock .proto_array() .core_proto_array() @@ -2244,12 +2061,13 @@ impl BeaconChain { pub fn import_attester_slashing( &self, attester_slashing: SigVerifiedOp>, - ) -> Result<(), Error> { + ) { if self.eth1_chain.is_some() { - self.op_pool - .insert_attester_slashing(attester_slashing, self.head_info()?.fork) + self.op_pool.insert_attester_slashing( + attester_slashing, + self.canonical_head.cached_head().head_fork(), + ) } - Ok(()) } /// Attempt to obtain sync committee duties from the head. @@ -2265,22 +2083,36 @@ impl BeaconChain { }) } - /// Attempt to verify and import a chain of blocks to `self`. + /// A convenience method for spawning a blocking task. 
It maps an `Option` and + /// `tokio::JoinError` into a single `BeaconChainError`. + pub(crate) async fn spawn_blocking_handle( + &self, + task: F, + name: &'static str, + ) -> Result + where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, + { + let handle = self + .task_executor + .spawn_blocking_handle(task, name) + .ok_or(Error::RuntimeShutdown)?; + + handle.await.map_err(Error::TokioJoin) + } + + /// Accepts a `chain_segment` and filters out any uninteresting blocks (e.g., pre-finalization + /// or already-known). /// - /// The provided blocks _must_ each reference the previous block via `block.parent_root` (i.e., - /// be a chain). An error will be returned if this is not the case. - /// - /// This operation is not atomic; if one of the blocks in the chain is invalid then some prior - /// blocks might be imported. - /// - /// This method is generally much more efficient than importing each block using - /// `Self::process_block`. - pub fn process_chain_segment( + /// This method is potentially long-running and should not run on the core executor. + pub fn filter_chain_segment( self: &Arc, - chain_segment: Vec>, - ) -> ChainSegmentResult { + chain_segment: Vec>>, + ) -> Result>, ChainSegmentResult> { + // This function will never import any blocks. + let imported_blocks = 0; let mut filtered_chain_segment = Vec::with_capacity(chain_segment.len()); - let mut imported_blocks = 0; // Produce a list of the parent root and slot of the child of each block. // @@ -2294,10 +2126,10 @@ impl BeaconChain { for (i, block) in chain_segment.into_iter().enumerate() { // Ensure the block is the correct structure for the fork at `block.slot()`. 
if let Err(e) = block.fork_name(&self.spec) { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::InconsistentFork(e), - }; + }); } let block_root = get_block_root(&block); @@ -2309,18 +2141,18 @@ impl BeaconChain { // Without this check it would be possible to have a block verified using the // incorrect shuffling. That would be bad, mmkay. if block_root != *child_parent_root { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::NonLinearParentRoots, - }; + }); } // Ensure that the slots are strictly increasing throughout the chain segment. if *child_slot <= block.slot() { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::NonLinearSlots, - }; + }); } } @@ -2348,18 +2180,18 @@ impl BeaconChain { // The block has a known parent that does not descend from the finalized block. // There is no need to process this block or any children. Err(BlockError::NotFinalizedDescendant { block_parent_root }) => { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::NotFinalizedDescendant { block_parent_root }, - }; + }); } // If there was an error whilst determining if the block was invalid, return that // error. Err(BlockError::BeaconChainError(e)) => { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::BeaconChainError(e), - }; + }); } // If the block was decided to be irrelevant for any other reason, don't include // this block or any of it's children in the filtered chain segment. @@ -2367,6 +2199,42 @@ impl BeaconChain { } } + Ok(filtered_chain_segment) + } + + /// Attempt to verify and import a chain of blocks to `self`. + /// + /// The provided blocks _must_ each reference the previous block via `block.parent_root` (i.e., + /// be a chain). 
An error will be returned if this is not the case. + /// + /// This operation is not atomic; if one of the blocks in the chain is invalid then some prior + /// blocks might be imported. + /// + /// This method is generally much more efficient than importing each block using + /// `Self::process_block`. + pub async fn process_chain_segment( + self: &Arc, + chain_segment: Vec>>, + ) -> ChainSegmentResult { + let mut imported_blocks = 0; + + // Filter uninteresting blocks from the chain segment in a blocking task. + let chain = self.clone(); + let filtered_chain_segment_future = self.spawn_blocking_handle( + move || chain.filter_chain_segment(chain_segment), + "filter_chain_segment", + ); + let mut filtered_chain_segment = match filtered_chain_segment_future.await { + Ok(Ok(filtered_segment)) => filtered_segment, + Ok(Err(segment_result)) => return segment_result, + Err(error) => { + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::BeaconChainError(error), + } + } + }; + while let Some((_root, block)) = filtered_chain_segment.first() { // Determine the epoch of the first block in the remaining segment. let start_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); @@ -2386,20 +2254,32 @@ impl BeaconChain { let mut blocks = filtered_chain_segment.split_off(last_index); std::mem::swap(&mut blocks, &mut filtered_chain_segment); + let chain = self.clone(); + let signature_verification_future = self.spawn_blocking_handle( + move || signature_verify_chain_segment(blocks, &chain), + "signature_verify_chain_segment", + ); + // Verify the signature of the blocks, returning early if the signature is invalid. 
- let signature_verified_blocks = match signature_verify_chain_segment(blocks, self) { - Ok(blocks) => blocks, - Err(error) => { + let signature_verified_blocks = match signature_verification_future.await { + Ok(Ok(blocks)) => blocks, + Ok(Err(error)) => { return ChainSegmentResult::Failed { imported_blocks, error, }; } + Err(error) => { + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::BeaconChainError(error), + }; + } }; // Import the blocks into the chain. for signature_verified_block in signature_verified_blocks { - match self.process_block(signature_verified_block) { + match self.process_block(signature_verified_block).await { Ok(_) => imported_blocks += 1, Err(error) => { return ChainSegmentResult::Failed { @@ -2424,43 +2304,54 @@ impl BeaconChain { /// ## Errors /// /// Returns an `Err` if the given block was invalid, or an error was encountered during - pub fn verify_block_for_gossip( - &self, - block: SignedBeaconBlock, + pub async fn verify_block_for_gossip( + self: &Arc, + block: Arc>, ) -> Result, BlockError> { - let slot = block.slot(); - let graffiti_string = block.message().body().graffiti().as_utf8_lossy(); + let chain = self.clone(); + self.task_executor + .clone() + .spawn_blocking_handle( + move || { + let slot = block.slot(); + let graffiti_string = block.message().body().graffiti().as_utf8_lossy(); - match GossipVerifiedBlock::new(block, self) { - Ok(verified) => { - debug!( - self.log, - "Successfully processed gossip block"; - "graffiti" => graffiti_string, - "slot" => slot, - "root" => ?verified.block_root(), - ); + match GossipVerifiedBlock::new(block, &chain) { + Ok(verified) => { + debug!( + chain.log, + "Successfully processed gossip block"; + "graffiti" => graffiti_string, + "slot" => slot, + "root" => ?verified.block_root(), + ); - Ok(verified) - } - Err(e) => { - debug!( - self.log, - "Rejected gossip block"; - "error" => e.to_string(), - "graffiti" => graffiti_string, - "slot" => slot, - ); + Ok(verified) 
+ } + Err(e) => { + debug!( + chain.log, + "Rejected gossip block"; + "error" => e.to_string(), + "graffiti" => graffiti_string, + "slot" => slot, + ); - Err(e) - } - } + Err(e) + } + } + }, + "payload_verification_handle", + ) + .ok_or(BeaconChainError::RuntimeShutdown)? + .await + .map_err(BeaconChainError::TokioJoin)? } /// Returns `Ok(block_root)` if the given `unverified_block` was successfully verified and /// imported into the chain. /// - /// Items that implement `IntoFullyVerifiedBlock` include: + /// Items that implement `IntoExecutionPendingBlock` include: /// /// - `SignedBeaconBlock` /// - `GossipVerifiedBlock` @@ -2469,7 +2360,7 @@ impl BeaconChain { /// /// Returns an `Err` if the given block was invalid, or an error was encountered during /// verification. - pub fn process_block>( + pub async fn process_block>( self: &Arc, unverified_block: B, ) -> Result> { @@ -2483,13 +2374,16 @@ impl BeaconChain { let block = unverified_block.block().clone(); // A small closure to group the verification and import errors. - let import_block = |unverified_block: B| -> Result> { - let fully_verified = unverified_block.into_fully_verified_block(self)?; - self.import_block(fully_verified) + let chain = self.clone(); + let import_block = async move { + let execution_pending = unverified_block.into_execution_pending_block(&chain)?; + chain + .import_execution_pending_block(execution_pending) + .await }; // Verify and import the block. - match import_block(unverified_block) { + match import_block.await { // The block was successfully verified and imported. Yay. Ok(block_root) => { trace!( @@ -2504,6 +2398,14 @@ impl BeaconChain { Ok(block_root) } + Err(e @ BlockError::BeaconChainError(BeaconChainError::TokioJoin(_))) => { + debug!( + self.log, + "Beacon block processing cancelled"; + "error" => ?e, + ); + Err(e) + } // There was an error whilst attempting to verify and import the block. The block might // be partially verified or partially imported. 
Err(BlockError::BeaconChainError(e)) => { @@ -2526,6 +2428,81 @@ impl BeaconChain { } } + /// Accepts a fully-verified block and imports it into the chain without performing any + /// additional verification. + /// + /// An error is returned if the block was unable to be imported. It may be partially imported + /// (i.e., this function is not atomic). + async fn import_execution_pending_block( + self: Arc, + execution_pending_block: ExecutionPendingBlock, + ) -> Result> { + let ExecutionPendingBlock { + block, + block_root, + state, + parent_block: _, + confirmed_state_roots, + payload_verification_handle, + } = execution_pending_block; + + let PayloadVerificationOutcome { + payload_verification_status, + is_valid_merge_transition_block, + } = payload_verification_handle + .await + .map_err(BeaconChainError::TokioJoin)? + .ok_or(BeaconChainError::RuntimeShutdown)??; + + // Log the PoS pandas if a merge transition just occurred. + if is_valid_merge_transition_block { + info!(self.log, "{}", POS_PANDA_BANNER); + info!( + self.log, + "Proof of Stake Activated"; + "slot" => block.slot() + ); + info!( + self.log, ""; + "Terminal POW Block Hash" => ?block + .message() + .execution_payload()? + .parent_hash() + .into_root() + ); + info!( + self.log, ""; + "Merge Transition Block Root" => ?block.message().tree_hash_root() + ); + info!( + self.log, ""; + "Merge Transition Execution Hash" => ?block + .message() + .execution_payload()? + .block_hash() + .into_root() + ); + } + + let chain = self.clone(); + let block_hash = self + .spawn_blocking_handle( + move || { + chain.import_block( + block, + block_root, + state, + confirmed_state_roots, + payload_verification_status, + ) + }, + "payload_verification_handle", + ) + .await??; + + Ok(block_hash) + } + /// Accepts a fully-verified block and imports it into the chain without performing any /// additional verification. /// @@ -2533,15 +2510,14 @@ impl BeaconChain { /// (i.e., this function is not atomic). 
fn import_block( &self, - fully_verified_block: FullyVerifiedBlock, + signed_block: Arc>, + block_root: Hash256, + mut state: BeaconState, + confirmed_state_roots: Vec, + payload_verification_status: PayloadVerificationStatus, ) -> Result> { - let signed_block = fully_verified_block.block; - let block_root = fully_verified_block.block_root; - let mut state = fully_verified_block.state; let current_slot = self.slot()?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - let mut ops = fully_verified_block.confirmation_db_batch; - let payload_verification_status = fully_verified_block.payload_verification_status; let attestation_observation_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION); @@ -2614,21 +2590,24 @@ impl BeaconChain { .map_err(BeaconChainError::from)?; } - let mut fork_choice = self.fork_choice.write(); - - // Do not import a block that doesn't descend from the finalized root. - let signed_block = - check_block_is_finalized_descendant::(signed_block, &fork_choice, &self.store)?; - let (block, block_signature) = signed_block.clone().deconstruct(); - - // compare the existing finalized checkpoint with the incoming block's finalized checkpoint - let old_finalized_checkpoint = fork_choice.finalized_checkpoint(); - let new_finalized_checkpoint = state.finalized_checkpoint(); + // Alias for readability. + let block = signed_block.message(); // Only perform the weak subjectivity check if it was configured. if let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint { + // Note: we're using the finalized checkpoint from the head state, rather than fork + // choice. + // + // We are doing this to ensure that we detect changes in finalization. It's possible + // that fork choice has already been updated to the finalized checkpoint in the block + // we're importing. 
+ let current_head_finalized_checkpoint = + self.canonical_head.cached_head().finalized_checkpoint(); + // Compare the existing finalized checkpoint with the incoming block's finalized checkpoint. + let new_finalized_checkpoint = state.finalized_checkpoint(); + // This ensures we only perform the check once. - if (old_finalized_checkpoint.epoch < wss_checkpoint.epoch) + if (current_head_finalized_checkpoint.epoch < wss_checkpoint.epoch) && (wss_checkpoint.epoch <= new_finalized_checkpoint.epoch) { if let Err(e) = @@ -2640,7 +2619,7 @@ impl BeaconChain { "Weak subjectivity checkpoint verification failed while importing block!"; "block_root" => ?block_root, "parent_root" => ?block.parent_root(), - "old_finalized_epoch" => ?old_finalized_checkpoint.epoch, + "old_finalized_epoch" => ?current_head_finalized_checkpoint.epoch, "new_finalized_epoch" => ?new_finalized_checkpoint.epoch, "weak_subjectivity_epoch" => ?wss_checkpoint.epoch, "error" => ?e, @@ -2656,6 +2635,13 @@ impl BeaconChain { } } + // Take an exclusive write-lock on fork choice. It's very important prevent deadlocks by + // avoiding taking other locks whilst holding this lock. + let mut fork_choice = self.canonical_head.fork_choice_write_lock(); + + // Do not import a block that doesn't descend from the finalized root. + check_block_is_finalized_descendant(self, &fork_choice, &signed_block)?; + // Register the new block with the fork choice service. { let _fork_choice_block_timer = @@ -2668,7 +2654,7 @@ impl BeaconChain { fork_choice .on_block( current_slot, - &block, + block, block_root, block_delay, &state, @@ -2843,7 +2829,11 @@ impl BeaconChain { // If the write fails, revert fork choice to the version from disk, else we can // end up with blocks in fork choice that are missing from disk. 
// See https://github.com/sigp/lighthouse/issues/2028 - ops.push(StoreOp::PutBlock(block_root, Box::new(signed_block))); + let mut ops: Vec<_> = confirmed_state_roots + .into_iter() + .map(StoreOp::DeleteStateTemporaryFlag) + .collect(); + ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); let txn_lock = self.store.hot_db.begin_rw_transaction(); @@ -2854,18 +2844,23 @@ impl BeaconChain { "msg" => "Restoring fork choice from disk", "error" => ?e, ); - match Self::load_fork_choice(self.store.clone())? { - Some(persisted_fork_choice) => { - *fork_choice = persisted_fork_choice; - } - None => { - crit!( - self.log, - "No stored fork choice found to restore from"; - "warning" => "The database is likely corrupt now, consider --purge-db" - ); - } + + // Since the write failed, try to revert the canonical head back to what was stored + // in the database. This attempts to prevent inconsistency between the database and + // fork choice. + if let Err(e) = + self.canonical_head + .restore_from_store(fork_choice, &self.store, &self.spec) + { + crit!( + self.log, + "No stored fork choice found to restore from"; + "error" => ?e, + "warning" => "The database is likely corrupt now, consider --purge-db" + ); + return Err(BlockError::BeaconChainError(e)); } + return Err(e.into()); } drop(txn_lock); @@ -2880,7 +2875,6 @@ impl BeaconChain { let parent_root = block.parent_root(); let slot = block.slot(); - let signed_block = SignedBeaconBlock::from_block(block, block_signature); self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) @@ -3017,7 +3011,7 @@ impl BeaconChain { /// /// The produced block will not be inherently valid, it must be signed by a block producer. /// Block signing is out of the scope of this function and should be done by a separate program. 
- pub fn produce_block>( + pub async fn produce_block>( self: &Arc, randao_reveal: Signature, slot: Slot, @@ -3029,16 +3023,51 @@ impl BeaconChain { validator_graffiti, ProduceBlockVerification::VerifyRandao, ) + .await } /// Same as `produce_block` but allowing for configuration of RANDAO-verification. - pub fn produce_block_with_verification>( + pub async fn produce_block_with_verification>( self: &Arc, randao_reveal: Signature, slot: Slot, validator_graffiti: Option, verification: ProduceBlockVerification, ) -> Result, BlockProductionError> { + // Part 1/2 (blocking) + // + // Load the parent state from disk. + let chain = self.clone(); + let (state, state_root_opt) = self + .task_executor + .spawn_blocking_handle( + move || chain.load_state_for_block_production::(slot), + "produce_partial_beacon_block", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)??; + + // Part 2/2 (async, with some blocking components) + // + // Produce the block upon the state + self.produce_block_on_state::( + state, + state_root_opt, + slot, + randao_reveal, + validator_graffiti, + verification, + ) + .await + } + + /// Load a beacon state from the database for block production. This is a long-running process + /// that should not be performed in an `async` context. + fn load_state_for_block_production>( + self: &Arc, + slot: Slot, + ) -> Result<(BeaconState, Option), BlockProductionError> { metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); @@ -3052,16 +3081,19 @@ impl BeaconChain { // signed. If we miss the cache or we're producing a block that conflicts with the head, // fall back to getting the head from `slot - 1`. 
let state_load_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_LOAD_TIMES); - let head_info = self - .head_info() - .map_err(BlockProductionError::UnableToGetHeadInfo)?; - let (state, state_root_opt) = if head_info.slot < slot { + // Atomically read some values from the head whilst avoiding holding cached head `Arc` any + // longer than necessary. + let (head_slot, head_block_root) = { + let head = self.canonical_head.cached_head(); + (head.head_slot(), head.head_block_root()) + }; + let (state, state_root_opt) = if head_slot < slot { // Normal case: proposing a block atop the current head. Use the snapshot cache. if let Some(pre_state) = self .snapshot_cache .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .and_then(|snapshot_cache| { - snapshot_cache.get_state_for_block_production(head_info.block_root) + snapshot_cache.get_state_for_block_production(head_block_root) }) { (pre_state.pre_state, pre_state.state_root) @@ -3091,16 +3123,10 @@ impl BeaconChain { (state, None) }; + drop(state_load_timer); - self.produce_block_on_state::( - state, - state_root_opt, - slot, - randao_reveal, - validator_graffiti, - verification, - ) + Ok((state, state_root_opt)) } /// Produce a block for some `slot` upon the given `state`. @@ -3115,15 +3141,79 @@ impl BeaconChain { /// The provided `state_root_opt` should only ever be set to `Some` if the contained value is /// equal to the root of `state`. Providing this value will serve as an optimization to avoid /// performing a tree hash in some scenarios. - pub fn produce_block_on_state>( - &self, - mut state: BeaconState, + pub async fn produce_block_on_state>( + self: &Arc, + state: BeaconState, state_root_opt: Option, produce_at_slot: Slot, randao_reveal: Signature, validator_graffiti: Option, verification: ProduceBlockVerification, ) -> Result, BlockProductionError> { + // Part 1/3 (blocking) + // + // Perform the state advance and block-packing functions. 
+ let chain = self.clone(); + let mut partial_beacon_block = self + .task_executor + .spawn_blocking_handle( + move || { + chain.produce_partial_beacon_block( + state, + state_root_opt, + produce_at_slot, + randao_reveal, + validator_graffiti, + ) + }, + "produce_partial_beacon_block", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)??; + + // Part 2/3 (async) + // + // Wait for the execution layer to return an execution payload (if one is required). + let prepare_payload_handle = partial_beacon_block.prepare_payload_handle.take(); + let execution_payload = if let Some(prepare_payload_handle) = prepare_payload_handle { + let execution_payload = prepare_payload_handle + .await + .map_err(BlockProductionError::TokioJoin)? + .ok_or(BlockProductionError::ShuttingDown)??; + Some(execution_payload) + } else { + None + }; + + // Part 3/3 (blocking) + // + // Perform the final steps of combining all the parts and computing the state root. + let chain = self.clone(); + self.task_executor + .spawn_blocking_handle( + move || { + chain.complete_partial_beacon_block( + partial_beacon_block, + execution_payload, + verification, + ) + }, + "complete_partial_beacon_block", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)? + } + + fn produce_partial_beacon_block>( + self: &Arc, + mut state: BeaconState, + state_root_opt: Option, + produce_at_slot: Slot, + randao_reveal: Signature, + validator_graffiti: Option, + ) -> Result, BlockProductionError> { let eth1_chain = self .eth1_chain .as_ref() @@ -3154,13 +3244,35 @@ impl BeaconChain { state.latest_block_header().canonical_root() }; + let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64; + + let pubkey_opt = state + .validators() + .get(proposer_index as usize) + .map(|v| v.pubkey); + + // If required, start the process of loading an execution payload from the EL early. 
This + // allows it to run concurrently with things like attestation packing. + let prepare_payload_handle = match &state { + BeaconState::Base(_) | BeaconState::Altair(_) => None, + BeaconState::Merge(_) => { + let finalized_checkpoint = self.canonical_head.cached_head().finalized_checkpoint(); + let prepare_payload_handle = get_execution_payload( + self.clone(), + &state, + finalized_checkpoint, + proposer_index, + pubkey_opt, + )?; + Some(prepare_payload_handle) + } + }; + let (proposer_slashings, attester_slashings, voluntary_exits) = self.op_pool.get_slashings_and_exits(&state, &self.spec); let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; - let deposits = eth1_chain - .deposits_for_block_inclusion(&state, ð1_data, &self.spec)? - .into(); + let deposits = eth1_chain.deposits_for_block_inclusion(&state, ð1_data, &self.spec)?; // Iterate through the naive aggregation pool and ensure all the attestations from there // are included in the operation pool. @@ -3209,21 +3321,16 @@ impl BeaconChain { curr_attestation_filter, &self.spec, ) - .map_err(BlockProductionError::OpPoolError)? - .into(); + .map_err(BlockProductionError::OpPoolError)?; drop(attestation_packing_timer); let slot = state.slot(); let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64; - let pubkey_opt = state - .validators() - .get(proposer_index as usize) - .map(|v| v.pubkey); - - // Closure to fetch a sync aggregate in cases where it is required. - let get_sync_aggregate = || -> Result, BlockProductionError> { - Ok(self + let sync_aggregate = if matches!(&state, BeaconState::Base(_)) { + None + } else { + let sync_aggregate = self .op_pool .get_sync_aggregate(&state) .map_err(BlockProductionError::OpPoolError)? 
@@ -3234,9 +3341,54 @@ impl BeaconChain { "slot" => state.slot(), ); SyncAggregate::new() - })) + }); + Some(sync_aggregate) }; + Ok(PartialBeaconBlock { + state, + slot, + proposer_index, + parent_root, + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + prepare_payload_handle, + }) + } + + fn complete_partial_beacon_block>( + &self, + partial_beacon_block: PartialBeaconBlock, + execution_payload: Option, + verification: ProduceBlockVerification, + ) -> Result, BlockProductionError> { + let PartialBeaconBlock { + mut state, + slot, + proposer_index, + parent_root, + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + // We don't need the prepare payload handle since the `execution_payload` is passed into + // this function. We can assume that the handle has already been consumed in order to + // produce said `execution_payload`. 
+ prepare_payload_handle: _, + } = partial_beacon_block; + let inner_block = match &state { BeaconState::Base(_) => BeaconBlock::Base(BeaconBlockBase { slot, @@ -3249,56 +3401,51 @@ impl BeaconChain { graffiti, proposer_slashings: proposer_slashings.into(), attester_slashings: attester_slashings.into(), - attestations, - deposits, + attestations: attestations.into(), + deposits: deposits.into(), voluntary_exits: voluntary_exits.into(), _phantom: PhantomData, }, }), - BeaconState::Altair(_) => { - let sync_aggregate = get_sync_aggregate()?; - BeaconBlock::Altair(BeaconBlockAltair { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyAltair { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations, - deposits, - voluntary_exits: voluntary_exits.into(), - sync_aggregate, - _phantom: PhantomData, - }, - }) - } - BeaconState::Merge(_) => { - let sync_aggregate = get_sync_aggregate()?; - let execution_payload = - get_execution_payload::(self, &state, proposer_index, pubkey_opt)?; - BeaconBlock::Merge(BeaconBlockMerge { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyMerge { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations, - deposits, - voluntary_exits: voluntary_exits.into(), - sync_aggregate, - execution_payload, - }, - }) - } + BeaconState::Altair(_) => BeaconBlock::Altair(BeaconBlockAltair { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyAltair { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + 
.ok_or(BlockProductionError::MissingSyncAggregate)?, + _phantom: PhantomData, + }, + }), + BeaconState::Merge(_) => BeaconBlock::Merge(BeaconBlockMerge { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyMerge { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: execution_payload + .ok_or(BlockProductionError::MissingExecutionPayload)?, + }, + }), }; let block = SignedBeaconBlock::from_block( @@ -3362,7 +3509,7 @@ impl BeaconChain { /// results in the justified checkpoint being invalidated. /// /// See the documentation of `InvalidationOperation` for information about defining `op`. - pub fn process_invalid_execution_payload( + pub async fn process_invalid_execution_payload( self: &Arc, op: &InvalidationOperation, ) -> Result<(), Error> { @@ -3373,8 +3520,26 @@ impl BeaconChain { "block_root" => ?op.block_root(), ); + // Update the execution status in fork choice. + // + // Use a blocking task since it interacts with the `canonical_head` lock. Lock contention + // on the core executor is bad. + let chain = self.clone(); + let inner_op = op.clone(); + let fork_choice_result = self + .spawn_blocking_handle( + move || { + chain + .canonical_head + .fork_choice_write_lock() + .on_invalid_execution_payload(&inner_op) + }, + "invalid_payload_fork_choice_update", + ) + .await?; + // Update fork choice. - if let Err(e) = self.fork_choice.write().on_invalid_execution_payload(op) { + if let Err(e) = fork_choice_result { crit!( self.log, "Failed to process invalid payload"; @@ -3389,7 +3554,7 @@ impl BeaconChain { // // Don't return early though, since invalidating the justified checkpoint might cause an // error here. 
- if let Err(e) = self.fork_choice() { + if let Err(e) = self.recompute_head_at_current_slot().await { crit!( self.log, "Failed to run fork choice routine"; @@ -3397,8 +3562,22 @@ impl BeaconChain { ); } - // Atomically obtain the justified root from fork choice. - let justified_block = self.fork_choice.read().get_justified_block()?; + // Obtain the justified root from fork choice. + // + // Use a blocking task since it interacts with the `canonical_head` lock. Lock contention + // on the core executor is bad. + let chain = self.clone(); + let justified_block = self + .spawn_blocking_handle( + move || { + chain + .canonical_head + .fork_choice_read_lock() + .get_justified_block() + }, + "invalid_payload_fork_choice_get_justified", + ) + .await??; if justified_block.execution_status.is_invalid() { crit!( @@ -3430,452 +3609,10 @@ impl BeaconChain { Ok(()) } - /// Execute the fork choice algorithm and enthrone the result as the canonical head. - pub fn fork_choice(self: &Arc) -> Result<(), Error> { - self.fork_choice_at_slot(self.slot()?) - } - - /// Execute fork choice at `slot`, processing queued attestations from `slot - 1` and earlier. - /// - /// The `slot` is not verified in any way, callers should ensure it corresponds to at most - /// one slot ahead of the current wall-clock slot. - pub fn fork_choice_at_slot(self: &Arc, slot: Slot) -> Result<(), Error> { - metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); - let _timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES); - - let result = self.fork_choice_internal(slot); - - if result.is_err() { - metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS); - } - - result - } - - fn fork_choice_internal(self: &Arc, slot: Slot) -> Result<(), Error> { - // Atomically obtain the head block root and the finalized block. - let (beacon_block_root, finalized_block) = { - let mut fork_choice = self.fork_choice.write(); - - // Determine the root of the block that is the head of the chain. 
- let beacon_block_root = fork_choice.get_head(slot, &self.spec)?; - - (beacon_block_root, fork_choice.get_finalized_block()?) - }; - - let current_head = self.head_info()?; - let old_finalized_checkpoint = current_head.finalized_checkpoint; - - // Exit early if the head hasn't changed. - if beacon_block_root == current_head.block_root { - return Ok(()); - } - - // Check to ensure that this finalized block hasn't been marked as invalid. - if let ExecutionStatus::Invalid(block_hash) = finalized_block.execution_status { - crit!( - self.log, - "Finalized block has an invalid payload"; - "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \ - You may be on a hostile network.", - "block_hash" => ?block_hash - ); - let mut shutdown_sender = self.shutdown_sender(); - shutdown_sender - .try_send(ShutdownReason::Failure( - "Finalized block has an invalid execution payload.", - )) - .map_err(BeaconChainError::InvalidFinalizedPayloadShutdownError)?; - - // Exit now, the node is in an invalid state. - return Err(Error::InvalidFinalizedPayload { - finalized_root: finalized_block.root, - execution_block_hash: block_hash, - }); - } - - let lag_timer = metrics::start_timer(&metrics::FORK_CHOICE_SET_HEAD_LAG_TIMES); - - // At this point we know that the new head block is not the same as the previous one - metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD); - - // Try and obtain the snapshot for `beacon_block_root` from the snapshot cache, falling - // back to a database read if that fails. - let new_head = self - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_cloned(beacon_block_root, CloneConfig::committee_caches_only()) - }) - .map::, _>(Ok) - .unwrap_or_else(|| { - let beacon_block = self - .store - .get_full_block(&beacon_block_root)? 
- .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; - - let beacon_state_root = beacon_block.state_root(); - let beacon_state: BeaconState = self - .get_state(&beacon_state_root, Some(beacon_block.slot()))? - .ok_or(Error::MissingBeaconState(beacon_state_root))?; - - Ok(BeaconSnapshot { - beacon_block, - beacon_block_root, - beacon_state, - }) - }) - .and_then(|mut snapshot| { - // Regardless of where we got the state from, attempt to build the committee - // caches. - snapshot - .beacon_state - .build_all_committee_caches(&self.spec) - .map_err(Into::into) - .map(|()| snapshot) - })?; - - // Attempt to detect if the new head is not on the same chain as the previous block - // (i.e., a re-org). - // - // Note: this will declare a re-org if we skip `SLOTS_PER_HISTORICAL_ROOT` blocks - // between calls to fork choice without swapping between chains. This seems like an - // extreme-enough scenario that a warning is fine. - let is_reorg = new_head - .beacon_state - .get_block_root(current_head.slot) - .map_or(true, |root| *root != current_head.block_root); - - let mut reorg_distance = Slot::new(0); - - if is_reorg { - match self.find_reorg_slot(&new_head.beacon_state, new_head.beacon_block_root) { - Ok(slot) => reorg_distance = current_head.slot.saturating_sub(slot), - Err(e) => { - warn!( - self.log, - "Could not find re-org depth"; - "error" => format!("{:?}", e), - ); - } - } - - metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT); - metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT_INTEROP); - warn!( - self.log, - "Beacon chain re-org"; - "previous_head" => ?current_head.block_root, - "previous_slot" => current_head.slot, - "new_head_parent" => ?new_head.beacon_block.parent_root(), - "new_head" => ?beacon_block_root, - "new_slot" => new_head.beacon_block.slot(), - "reorg_distance" => reorg_distance, - ); - } else { - debug!( - self.log, - "Head beacon block"; - "justified_root" => ?new_head.beacon_state.current_justified_checkpoint().root, - 
"justified_epoch" => new_head.beacon_state.current_justified_checkpoint().epoch, - "finalized_root" => ?new_head.beacon_state.finalized_checkpoint().root, - "finalized_epoch" => new_head.beacon_state.finalized_checkpoint().epoch, - "root" => ?beacon_block_root, - "slot" => new_head.beacon_block.slot(), - ); - }; - - let new_finalized_checkpoint = new_head.beacon_state.finalized_checkpoint(); - - // It is an error to try to update to a head with a lesser finalized epoch. - if new_finalized_checkpoint.epoch < old_finalized_checkpoint.epoch { - return Err(Error::RevertedFinalizedEpoch { - previous_epoch: old_finalized_checkpoint.epoch, - new_epoch: new_finalized_checkpoint.epoch, - }); - } - - let is_epoch_transition = current_head.slot.epoch(T::EthSpec::slots_per_epoch()) - < new_head - .beacon_state - .slot() - .epoch(T::EthSpec::slots_per_epoch()); - - let update_head_timer = metrics::start_timer(&metrics::UPDATE_HEAD_TIMES); - - // These fields are used for server-sent events. - let state_root = new_head.beacon_state_root(); - let head_slot = new_head.beacon_state.slot(); - let head_proposer_index = new_head.beacon_block.message().proposer_index(); - let proposer_graffiti = new_head - .beacon_block - .message() - .body() - .graffiti() - .as_utf8_lossy(); - - // Find the dependent roots associated with this head before updating the snapshot. This - // is to ensure consistency when sending server sent events later in this method. - let dependent_root = new_head - .beacon_state - .proposer_shuffling_decision_root(self.genesis_block_root); - let prev_dependent_root = new_head - .beacon_state - .attester_shuffling_decision_root(self.genesis_block_root, RelativeEpoch::Current); - - drop(lag_timer); - - // Clear the early attester cache in case it conflicts with `self.canonical_head`. - self.early_attester_cache.clear(); - - // Update the snapshot that stores the head of the chain at the time it received the - // block. 
- *self - .canonical_head - .try_write_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout)? = new_head; - - // The block has now been set as head so we can record times and delays. - metrics::stop_timer(update_head_timer); - - let block_time_set_as_head = timestamp_now(); - - // Calculate the total delay between the start of the slot and when it was set as head. - let block_delay_total = - get_slot_delay_ms(block_time_set_as_head, head_slot, &self.slot_clock); - - // Do not write to the cache for blocks older than 2 epochs, this helps reduce writes to - // the cache during sync. - if block_delay_total < self.slot_clock.slot_duration() * 64 { - self.block_times_cache.write().set_time_set_as_head( - beacon_block_root, - current_head.slot, - block_time_set_as_head, - ); - } - - // If a block comes in from over 4 slots ago, it is most likely a block from sync. - let block_from_sync = block_delay_total > self.slot_clock.slot_duration() * 4; - - // Determine whether the block has been set as head too late for proper attestation - // production. - let late_head = block_delay_total >= self.slot_clock.unagg_attestation_production_delay(); - - // Do not store metrics if the block was > 4 slots old, this helps prevent noise during - // sync. - if !block_from_sync { - // Observe the total block delay. This is the delay between the time the slot started - // and when the block was set as head. - metrics::observe_duration( - &metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME, - block_delay_total, - ); - - // Observe the delay between when we imported the block and when we set the block as - // head. 
- let block_delays = self.block_times_cache.read().get_block_delays( - beacon_block_root, - self.slot_clock - .start_of(head_slot) - .unwrap_or_else(|| Duration::from_secs(0)), - ); - - metrics::observe_duration( - &metrics::BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME, - block_delays - .observed - .unwrap_or_else(|| Duration::from_secs(0)), - ); - - metrics::observe_duration( - &metrics::BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME, - block_delays - .set_as_head - .unwrap_or_else(|| Duration::from_secs(0)), - ); - - // If the block was enshrined as head too late for attestations to be created for it, - // log a debug warning and increment a metric. - if late_head { - metrics::inc_counter(&metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_EXCEEDED_TOTAL); - debug!( - self.log, - "Delayed head block"; - "block_root" => ?beacon_block_root, - "proposer_index" => head_proposer_index, - "slot" => head_slot, - "block_delay" => ?block_delay_total, - "observed_delay" => ?block_delays.observed, - "imported_delay" => ?block_delays.imported, - "set_as_head_delay" => ?block_delays.set_as_head, - ); - } - } - - self.snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .map(|mut snapshot_cache| { - snapshot_cache.update_head(beacon_block_root); - }) - .unwrap_or_else(|| { - error!( - self.log, - "Failed to obtain cache write lock"; - "lock" => "snapshot_cache", - "task" => "update head" - ); - }); - - if is_epoch_transition || is_reorg { - self.persist_head_and_fork_choice()?; - self.op_pool.prune_attestations(self.epoch()?); - } - - if new_finalized_checkpoint.epoch != old_finalized_checkpoint.epoch { - // Due to race conditions, it's technically possible that the head we load here is - // different to the one earlier in this function. - // - // Since the head can't move backwards in terms of finalized epoch, we can only load a - // head with a *later* finalized state. There is no harm in this. 
- let head = self - .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout)?; - - // State root of the finalized state on the epoch boundary, NOT the state - // of the finalized block. We need to use an iterator in case the state is beyond - // the reach of the new head's `state_roots` array. - let new_finalized_slot = head - .beacon_state - .finalized_checkpoint() - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - let new_finalized_state_root = process_results( - StateRootsIterator::new(&self.store, &head.beacon_state), - |mut iter| { - iter.find_map(|(state_root, slot)| { - if slot == new_finalized_slot { - Some(state_root) - } else { - None - } - }) - }, - )? - .ok_or(Error::MissingFinalizedStateRoot(new_finalized_slot))?; - - self.after_finalization(&head.beacon_state, new_finalized_state_root)?; - } - - // Register a server-sent event if necessary - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_head_subscribers() { - match (dependent_root, prev_dependent_root) { - (Ok(current_duty_dependent_root), Ok(previous_duty_dependent_root)) => { - event_handler.register(EventKind::Head(SseHead { - slot: head_slot, - block: beacon_block_root, - state: state_root, - current_duty_dependent_root, - previous_duty_dependent_root, - epoch_transition: is_epoch_transition, - })); - } - (Err(e), _) | (_, Err(e)) => { - warn!( - self.log, - "Unable to find dependent roots, cannot register head event"; - "error" => ?e - ); - } - } - } - - if is_reorg && event_handler.has_reorg_subscribers() { - event_handler.register(EventKind::ChainReorg(SseChainReorg { - slot: head_slot, - depth: reorg_distance.as_u64(), - old_head_block: current_head.block_root, - old_head_state: current_head.state_root, - new_head_block: beacon_block_root, - new_head_state: state_root, - epoch: head_slot.epoch(T::EthSpec::slots_per_epoch()), - })); - } - - if !block_from_sync && late_head && event_handler.has_late_head_subscribers() 
{ - let peer_info = self - .block_times_cache - .read() - .get_peer_info(beacon_block_root); - let block_delays = self.block_times_cache.read().get_block_delays( - beacon_block_root, - self.slot_clock - .start_of(head_slot) - .unwrap_or_else(|| Duration::from_secs(0)), - ); - event_handler.register(EventKind::LateHead(SseLateHead { - slot: head_slot, - block: beacon_block_root, - peer_id: peer_info.id, - peer_client: peer_info.client, - proposer_index: head_proposer_index, - proposer_graffiti, - block_delay: block_delay_total, - observed_delay: block_delays.observed, - imported_delay: block_delays.imported, - set_as_head_delay: block_delays.set_as_head, - })); - } - } - - // Update the execution layer. - // Always use the wall-clock slot to update the execution engine rather than the `slot` - // passed in. - if let Err(e) = self.update_execution_engine_forkchoice_blocking(self.slot()?) { - crit!( - self.log, - "Failed to update execution head"; - "error" => ?e - ); - } - - // Performing this call immediately after - // `update_execution_engine_forkchoice_blocking` might result in two calls to fork - // choice updated, one *without* payload attributes and then a second *with* - // payload attributes. - // - // This seems OK. It's not a significant waste of EL<>CL bandwidth or resources, as - // far as I know. - if let Err(e) = self.prepare_beacon_proposer_blocking() { - crit!( - self.log, - "Failed to prepare proposers after fork choice"; - "error" => ?e - ); - } - - Ok(()) - } - - pub fn prepare_beacon_proposer_blocking(self: &Arc) -> Result<(), Error> { - let current_slot = self.slot()?; - - // Avoids raising an error before Bellatrix. - // - // See `Self::prepare_beacon_proposer_async` for more detail. 
- if self.slot_is_prior_to_bellatrix(current_slot + 1) { - return Ok(()); - } - - let execution_layer = self - .execution_layer - .as_ref() - .ok_or(Error::ExecutionLayerMissing)?; - - execution_layer - .block_on_generic(|_| self.prepare_beacon_proposer_async(current_slot)) - .map_err(Error::PrepareProposerBlockingFailed)? + pub fn block_is_known_to_fork_choice(&self, root: &Hash256) -> bool { + self.canonical_head + .fork_choice_read_lock() + .contains_block(root) } /// Determines the beacon proposer for the next slot. If that proposer is registered in the @@ -3890,7 +3627,7 @@ impl BeaconChain { /// 1. We're in the tail-end of the slot (as defined by PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR) /// 2. The head block is one slot (or less) behind the prepare slot (e.g., we're preparing for /// the next slot and the block at the current slot is already known). - pub async fn prepare_beacon_proposer_async( + pub async fn prepare_beacon_proposer( self: &Arc, current_slot: Slot, ) -> Result<(), Error> { @@ -3913,20 +3650,45 @@ impl BeaconChain { return Ok(()); } - let head = self.head_info()?; - let head_epoch = head.slot.epoch(T::EthSpec::slots_per_epoch()); + // Atomically read some values from the canonical head, whilst avoiding holding the cached + // head `Arc` any longer than necessary. + // + // Use a blocking task since blocking the core executor on the canonical head read lock can + // block the core tokio executor. 
+ let chain = self.clone(); + let (head_slot, head_root, head_decision_root, head_random, forkchoice_update_params) = + self.spawn_blocking_handle( + move || { + let cached_head = chain.canonical_head.cached_head(); + let head_block_root = cached_head.head_block_root(); + let decision_root = cached_head + .snapshot + .beacon_state + .proposer_shuffling_decision_root(head_block_root)?; + Ok::<_, Error>(( + cached_head.head_slot(), + head_block_root, + decision_root, + cached_head.head_random()?, + cached_head.forkchoice_update_parameters(), + )) + }, + "prepare_beacon_proposer_fork_choice_read", + ) + .await??; + let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch()); // Don't bother with proposer prep if the head is more than // `PREPARE_PROPOSER_HISTORIC_EPOCHS` prior to the current slot. // // This prevents the routine from running during sync. - if head.slot + T::EthSpec::slots_per_epoch() * PREPARE_PROPOSER_HISTORIC_EPOCHS + if head_slot + T::EthSpec::slots_per_epoch() * PREPARE_PROPOSER_HISTORIC_EPOCHS < current_slot { debug!( self.log, "Head too old for proposer prep"; - "head_slot" => head.slot, + "head_slot" => head_slot, "current_slot" => current_slot, ); return Ok(()); @@ -3935,9 +3697,9 @@ impl BeaconChain { // Ensure that the shuffling decision root is correct relative to the epoch we wish to // query. let shuffling_decision_root = if head_epoch == prepare_epoch { - head.proposer_shuffling_decision_root + head_decision_root } else { - head.block_root + head_root }; // Read the proposer from the proposer cache. @@ -3967,7 +3729,7 @@ impl BeaconChain { return Ok(()); } - let (proposers, decision_root, fork) = + let (proposers, decision_root, _, fork) = compute_proposer_duties_from_head(prepare_epoch, self)?; let proposer_index = prepare_slot.as_usize() % (T::EthSpec::slots_per_epoch() as usize); @@ -4013,7 +3775,7 @@ impl BeaconChain { .start_of(prepare_slot) .ok_or(Error::InvalidSlot(prepare_slot))? 
.as_secs(), - prev_randao: head.random, + prev_randao: head_random, suggested_fee_recipient: execution_layer .get_suggested_fee_recipient(proposer as u64) .await, @@ -4023,18 +3785,13 @@ impl BeaconChain { self.log, "Preparing beacon proposer"; "payload_attributes" => ?payload_attributes, - "head_root" => ?head.block_root, + "head_root" => ?head_root, "prepare_slot" => prepare_slot, "validator" => proposer, ); let already_known = execution_layer - .insert_proposer( - prepare_slot, - head.block_root, - proposer as u64, - payload_attributes, - ) + .insert_proposer(prepare_slot, head_root, proposer as u64, payload_attributes) .await; // Only push a log to the user if this is the first time we've seen this proposer for this // slot. @@ -4076,7 +3833,7 @@ impl BeaconChain { // known). if till_prepare_slot <= self.slot_clock.slot_duration() / PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR - || head.slot + 1 >= prepare_slot + || head_slot + 1 >= prepare_slot { debug!( self.log, @@ -4085,37 +3842,17 @@ impl BeaconChain { "prepare_slot" => prepare_slot ); - self.update_execution_engine_forkchoice_async(current_slot) + self.update_execution_engine_forkchoice(current_slot, forkchoice_update_params) .await?; } Ok(()) } - pub fn update_execution_engine_forkchoice_blocking( - self: &Arc, - current_slot: Slot, - ) -> Result<(), Error> { - // Avoids raising an error before Bellatrix. - // - // See `Self::update_execution_engine_forkchoice_async` for more detail. - if self.slot_is_prior_to_bellatrix(current_slot + 1) { - return Ok(()); - } - - let execution_layer = self - .execution_layer - .as_ref() - .ok_or(Error::ExecutionLayerMissing)?; - - execution_layer - .block_on_generic(|_| self.update_execution_engine_forkchoice_async(current_slot)) - .map_err(Error::ForkchoiceUpdate)? 
- } - - pub async fn update_execution_engine_forkchoice_async( + pub async fn update_execution_engine_forkchoice( self: &Arc, current_slot: Slot, + params: ForkchoiceUpdateParameters, ) -> Result<(), Error> { let next_slot = current_slot + 1; @@ -4153,73 +3890,56 @@ impl BeaconChain { // `execution_engine_forkchoice_lock` apart from the one here. let forkchoice_lock = execution_layer.execution_engine_forkchoice_lock().await; - // Deadlock warning: - // - // We are taking the `self.fork_choice` lock whilst holding the `forkchoice_lock`. This - // is intentional, since it allows us to ensure a consistent ordering of messages to the - // execution layer. - let forkchoice_update_parameters = - self.fork_choice.read().get_forkchoice_update_parameters(); - let (head_block_root, head_hash, finalized_hash) = if let Some(params) = - forkchoice_update_parameters + let (head_block_root, head_hash, finalized_hash) = if let Some(head_hash) = params.head_hash { - if let Some(head_hash) = params.head_hash { - ( - params.head_root, - head_hash, - params - .finalized_hash - .unwrap_or_else(ExecutionBlockHash::zero), - ) - } else { - // The head block does not have an execution block hash. We must check to see if we - // happen to be the proposer of the transition block, in which case we still need to - // send forkchoice_updated. - match self.spec.fork_name_at_slot::(next_slot) { - // We are pre-bellatrix; no need to update the EL. - ForkName::Base | ForkName::Altair => return Ok(()), - _ => { - // We are post-bellatrix - if execution_layer - .payload_attributes(next_slot, params.head_root) + ( + params.head_root, + head_hash, + params + .finalized_hash + .unwrap_or_else(ExecutionBlockHash::zero), + ) + } else { + // The head block does not have an execution block hash. We must check to see if we + // happen to be the proposer of the transition block, in which case we still need to + // send forkchoice_updated. 
+ match self.spec.fork_name_at_slot::(next_slot) { + // We are pre-bellatrix; no need to update the EL. + ForkName::Base | ForkName::Altair => return Ok(()), + _ => { + // We are post-bellatrix + if execution_layer + .payload_attributes(next_slot, params.head_root) + .await + .is_some() + { + // We are a proposer, check for terminal_pow_block_hash + if let Some(terminal_pow_block_hash) = execution_layer + .get_terminal_pow_block_hash(&self.spec) .await - .is_some() + .map_err(Error::ForkchoiceUpdate)? { - // We are a proposer, check for terminal_pow_block_hash - if let Some(terminal_pow_block_hash) = execution_layer - .get_terminal_pow_block_hash(&self.spec) - .await - .map_err(Error::ForkchoiceUpdate)? - { - info!( - self.log, - "Prepared POS transition block proposer"; "slot" => next_slot - ); - ( - params.head_root, - terminal_pow_block_hash, - params - .finalized_hash - .unwrap_or_else(ExecutionBlockHash::zero), - ) - } else { - // TTD hasn't been reached yet, no need to update the EL. - return Ok(()); - } + info!( + self.log, + "Prepared POS transition block proposer"; "slot" => next_slot + ); + ( + params.head_root, + terminal_pow_block_hash, + params + .finalized_hash + .unwrap_or_else(ExecutionBlockHash::zero), + ) } else { - // We are not a proposer, no need to update the EL. + // TTD hasn't been reached yet, no need to update the EL. return Ok(()); } + } else { + // We are not a proposer, no need to update the EL. + return Ok(()); } } } - } else { - warn!( - self.log, - "Missing forkchoice params"; - "msg" => "please report this non-critical bug" - ); - return Ok(()); }; let forkchoice_updated_response = execution_layer @@ -4235,11 +3955,19 @@ impl BeaconChain { Ok(status) => match status { PayloadStatus::Valid => { // Ensure that fork choice knows that the block is no longer optimistic. 
- if let Err(e) = self - .fork_choice - .write() - .on_valid_execution_payload(head_block_root) - { + let chain = self.clone(); + let fork_choice_update_result = self + .spawn_blocking_handle( + move || { + chain + .canonical_head + .fork_choice_write_lock() + .on_valid_execution_payload(head_block_root) + }, + "update_execution_engine_valid_payload", + ) + .await?; + if let Err(e) = fork_choice_update_result { error!( self.log, "Failed to validate payload"; @@ -4275,24 +4003,14 @@ impl BeaconChain { ); // The execution engine has stated that all blocks between the // `head_execution_block_hash` and `latest_valid_hash` are invalid. - let chain = self.clone(); - execution_layer - .executor() - .spawn_blocking_handle( - move || { - chain.process_invalid_execution_payload( - &InvalidationOperation::InvalidateMany { - head_block_root, - always_invalidate_head: true, - latest_valid_ancestor: latest_valid_hash, - }, - ) - }, - "process_invalid_execution_payload_many", - ) - .ok_or(BeaconChainError::RuntimeShutdown)? - .await - .map_err(BeaconChainError::ProcessInvalidExecutionPayload)??; + self.process_invalid_execution_payload( + &InvalidationOperation::InvalidateMany { + head_block_root, + always_invalidate_head: true, + latest_valid_ancestor: latest_valid_hash, + }, + ) + .await?; Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } @@ -4308,22 +4026,10 @@ impl BeaconChain { // // Using a `None` latest valid ancestor will result in only the head block // being invalidated (no ancestors). - let chain = self.clone(); - execution_layer - .executor() - .spawn_blocking_handle( - move || { - chain.process_invalid_execution_payload( - &InvalidationOperation::InvalidateOne { - block_root: head_block_root, - }, - ) - }, - "process_invalid_execution_payload_single", - ) - .ok_or(BeaconChainError::RuntimeShutdown)? 
- .await - .map_err(BeaconChainError::ProcessInvalidExecutionPayload)??; + self.process_invalid_execution_payload(&InvalidationOperation::InvalidateOne { + block_root: head_block_root, + }) + .await?; Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } @@ -4333,30 +4039,85 @@ impl BeaconChain { } /// Returns `true` if the given slot is prior to the `bellatrix_fork_epoch`. - fn slot_is_prior_to_bellatrix(&self, slot: Slot) -> bool { + pub fn slot_is_prior_to_bellatrix(&self, slot: Slot) -> bool { self.spec.bellatrix_fork_epoch.map_or(true, |bellatrix| { slot.epoch(T::EthSpec::slots_per_epoch()) < bellatrix }) } - /// Returns the status of the current head block, regarding the validity of the execution - /// payload. - pub fn head_safety_status(&self) -> Result { - let head = self.head_info()?; - let head_block = self - .fork_choice - .read() - .get_block(&head.block_root) - .ok_or(BeaconChainError::HeadMissingFromForkChoice(head.block_root))?; + /// Returns the value of `execution_optimistic` for `block`. + /// + /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. + /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic`. + pub fn is_optimistic_block( + &self, + block: &SignedBeaconBlock, + ) -> Result { + // Check if the block is pre-Bellatrix. + if self.slot_is_prior_to_bellatrix(block.slot()) { + Ok(false) + } else { + self.canonical_head + .fork_choice_read_lock() + .is_optimistic_block(&block.canonical_root()) + .map_err(BeaconChainError::ForkChoiceError) + } + } - let status = match head_block.execution_status { - ExecutionStatus::Valid(block_hash) => HeadSafetyStatus::Safe(Some(block_hash)), - ExecutionStatus::Invalid(block_hash) => HeadSafetyStatus::Invalid(block_hash), - ExecutionStatus::Optimistic(block_hash) => HeadSafetyStatus::Unsafe(block_hash), - ExecutionStatus::Irrelevant(_) => HeadSafetyStatus::Safe(None), - }; + /// Returns the value of `execution_optimistic` for `head_block`. 
+ /// + /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. + /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic`. + /// + /// This function will return an error if `head_block` is not present in the fork choice store + /// and so should only be used on the head block or when the block *should* be present in the + /// fork choice store. + /// + /// There is a potential race condition when syncing where the block_root of `head_block` could + /// be pruned from the fork choice store before being read. + pub fn is_optimistic_head_block( + &self, + head_block: &SignedBeaconBlock, + ) -> Result { + // Check if the block is pre-Bellatrix. + if self.slot_is_prior_to_bellatrix(head_block.slot()) { + Ok(false) + } else { + self.canonical_head + .fork_choice_read_lock() + .is_optimistic_block_no_fallback(&head_block.canonical_root()) + .map_err(BeaconChainError::ForkChoiceError) + } + } - Ok(status) + /// Returns the value of `execution_optimistic` for the current head block. + /// You can optionally provide `head_info` if it was computed previously. + /// + /// Returns `Ok(false)` if the head block is pre-Bellatrix, or has `ExecutionStatus::Valid`. + /// Returns `Ok(true)` if the head block has `ExecutionStatus::Optimistic`. + /// + /// There is a potential race condition when syncing where the block root of `head_info` could + /// be pruned from the fork choice store before being read. + pub fn is_optimistic_head(&self) -> Result { + self.canonical_head + .head_execution_status() + .map(|status| status.is_optimistic()) + } + + pub fn is_optimistic_block_root( + &self, + block_slot: Slot, + block_root: &Hash256, + ) -> Result { + // Check if the block is pre-Bellatrix. 
+ if self.slot_is_prior_to_bellatrix(block_slot) { + Ok(false) + } else { + self.canonical_head + .fork_choice_read_lock() + .is_optimistic_block_no_fallback(block_root) + .map_err(BeaconChainError::ForkChoiceError) + } } /// This function takes a configured weak subjectivity `Checkpoint` and the latest finalized `Checkpoint`. @@ -4418,7 +4179,7 @@ impl BeaconChain { /// Note: this function **MUST** be called from a non-async context since /// it contains a call to `fork_choice` which may eventually call /// `tokio::runtime::block_on` in certain cases. - pub fn per_slot_task(self: &Arc) { + pub async fn per_slot_task(self: &Arc) { trace!(self.log, "Running beacon chain per slot tasks"); if let Some(slot) = self.slot_clock.now() { // Always run the light-weight pruning tasks (these structures should be empty during @@ -4427,14 +4188,12 @@ impl BeaconChain { self.block_times_cache.write().prune(slot); // Don't run heavy-weight tasks during sync. - if self.best_slot().map_or(true, |head_slot| { - head_slot + MAX_PER_SLOT_FORK_CHOICE_DISTANCE < slot - }) { + if self.best_slot() + MAX_PER_SLOT_FORK_CHOICE_DISTANCE < slot { return; } // Run fork choice and signal to any waiting task that it has completed. - if let Err(e) = self.fork_choice() { + if let Err(e) = self.recompute_head_at_current_slot().await { error!( self.log, "Fork choice error at slot start"; @@ -4445,80 +4204,28 @@ impl BeaconChain { // Send the notification regardless of fork choice success, this is a "best effort" // notification and we don't want block production to hit the timeout in case of error. - if let Some(tx) = &self.fork_choice_signal_tx { - if let Err(e) = tx.notify_fork_choice_complete(slot) { - warn!( - self.log, - "Error signalling fork choice waiter"; - "error" => ?e, - "slot" => slot, - ); - } - } + // Use a blocking task to avoid blocking the core executor whilst waiting for locks + // in `ForkChoiceSignalTx`. 
+ let chain = self.clone(); + self.task_executor.clone().spawn_blocking( + move || { + // Signal block proposal for the next slot (if it happens to be waiting). + if let Some(tx) = &chain.fork_choice_signal_tx { + if let Err(e) = tx.notify_fork_choice_complete(slot) { + warn!( + chain.log, + "Error signalling fork choice waiter"; + "error" => ?e, + "slot" => slot, + ); + } + } + }, + "per_slot_task_fc_signal_tx", + ); } } - /// Called after `self` has had a new block finalized. - /// - /// Performs pruning and finality-based optimizations. - fn after_finalization( - &self, - head_state: &BeaconState, - new_finalized_state_root: Hash256, - ) -> Result<(), Error> { - self.fork_choice.write().prune()?; - let new_finalized_checkpoint = head_state.finalized_checkpoint(); - - self.observed_block_producers.write().prune( - new_finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - ); - - self.snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .map(|mut snapshot_cache| { - snapshot_cache.prune(new_finalized_checkpoint.epoch); - debug!( - self.log, - "Snapshot cache pruned"; - "new_len" => snapshot_cache.len(), - "remaining_roots" => ?snapshot_cache.beacon_block_roots(), - ); - }) - .unwrap_or_else(|| { - error!( - self.log, - "Failed to obtain cache write lock"; - "lock" => "snapshot_cache", - "task" => "prune" - ); - }); - - self.op_pool.prune_all(head_state, self.epoch()?); - - self.store_migrator.process_finalization( - new_finalized_state_root.into(), - new_finalized_checkpoint, - self.head_tracker.clone(), - )?; - - self.attester_cache - .prune_below(new_finalized_checkpoint.epoch); - - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_finalized_subscribers() { - event_handler.register(EventKind::FinalizedCheckpoint(SseFinalizedCheckpoint { - epoch: new_finalized_checkpoint.epoch, - block: new_finalized_checkpoint.root, - state: new_finalized_state_root, - })); - } - } - - Ok(()) - } - /// Runs 
the `map_fn` with the committee cache for `shuffling_epoch` from the chain with head /// `head_block_root`. The `map_fn` will be supplied two values: /// @@ -4557,8 +4264,8 @@ impl BeaconChain { F: Fn(&CommitteeCache, Hash256) -> Result, { let head_block = self - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&head_block_root) .ok_or(Error::MissingBeaconBlock(head_block_root))?; @@ -4703,10 +4410,13 @@ impl BeaconChain { ) -> Result>>, Error> { let mut dump = vec![]; - let mut last_slot = BeaconSnapshot { - beacon_block: self.head()?.beacon_block.into(), - beacon_block_root: self.head()?.beacon_block_root, - beacon_state: self.head()?.beacon_state, + let mut last_slot = { + let head = self.canonical_head.cached_head(); + BeaconSnapshot { + beacon_block: Arc::new(head.snapshot.beacon_block.clone_as_blinded()), + beacon_block_root: head.snapshot.beacon_block_root, + beacon_state: head.snapshot.beacon_state.clone(), + } }; dump.push(last_slot.clone()); @@ -4733,7 +4443,7 @@ impl BeaconChain { })?; let slot = BeaconSnapshot { - beacon_block, + beacon_block: Arc::new(beacon_block), beacon_block_root, beacon_state, }; @@ -4771,12 +4481,7 @@ impl BeaconChain { } pub fn dump_as_dot(&self, output: &mut W) { - let canonical_head_hash = self - .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout) - .unwrap() - .beacon_block_root; + let canonical_head_hash = self.canonical_head.cached_head().head_block_root(); let mut visited: HashSet = HashSet::new(); let mut finalized_blocks: HashSet = HashSet::new(); let mut justified_blocks: HashSet = HashSet::new(); diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index dc80fb7008..c7663c77c4 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -13,8 +13,8 @@ use std::sync::Arc; use store::{Error as 
StoreError, HotColdDB, ItemStore}; use superstruct::superstruct; use types::{ - BeaconBlock, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, ExecPayload, Hash256, - Slot, + BeaconBlockRef, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, ExecPayload, + Hash256, Slot, }; #[derive(Debug)] @@ -257,7 +257,7 @@ where fn on_verified_block>( &mut self, - _block: &BeaconBlock, + _block: BeaconBlockRef, block_root: Hash256, state: &BeaconState, ) -> Result<(), Self::Error> { diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index d645201a58..e76a5a8058 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -9,12 +9,14 @@ //! values it stores are very small, so this should not be an issue. use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use fork_choice::ExecutionStatus; use lru::LruCache; use smallvec::SmallVec; use state_processing::state_advance::partial_state_advance; use std::cmp::Ordering; use types::{ - BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot, Unsigned, + BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Fork, Hash256, Slot, + Unsigned, }; /// The number of sets of proposer indices that should be cached. @@ -135,11 +137,26 @@ impl BeaconProposerCache { pub fn compute_proposer_duties_from_head( current_epoch: Epoch, chain: &BeaconChain, -) -> Result<(Vec, Hash256, Fork), BeaconChainError> { - // Take a copy of the head of the chain. - let head = chain.head()?; - let mut state = head.beacon_state; - let head_state_root = head.beacon_block.state_root(); +) -> Result<(Vec, Hash256, ExecutionStatus, Fork), BeaconChainError> { + // Atomically collect information about the head whilst holding the canonical head `Arc` as + // short as possible. 
+ let (mut state, head_state_root, head_block_root) = { + let head = chain.canonical_head.cached_head(); + // Take a copy of the head state. + let head_state = head + .snapshot + .beacon_state + .clone_with(CloneConfig::committee_caches_only()); + let head_state_root = head.head_state_root(); + let head_block_root = head.head_block_root(); + (head_state, head_state_root, head_block_root) + }; + + let execution_status = chain + .canonical_head + .fork_choice_read_lock() + .get_block_execution_status(&head_block_root) + .ok_or(BeaconChainError::HeadMissingFromForkChoice(head_block_root))?; // Advance the state into the requested epoch. ensure_state_is_in_epoch(&mut state, head_state_root, current_epoch, &chain.spec)?; @@ -153,7 +170,7 @@ pub fn compute_proposer_duties_from_head( .proposer_shuffling_decision_root(chain.genesis_block_root) .map_err(BeaconChainError::from)?; - Ok((indices, dependent_root, state.fork())) + Ok((indices, dependent_root, execution_status, state.fork())) } /// If required, advance `state` to `target_epoch`. diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs index 94adb479c8..8491622cb0 100644 --- a/beacon_node/beacon_chain/src/beacon_snapshot.rs +++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs @@ -1,4 +1,5 @@ use serde_derive::Serialize; +use std::sync::Arc; use types::{ beacon_state::CloneConfig, BeaconState, EthSpec, ExecPayload, FullPayload, Hash256, SignedBeaconBlock, @@ -8,7 +9,7 @@ use types::{ /// head, justified head and finalized head. #[derive(Clone, Serialize, PartialEq, Debug)] pub struct BeaconSnapshot = FullPayload> { - pub beacon_block: SignedBeaconBlock, + pub beacon_block: Arc>, pub beacon_block_root: Hash256, pub beacon_state: BeaconState, } @@ -16,7 +17,7 @@ pub struct BeaconSnapshot = FullPayload> impl> BeaconSnapshot { /// Create a new checkpoint. 
pub fn new( - beacon_block: SignedBeaconBlock, + beacon_block: Arc>, beacon_block_root: Hash256, beacon_state: BeaconState, ) -> Self { @@ -39,7 +40,7 @@ impl> BeaconSnapshot { /// Update all fields of the checkpoint. pub fn update( &mut self, - beacon_block: SignedBeaconBlock, + beacon_block: Arc>, beacon_block_root: Hash256, beacon_state: BeaconState, ) { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index a6cd98c253..a64fb387e3 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -31,24 +31,27 @@ //! |--------------- //! | //! ▼ -//! SignatureVerifiedBlock +//! SignatureVerifiedBlock //! | //! ▼ -//! FullyVerifiedBlock +//! ExecutionPendingBlock +//! | +//! await //! | //! ▼ //! END //! //! ``` use crate::execution_payload::{ - notify_new_payload, validate_execution_payload_for_gossip, validate_merge_block, + is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, + PayloadNotifier, }; use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ beacon_chain::{ - BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + BeaconForkChoice, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, }, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, @@ -56,11 +59,11 @@ use crate::{ use derivative::Derivative; use eth2::types::EventKind; use execution_layer::PayloadStatus; -use fork_choice::{ForkChoice, ForkChoiceStore, PayloadVerificationStatus}; +use fork_choice::PayloadVerificationStatus; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; use safe_arith::ArithError; -use slog::{debug, error, info, Logger}; +use slog::{debug, error, warn, Logger}; use 
slot_clock::SlotClock; use ssz::Encode; use state_processing::per_block_processing::is_merge_transition_block; @@ -75,16 +78,16 @@ use std::fs; use std::io::Write; use std::sync::Arc; use std::time::Duration; -use store::{Error as DBError, HotColdDB, HotStateSummary, KeyValueStore, StoreOp}; +use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; +use task_executor::JoinHandle; use tree_hash::TreeHash; -use types::ExecPayload; use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch, EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; -const POS_PANDA_BANNER: &str = r#" +pub const POS_PANDA_BANNER: &str = r#" ,,, ,,, ,,, ,,, ;" ^; ;' ", ;" ^; ;' ", ; s$$$$$$$s ; ; s$$$$$$$s ; @@ -129,7 +132,7 @@ pub enum BlockError { /// /// It's unclear if this block is valid, but it cannot be processed without already knowing /// its parent. - ParentUnknown(Box>), + ParentUnknown(Arc>), /// The block skips too many slots and is a DoS risk. TooManySkippedSlots { parent_slot: Slot, block_slot: Slot }, /// The block slot is greater than the present slot. @@ -419,6 +422,12 @@ impl From for BlockError { } } +/// Stores information about verifying a payload against an execution engine. +pub struct PayloadVerificationOutcome { + pub payload_verification_status: PayloadVerificationStatus, + pub is_valid_merge_transition_block: bool, +} + /// Information about invalid blocks which might still be slashable despite being invalid. #[allow(clippy::enum_variant_names)] pub enum BlockSlashInfo { @@ -474,7 +483,7 @@ fn process_block_slash_info( /// Verify all signatures (except deposit signatures) on all blocks in the `chain_segment`. If all /// signatures are valid, the `chain_segment` is mapped to a `Vec` that can -/// later be transformed into a `FullyVerifiedBlock` without re-checking the signatures. 
If any +/// later be transformed into a `ExecutionPendingBlock` without re-checking the signatures. If any /// signature in the block is invalid, an `Err` is returned (it is not possible to known _which_ /// signature was invalid). /// @@ -483,7 +492,7 @@ fn process_block_slash_info( /// The given `chain_segment` must span no more than two epochs, otherwise an error will be /// returned. pub fn signature_verify_chain_segment( - mut chain_segment: Vec<(Hash256, SignedBeaconBlock)>, + mut chain_segment: Vec<(Hash256, Arc>)>, chain: &BeaconChain, ) -> Result>, BlockError> { if chain_segment.is_empty() { @@ -541,7 +550,7 @@ pub fn signature_verify_chain_segment( #[derive(Derivative)] #[derivative(Debug(bound = "T: BeaconChainTypes"))] pub struct GossipVerifiedBlock { - pub block: SignedBeaconBlock, + pub block: Arc>, pub block_root: Hash256, parent: Option>, } @@ -549,11 +558,15 @@ pub struct GossipVerifiedBlock { /// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit /// signatures) have been verified. pub struct SignatureVerifiedBlock { - block: SignedBeaconBlock, + block: Arc>, block_root: Hash256, parent: Option>, } +/// Used to await the result of executing payload with a remote EE. +type PayloadVerificationHandle = + JoinHandle>>>; + /// A wrapper around a `SignedBeaconBlock` that indicates that this block is fully verified and /// ready to import into the `BeaconChain`. The validation includes: /// @@ -562,42 +575,42 @@ pub struct SignatureVerifiedBlock { /// - State root check /// - Per block processing /// -/// Note: a `FullyVerifiedBlock` is not _forever_ valid to be imported, it may later become invalid -/// due to finality or some other event. A `FullyVerifiedBlock` should be imported into the +/// Note: a `ExecutionPendingBlock` is not _forever_ valid to be imported, it may later become invalid +/// due to finality or some other event. 
A `ExecutionPendingBlock` should be imported into the /// `BeaconChain` immediately after it is instantiated. -pub struct FullyVerifiedBlock<'a, T: BeaconChainTypes> { - pub block: SignedBeaconBlock, +pub struct ExecutionPendingBlock { + pub block: Arc>, pub block_root: Hash256, pub state: BeaconState, pub parent_block: SignedBeaconBlock>, - pub confirmation_db_batch: Vec>, - pub payload_verification_status: PayloadVerificationStatus, + pub confirmed_state_roots: Vec, + pub payload_verification_handle: PayloadVerificationHandle, } -/// Implemented on types that can be converted into a `FullyVerifiedBlock`. +/// Implemented on types that can be converted into a `ExecutionPendingBlock`. /// /// Used to allow functions to accept blocks at various stages of verification. -pub trait IntoFullyVerifiedBlock: Sized { - fn into_fully_verified_block( +pub trait IntoExecutionPendingBlock: Sized { + fn into_execution_pending_block( self, chain: &Arc>, - ) -> Result, BlockError> { - self.into_fully_verified_block_slashable(chain) - .map(|fully_verified| { + ) -> Result, BlockError> { + self.into_execution_pending_block_slashable(chain) + .map(|execution_pending| { // Supply valid block to slasher. if let Some(slasher) = chain.slasher.as_ref() { - slasher.accept_block_header(fully_verified.block.signed_block_header()); + slasher.accept_block_header(execution_pending.block.signed_block_header()); } - fully_verified + execution_pending }) .map_err(|slash_info| process_block_slash_info(chain, slash_info)) } /// Convert the block to fully-verified form while producing data to aid checking slashability. - fn into_fully_verified_block_slashable( + fn into_execution_pending_block_slashable( self, chain: &Arc>, - ) -> Result, BlockSlashInfo>>; + ) -> Result, BlockSlashInfo>>; fn block(&self) -> &SignedBeaconBlock; } @@ -608,7 +621,7 @@ impl GossipVerifiedBlock { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. 
pub fn new( - block: SignedBeaconBlock, + block: Arc>, chain: &BeaconChain, ) -> Result> { // If the block is valid for gossip we don't supply it to the slasher here because @@ -623,7 +636,7 @@ impl GossipVerifiedBlock { /// As for new, but doesn't pass the block to the slasher. fn new_without_slasher_checks( - block: SignedBeaconBlock, + block: Arc>, chain: &BeaconChain, ) -> Result> { // Ensure the block is the correct structure for the fork at `block.slot()`. @@ -658,7 +671,11 @@ impl GossipVerifiedBlock { // reboot if the `observed_block_producers` cache is empty. In that case, without this // check, we will load the parent and state from disk only to find out later that we // already know this block. - if chain.fork_choice.read().contains_block(&block_root) { + if chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { return Err(BlockError::BlockIsAlreadyKnown); } @@ -678,10 +695,10 @@ impl GossipVerifiedBlock { // Do not process a block that doesn't descend from the finalized root. // // We check this *before* we load the parent so that we can return a more detailed error. - let block = check_block_is_finalized_descendant::( - block, - &chain.fork_choice.read(), - &chain.store, + check_block_is_finalized_descendant( + chain, + &chain.canonical_head.fork_choice_write_lock(), + &block, )?; let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); @@ -827,15 +844,15 @@ impl GossipVerifiedBlock { } } -impl IntoFullyVerifiedBlock for GossipVerifiedBlock { +impl IntoExecutionPendingBlock for GossipVerifiedBlock { /// Completes verification of the wrapped `block`. 
- fn into_fully_verified_block_slashable( + fn into_execution_pending_block_slashable( self, chain: &Arc>, - ) -> Result, BlockSlashInfo>> { - let fully_verified = + ) -> Result, BlockSlashInfo>> { + let execution_pending = SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?; - fully_verified.into_fully_verified_block_slashable(chain) + execution_pending.into_execution_pending_block_slashable(chain) } fn block(&self) -> &SignedBeaconBlock { @@ -849,7 +866,7 @@ impl SignatureVerifiedBlock { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. pub fn new( - block: SignedBeaconBlock, + block: Arc>, block_root: Hash256, chain: &BeaconChain, ) -> Result> { @@ -892,7 +909,7 @@ impl SignatureVerifiedBlock { /// As for `new` above but producing `BlockSlashInfo`. pub fn check_slashable( - block: SignedBeaconBlock, + block: Arc>, block_root: Hash256, chain: &BeaconChain, ) -> Result>> { @@ -947,12 +964,12 @@ impl SignatureVerifiedBlock { } } -impl IntoFullyVerifiedBlock for SignatureVerifiedBlock { +impl IntoExecutionPendingBlock for SignatureVerifiedBlock { /// Completes verification of the wrapped `block`. - fn into_fully_verified_block_slashable( + fn into_execution_pending_block_slashable( self, chain: &Arc>, - ) -> Result, BlockSlashInfo>> { + ) -> Result, BlockSlashInfo>> { let header = self.block.signed_block_header(); let (parent, block) = if let Some(parent) = self.parent { (parent, self.block) @@ -961,7 +978,7 @@ impl IntoFullyVerifiedBlock for SignatureVerifiedBlock IntoFullyVerifiedBlock for SignatureVerifiedBlock IntoFullyVerifiedBlock for SignedBeaconBlock { +impl IntoExecutionPendingBlock for Arc> { /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` - /// and then using that implementation of `IntoFullyVerifiedBlock` to complete verification. 
- fn into_fully_verified_block_slashable( + /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. + fn into_execution_pending_block_slashable( self, chain: &Arc>, - ) -> Result, BlockSlashInfo>> { + ) -> Result, BlockSlashInfo>> { // Perform an early check to prevent wasting time on irrelevant blocks. let block_root = check_block_relevancy(&self, None, chain) .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; SignatureVerifiedBlock::check_slashable(self, block_root, chain)? - .into_fully_verified_block_slashable(chain) + .into_execution_pending_block_slashable(chain) } fn block(&self) -> &SignedBeaconBlock { @@ -995,7 +1012,7 @@ impl IntoFullyVerifiedBlock for SignedBeaconBlock FullyVerifiedBlock<'a, T> { +impl ExecutionPendingBlock { /// Instantiates `Self`, a wrapper that indicates that the given `block` is fully valid. See /// the struct-level documentation for more information. /// @@ -1004,12 +1021,16 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. pub fn from_signature_verified_components( - block: SignedBeaconBlock, + block: Arc>, block_root: Hash256, parent: PreProcessingSnapshot, chain: &Arc>, ) -> Result> { - if let Some(parent) = chain.fork_choice.read().get_block(&block.parent_root()) { + if let Some(parent) = chain + .canonical_head + .fork_choice_read_lock() + .get_block(&block.parent_root()) + { // Reject any block where the parent has an invalid payload. It's impossible for a valid // block to descend from an invalid parent. if parent.execution_status.is_invalid() { @@ -1028,7 +1049,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // because it will revert finalization. Note that the finalized block is stored in fork // choice, so we will not reject any child of the finalized block (this is relevant during // genesis). 
- return Err(BlockError::ParentUnknown(Box::new(block))); + return Err(BlockError::ParentUnknown(block)); } // Reject any block that exceeds our limit on skipped slots. @@ -1048,7 +1069,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // Stage a batch of operations to be completed atomically if this block is imported // successfully. - let mut confirmation_db_batch = vec![]; + let mut confirmed_state_roots = vec![]; // The block must have a higher slot than its parent. if block.slot() <= parent.beacon_block.slot() { @@ -1121,7 +1142,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { chain.store.do_atomically(state_batch)?; drop(txn_lock); - confirmation_db_batch.push(StoreOp::DeleteStateTemporaryFlag(state_root)); + confirmed_state_roots.push(state_root); state_root }; @@ -1140,59 +1161,82 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { } } - // If this block triggers the merge, check to ensure that it references valid execution - // blocks. - // - // The specification defines this check inside `on_block` in the fork-choice specification, - // however we perform the check here for two reasons: - // - // - There's no point in importing a block that will fail fork choice, so it's best to fail - // early. - // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no - // calls to remote servers. - let valid_merge_transition_block = - if is_merge_transition_block(&state, block.message().body()) { - validate_merge_block(chain, block.message())?; - true - } else { - false + let block_slot = block.slot(); + let state_current_epoch = state.current_epoch(); + + // Define a future that will verify the execution payload with an execution engine (but + // don't execute it yet). 
+ let payload_notifier = PayloadNotifier::new(chain.clone(), block.clone(), &state)?; + let is_valid_merge_transition_block = + is_merge_transition_block(&state, block.message().body()); + let payload_verification_future = async move { + let chain = payload_notifier.chain.clone(); + let block = payload_notifier.block.clone(); + + // If this block triggers the merge, check to ensure that it references valid execution + // blocks. + // + // The specification defines this check inside `on_block` in the fork-choice specification, + // however we perform the check here for two reasons: + // + // - There's no point in importing a block that will fail fork choice, so it's best to fail + // early. + // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no + // calls to remote servers. + if is_valid_merge_transition_block { + validate_merge_block(&chain, block.message()).await?; }; - // The specification declares that this should be run *inside* `per_block_processing`, - // however we run it here to keep `per_block_processing` pure (i.e., no calls to external - // servers). - // - // It is important that this function is called *after* `per_slot_processing`, since the - // `randao` may change. - let payload_verification_status = notify_new_payload(chain, &state, block.message())?; + // The specification declares that this should be run *inside* `per_block_processing`, + // however we run it here to keep `per_block_processing` pure (i.e., no calls to external + // servers). + // + // It is important that this function is called *after* `per_slot_processing`, since the + // `randao` may change. + let payload_verification_status = payload_notifier.notify_new_payload().await?; - // If the payload did not validate or invalidate the block, check to see if this block is - // valid for optimistic import. 
- if payload_verification_status.is_optimistic() { - let current_slot = chain - .slot_clock - .now() - .ok_or(BeaconChainError::UnableToReadSlot)?; + // If the payload did not validate or invalidate the block, check to see if this block is + // valid for optimistic import. + if payload_verification_status.is_optimistic() { + let block_hash_opt = block + .message() + .body() + .execution_payload() + .map(|full_payload| full_payload.execution_payload.block_hash); - if !chain - .fork_choice - .read() - .is_optimistic_candidate_block( - current_slot, - block.slot(), - &block.parent_root(), - &chain.spec, - ) - .map_err(BeaconChainError::from)? - { - return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); + // Ensure the block is a candidate for optimistic import. + if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? + { + warn!( + chain.log, + "Rejecting optimistic block"; + "block_hash" => ?block_hash_opt, + "msg" => "the execution engine is not synced" + ); + return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); + } } - } + + Ok(PayloadVerificationOutcome { + payload_verification_status, + is_valid_merge_transition_block, + }) + }; + // Spawn the payload verification future as a new task, but don't wait for it to complete. + // The `payload_verification_future` will be awaited later to ensure verification completed + // successfully. + let payload_verification_handle = chain + .task_executor + .spawn_handle( + payload_verification_future, + "execution_payload_verification", + ) + .ok_or(BeaconChainError::RuntimeShutdown)?; // If the block is sufficiently recent, notify the validator monitor. 
if let Some(slot) = chain.slot_clock.now() { let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); - if block.slot().epoch(T::EthSpec::slots_per_epoch()) + if block_slot.epoch(T::EthSpec::slots_per_epoch()) + VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 >= epoch { @@ -1201,7 +1245,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // the `validator_monitor` lock from being bounced or held for a long time whilst // performing `per_slot_processing`. for (i, summary) in summaries.iter().enumerate() { - let epoch = state.current_epoch() - Epoch::from(summaries.len() - i); + let epoch = state_current_epoch - Epoch::from(summaries.len() - i); if let Err(e) = validator_monitor.process_validator_statuses(epoch, summary, &chain.spec) { @@ -1300,21 +1344,13 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { }); } - if valid_merge_transition_block { - info!(chain.log, "{}", POS_PANDA_BANNER); - info!(chain.log, "Proof of Stake Activated"; "slot" => block.slot()); - info!(chain.log, ""; "Terminal POW Block Hash" => ?block.message().execution_payload()?.parent_hash().into_root()); - info!(chain.log, ""; "Merge Transition Block Root" => ?block.message().tree_hash_root()); - info!(chain.log, ""; "Merge Transition Execution Hash" => ?block.message().execution_payload()?.block_hash().into_root()); - } - Ok(Self { block, block_root, state, parent_block: parent.beacon_block, - confirmation_db_batch, - payload_verification_status, + confirmed_state_roots, + payload_verification_handle, }) } } @@ -1366,8 +1402,9 @@ fn check_block_against_finalized_slot( chain: &BeaconChain, ) -> Result<(), BlockError> { let finalized_slot = chain - .head_info()? - .finalized_checkpoint + .canonical_head + .cached_head() + .finalized_checkpoint() .epoch .start_slot(T::EthSpec::slots_per_epoch()); @@ -1383,13 +1420,17 @@ fn check_block_against_finalized_slot( } /// Returns `Ok(block)` if the block descends from the finalized root. 
-pub fn check_block_is_finalized_descendant>( - block: SignedBeaconBlock, - fork_choice: &ForkChoice, - store: &HotColdDB, -) -> Result, BlockError> { +/// +/// ## Warning +/// +/// Taking a lock on the `chain.canonical_head.fork_choice` might cause a deadlock here. +pub fn check_block_is_finalized_descendant( + chain: &BeaconChain, + fork_choice: &BeaconForkChoice, + block: &Arc>, +) -> Result<(), BlockError> { if fork_choice.is_descendant_of_finalized(block.parent_root()) { - Ok(block) + Ok(()) } else { // If fork choice does *not* consider the parent to be a descendant of the finalized block, // then there are two more cases: @@ -1399,7 +1440,8 @@ pub fn check_block_is_finalized_descendant( // Check if the block is already known. We know it is post-finalization, so it is // sufficient to check the fork choice. - if chain.fork_choice.read().contains_block(&block_root) { + if chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { return Err(BlockError::BlockIsAlreadyKnown); } @@ -1477,16 +1523,16 @@ pub fn get_block_root(block: &SignedBeaconBlock) -> Hash256 { #[allow(clippy::type_complexity)] fn verify_parent_block_is_known( chain: &BeaconChain, - block: SignedBeaconBlock, -) -> Result<(ProtoBlock, SignedBeaconBlock), BlockError> { + block: Arc>, +) -> Result<(ProtoBlock, Arc>), BlockError> { if let Some(proto_block) = chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&block.message().parent_root()) { Ok((proto_block, block)) } else { - Err(BlockError::ParentUnknown(Box::new(block))) + Err(BlockError::ParentUnknown(block)) } } @@ -1496,12 +1542,12 @@ fn verify_parent_block_is_known( /// whilst attempting the operation. 
#[allow(clippy::type_complexity)] fn load_parent( - block: SignedBeaconBlock, + block: Arc>, chain: &BeaconChain, ) -> Result< ( PreProcessingSnapshot, - SignedBeaconBlock, + Arc>, ), BlockError, > { @@ -1518,11 +1564,11 @@ fn load_parent( // choice, so we will not reject any child of the finalized block (this is relevant during // genesis). if !chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .contains_block(&block.parent_root()) { - return Err(BlockError::ParentUnknown(Box::new(block))); + return Err(BlockError::ParentUnknown(block)); } let block_delay = chain @@ -1717,18 +1763,12 @@ fn verify_header_signature( .get(header.message.proposer_index as usize) .cloned() .ok_or(BlockError::UnknownValidator(header.message.proposer_index))?; - let (fork, genesis_validators_root) = - chain.with_head::<_, BlockError, _>(|head| { - Ok(( - head.beacon_state.fork(), - head.beacon_state.genesis_validators_root(), - )) - })?; + let head_fork = chain.canonical_head.cached_head().head_fork(); if header.verify_signature::( &proposer_pubkey, - &fork, - genesis_validators_root, + &head_fork, + chain.genesis_validators_root, &chain.spec, ) { Ok(()) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 87f9416158..cef33ee4f7 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,4 +1,4 @@ -use crate::beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; +use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; use crate::eth1_chain::{CachingEth1Backend, SszEth1}; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; @@ -245,6 +245,7 @@ where let fork_choice = BeaconChain::>::load_fork_choice( store.clone(), + &self.spec, ) .map_err(|e| format!("Unable to load fork choice from disk: {:?}", e))? 
.ok_or("Fork choice not found in store")?; @@ -337,7 +338,7 @@ where Ok(( BeaconSnapshot { beacon_block_root, - beacon_block, + beacon_block: Arc::new(beacon_block), beacon_state, }, self, @@ -352,12 +353,15 @@ where self = updated_builder; let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis); + let current_slot = None; let fork_choice = ForkChoice::from_anchor( fc_store, genesis.beacon_block_root, &genesis.beacon_block, &genesis.beacon_state, + current_slot, + &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -455,17 +459,20 @@ where let snapshot = BeaconSnapshot { beacon_block_root: weak_subj_block_root, - beacon_block: weak_subj_block, + beacon_block: Arc::new(weak_subj_block), beacon_state: weak_subj_state, }; let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot); + let current_slot = Some(snapshot.beacon_block.slot()); let fork_choice = ForkChoice::from_anchor( fc_store, snapshot.beacon_block_root, &snapshot.beacon_block, &snapshot.beacon_state, + current_slot, + &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -638,17 +645,18 @@ where head_block_root, &head_state, store.clone(), + Some(current_slot), &self.spec, )?; } - let mut canonical_head = BeaconSnapshot { + let mut head_snapshot = BeaconSnapshot { beacon_block_root: head_block_root, - beacon_block: head_block, + beacon_block: Arc::new(head_block), beacon_state: head_state, }; - canonical_head + head_snapshot .beacon_state .build_all_caches(&self.spec) .map_err(|e| format!("Failed to build state caches: {:?}", e))?; @@ -658,25 +666,17 @@ where // // This is a sanity check to detect database corruption. 
let fc_finalized = fork_choice.finalized_checkpoint(); - let head_finalized = canonical_head.beacon_state.finalized_checkpoint(); - if fc_finalized != head_finalized { - let is_genesis = head_finalized.root.is_zero() - && head_finalized.epoch == fc_finalized.epoch - && fc_finalized.root == genesis_block_root; - let is_wss = store.get_anchor_slot().map_or(false, |anchor_slot| { - fc_finalized.epoch == anchor_slot.epoch(TEthSpec::slots_per_epoch()) - }); - if !is_genesis && !is_wss { - return Err(format!( - "Database corrupt: fork choice is finalized at {:?} whilst head is finalized at \ + let head_finalized = head_snapshot.beacon_state.finalized_checkpoint(); + if fc_finalized.epoch < head_finalized.epoch { + return Err(format!( + "Database corrupt: fork choice is finalized at {:?} whilst head is finalized at \ {:?}", - fc_finalized, head_finalized - )); - } + fc_finalized, head_finalized + )); } let validator_pubkey_cache = self.validator_pubkey_cache.map(Ok).unwrap_or_else(|| { - ValidatorPubkeyCache::new(&canonical_head.beacon_state, store.clone()) + ValidatorPubkeyCache::new(&head_snapshot.beacon_state, store.clone()) .map_err(|e| format!("Unable to init validator pubkey cache: {:?}", e)) })?; @@ -691,7 +691,7 @@ where if let Some(slot) = slot_clock.now() { validator_monitor.process_valid_state( slot.epoch(TEthSpec::slots_per_epoch()), - &canonical_head.beacon_state, + &head_snapshot.beacon_state, ); } @@ -725,10 +725,18 @@ where .do_atomically(self.pending_io_batch) .map_err(|e| format!("Error writing chain & metadata to disk: {:?}", e))?; + let genesis_validators_root = head_snapshot.beacon_state.genesis_validators_root(); + let genesis_time = head_snapshot.beacon_state.genesis_time(); + let head_for_snapshot_cache = head_snapshot.clone(); + let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); + let beacon_chain = BeaconChain { spec: self.spec, config: self.chain_config, store, + task_executor: self + .task_executor + .ok_or("Cannot 
build without task executor")?, store_migrator, slot_clock, op_pool: self.op_pool.ok_or("Cannot build without op pool")?, @@ -758,18 +766,18 @@ where observed_attester_slashings: <_>::default(), eth1_chain: self.eth1_chain, execution_layer: self.execution_layer, - genesis_validators_root: canonical_head.beacon_state.genesis_validators_root(), - canonical_head: TimeoutRwLock::new(canonical_head.clone()), + genesis_validators_root, + genesis_time, + canonical_head, genesis_block_root, genesis_state_root, - fork_choice: RwLock::new(fork_choice), fork_choice_signal_tx, fork_choice_signal_rx, event_handler: self.event_handler, head_tracker, snapshot_cache: TimeoutRwLock::new(SnapshotCache::new( DEFAULT_SNAPSHOT_CACHE_SIZE, - canonical_head, + head_for_snapshot_cache, )), shuffling_cache: TimeoutRwLock::new(ShufflingCache::new()), beacon_proposer_cache: <_>::default(), @@ -787,9 +795,7 @@ where validator_monitor: RwLock::new(validator_monitor), }; - let head = beacon_chain - .head() - .map_err(|e| format!("Failed to get head: {:?}", e))?; + let head = beacon_chain.head_snapshot(); // Prime the attester cache with the head state. 
beacon_chain @@ -992,10 +998,10 @@ mod test { .build() .expect("should build"); - let head = chain.head().expect("should get head"); + let head = chain.head_snapshot(); - let state = head.beacon_state; - let block = head.beacon_block; + let state = &head.beacon_state; + let block = &head.beacon_block; assert_eq!(state.slot(), Slot::new(0), "should start from genesis"); assert_eq!( @@ -1014,7 +1020,7 @@ mod test { .get_blinded_block(&Hash256::zero()) .expect("should read db") .expect("should find genesis block"), - block.clone().into(), + block.clone_as_blinded(), "should store genesis block under zero hash alias" ); assert_eq!( diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs new file mode 100644 index 0000000000..c02ddb8263 --- /dev/null +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -0,0 +1,1307 @@ +//! This module provides all functionality for finding the canonical head, updating all necessary +//! components (e.g. caches) and maintaining a cached head block and state. +//! +//! For practically all applications, the "canonical head" can be read using +//! `beacon_chain.canonical_head.cached_head()`. +//! +//! The canonical head can be updated using `beacon_chain.recompute_head()`. +//! +//! ## Deadlock safety +//! +//! This module contains three locks: +//! +//! 1. `RwLock`: Contains `proto_array` fork choice. +//! 2. `RwLock`: Contains a cached block/state from the last run of `proto_array`. +//! 3. `Mutex<()>`: Is used to prevent concurrent execution of `BeaconChain::recompute_head`. +//! +//! This module has to take great efforts to avoid causing a deadlock with these three methods. Any +//! developers working in this module should tread carefully and seek a detailed review. +//! +//! To encourage safe use of this module, it should **only ever return a read or write lock for the +//! fork choice lock (lock 1)**. Whilst public functions might indirectly utilise locks (2) and (3), +//! 
the fundamental `RwLockWriteGuard` or `RwLockReadGuard` should never be exposed. This prevents +//! external functions from acquiring these locks in conflicting orders and causing a deadlock. +//! +//! ## Design Considerations +//! +//! We separate the `BeaconForkChoice` and `CachedHead` into two `RwLocks` because we want to ensure +//! fast access to the `CachedHead`. If we were to put them both under the same lock, we would need +//! to take an exclusive write-lock on it in order to run `ForkChoice::get_head`. This can take tens +//! of milliseconds and would block all downstream functions that want to know simple things like +//! the head block root. This is unacceptable for fast-responding functions like the networking +//! stack. + +use crate::persisted_fork_choice::PersistedForkChoice; +use crate::{ + beacon_chain::{ + BeaconForkChoice, BeaconStore, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, FORK_CHOICE_DB_KEY, + }, + block_times_cache::BlockTimesCache, + events::ServerSentEventHandler, + metrics, + validator_monitor::{get_slot_delay_ms, timestamp_now}, + BeaconChain, BeaconChainError as Error, BeaconChainTypes, BeaconSnapshot, +}; +use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead}; +use fork_choice::{ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock}; +use itertools::process_results; +use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; +use slog::{crit, debug, error, warn, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use std::time::Duration; +use store::{iter::StateRootsIterator, KeyValueStoreOp, StoreItem}; +use task_executor::{JoinHandle, ShutdownReason}; +use types::*; + +/// Simple wrapper around `RwLock` that uses private visibility to prevent any other modules from +/// accessing the contained lock without it being explicitly noted in this module. 
+pub struct CanonicalHeadRwLock(RwLock); + +impl From> for CanonicalHeadRwLock { + fn from(rw_lock: RwLock) -> Self { + Self(rw_lock) + } +} + +impl CanonicalHeadRwLock { + fn new(item: T) -> Self { + Self::from(RwLock::new(item)) + } + + fn read(&self) -> RwLockReadGuard { + self.0.read() + } + + fn write(&self) -> RwLockWriteGuard { + self.0.write() + } +} + +/// Provides a series of cached values from the last time `BeaconChain::recompute_head` was run. +/// +/// This struct is designed to be cheap-to-clone, any large fields should be wrapped in an `Arc` (or +/// similar). +#[derive(Clone)] +pub struct CachedHead { + /// Provides the head block and state from the last time the head was updated. + pub snapshot: Arc>, + /// The justified checkpoint as per `self.fork_choice`. + /// + /// This value may be distinct to the `self.snapshot.beacon_state.justified_checkpoint`. + /// This value should be used over the beacon state value in practically all circumstances. + justified_checkpoint: Checkpoint, + /// The finalized checkpoint as per `self.fork_choice`. + /// + /// This value may be distinct to the `self.snapshot.beacon_state.finalized_checkpoint`. + /// This value should be used over the beacon state value in practically all circumstances. + finalized_checkpoint: Checkpoint, + /// The `execution_payload.block_hash` of the block at the head of the chain. Set to `None` + /// before Bellatrix. + head_hash: Option, + /// The `execution_payload.block_hash` of the finalized block. Set to `None` before Bellatrix. + finalized_hash: Option, +} + +impl CachedHead { + /// Returns root of the block at the head of the beacon chain. + pub fn head_block_root(&self) -> Hash256 { + self.snapshot.beacon_block_root + } + + /// Returns root of the `BeaconState` at the head of the beacon chain. + /// + /// ## Note + /// + /// This `BeaconState` has *not* been advanced to the current slot, it has the same slot as the + /// head block. 
+ pub fn head_state_root(&self) -> Hash256 { + self.snapshot.beacon_state_root() + } + + /// Returns slot of the block at the head of the beacon chain. + /// + /// ## Notes + /// + /// This is *not* the current slot as per the system clock. Use `BeaconChain::slot` for the + /// system clock (aka "wall clock") slot. + pub fn head_slot(&self) -> Slot { + self.snapshot.beacon_block.slot() + } + + /// Returns the `Fork` from the `BeaconState` at the head of the chain. + pub fn head_fork(&self) -> Fork { + self.snapshot.beacon_state.fork() + } + + /// Returns the randao mix for the block at the head of the chain. + pub fn head_random(&self) -> Result { + let state = &self.snapshot.beacon_state; + let root = *state.get_randao_mix(state.current_epoch())?; + Ok(root) + } + + /// Returns the active validator count for the current epoch of the head state. + /// + /// Should only return `None` if the caches have not been built on the head state (this should + /// never happen). + pub fn active_validator_count(&self) -> Option { + self.snapshot + .beacon_state + .get_cached_active_validator_indices(RelativeEpoch::Current) + .map(|indices| indices.len()) + .ok() + } + + /// Returns the finalized checkpoint, as determined by fork choice. + /// + /// ## Note + /// + /// This is *not* the finalized checkpoint of the `head_snapshot.beacon_state`, rather it is the + /// best finalized checkpoint that has been observed by `self.fork_choice`. It is possible that + /// the `head_snapshot.beacon_state` finalized value is earlier than the one returned here. + pub fn finalized_checkpoint(&self) -> Checkpoint { + self.finalized_checkpoint + } + + /// Returns the justified checkpoint, as determined by fork choice. + /// + /// ## Note + /// + /// This is *not* the "current justified checkpoint" of the `head_snapshot.beacon_state`, rather + /// it is the justified checkpoint in the view of `self.fork_choice`. 
It is possible that the + /// `head_snapshot.beacon_state` justified value is different to, but not conflicting with, the + /// one returned here. + pub fn justified_checkpoint(&self) -> Checkpoint { + self.justified_checkpoint + } + + /// Returns the cached values of `ForkChoice::forkchoice_update_parameters`. + /// + /// Useful for supplying to the execution layer. + pub fn forkchoice_update_parameters(&self) -> ForkchoiceUpdateParameters { + ForkchoiceUpdateParameters { + head_root: self.snapshot.beacon_block_root, + head_hash: self.head_hash, + finalized_hash: self.finalized_hash, + } + } +} + +/// Represents the "canonical head" of the beacon chain. +/// +/// The `cached_head` is elected by the `fork_choice` algorithm contained in this struct. +/// +/// There is no guarantee that the state of the `fork_choice` struct will always represent the +/// `cached_head` (i.e. we may call `fork_choice` *without* updating the cached values), however +/// there is a guarantee that the `cached_head` represents some past state of `fork_choice` (i.e. +/// `fork_choice` never lags *behind* the `cached_head`). +pub struct CanonicalHead { + /// Provides an in-memory representation of the non-finalized block tree and is used to run the + /// fork choice algorithm and determine the canonical head. + pub fork_choice: CanonicalHeadRwLock>, + /// Provides values cached from a previous execution of `self.fork_choice.get_head`. + /// + /// Although `self.fork_choice` might be slightly more advanced that this value, it is safe to + /// consider that these values represent the "canonical head" of the beacon chain. + pub cached_head: CanonicalHeadRwLock>, + /// A lock used to prevent concurrent runs of `BeaconChain::recompute_head`. + /// + /// This lock **should not be made public**, it should only be used inside this module. + recompute_head_lock: Mutex<()>, +} + +impl CanonicalHead { + /// Instantiate `Self`. 
+ pub fn new( + fork_choice: BeaconForkChoice, + snapshot: Arc>, + ) -> Self { + let fork_choice_view = fork_choice.cached_fork_choice_view(); + let forkchoice_update_params = fork_choice.get_forkchoice_update_parameters(); + let cached_head = CachedHead { + snapshot, + justified_checkpoint: fork_choice_view.justified_checkpoint, + finalized_checkpoint: fork_choice_view.finalized_checkpoint, + head_hash: forkchoice_update_params.head_hash, + finalized_hash: forkchoice_update_params.finalized_hash, + }; + + Self { + fork_choice: CanonicalHeadRwLock::new(fork_choice), + cached_head: CanonicalHeadRwLock::new(cached_head), + recompute_head_lock: Mutex::new(()), + } + } + + /// Load a persisted version of `BeaconForkChoice` from the `store` and restore `self` to that + /// state. + /// + /// This is useful if some database corruption is expected and we wish to go back to our last + /// save-point. + pub(crate) fn restore_from_store( + &self, + // We don't actually need this value, however it's always present when we call this function + // and it needs to be dropped to prevent a dead-lock. Requiring it to be passed here is + // defensive programming. + mut fork_choice_write_lock: RwLockWriteGuard>, + store: &BeaconStore, + spec: &ChainSpec, + ) -> Result<(), Error> { + let fork_choice = >::load_fork_choice(store.clone(), spec)? + .ok_or(Error::MissingPersistedForkChoice)?; + let fork_choice_view = fork_choice.cached_fork_choice_view(); + let beacon_block_root = fork_choice_view.head_block_root; + let beacon_block = store + .get_full_block(&beacon_block_root)? + .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; + let beacon_state_root = beacon_block.state_root(); + let beacon_state = store + .get_state(&beacon_state_root, Some(beacon_block.slot()))? 
+ .ok_or(Error::MissingBeaconState(beacon_state_root))?; + + let snapshot = BeaconSnapshot { + beacon_block_root, + beacon_block: Arc::new(beacon_block), + beacon_state, + }; + + let forkchoice_update_params = fork_choice.get_forkchoice_update_parameters(); + let cached_head = CachedHead { + snapshot: Arc::new(snapshot), + justified_checkpoint: fork_choice_view.justified_checkpoint, + finalized_checkpoint: fork_choice_view.finalized_checkpoint, + head_hash: forkchoice_update_params.head_hash, + finalized_hash: forkchoice_update_params.finalized_hash, + }; + + *fork_choice_write_lock = fork_choice; + // Avoid interleaving the fork choice and cached head locks. + drop(fork_choice_write_lock); + *self.cached_head.write() = cached_head; + + Ok(()) + } + + /// Returns the execution status of the block at the head of the beacon chain. + /// + /// This will only return `Err` in the scenario where `self.fork_choice` has advanced + /// significantly past the cached `head_snapshot`. In such a scenario it is likely prudent to + /// run `BeaconChain::recompute_head` to update the cached values. + pub fn head_execution_status(&self) -> Result { + let head_block_root = self.cached_head().head_block_root(); + self.fork_choice_read_lock() + .get_block_execution_status(&head_block_root) + .ok_or(Error::HeadMissingFromForkChoice(head_block_root)) + } + + /// Returns a clone of `self.cached_head`. + /// + /// Takes a read-lock on `self.cached_head` for a short time (just long enough to clone it). + /// The `CachedHead` is designed to be fast-to-clone so this is preferred to passing back a + /// `RwLockReadGuard`, which may cause deadlock issues (see module-level documentation). + /// + /// This function is safe to be public since it does not expose any locks. + pub fn cached_head(&self) -> CachedHead { + self.cached_head_read_lock().clone() + } + + /// Access a read-lock for the cached head. + /// + /// This function is **not safe** to be public. 
See the module-level documentation for more + /// information about protecting from deadlocks. + fn cached_head_read_lock(&self) -> RwLockReadGuard> { + self.cached_head.read() + } + + /// Access a write-lock for the cached head. + /// + /// This function is **not safe** to be public. See the module-level documentation for more + /// information about protecting from deadlocks. + fn cached_head_write_lock(&self) -> RwLockWriteGuard> { + self.cached_head.write() + } + + /// Access a read-lock for fork choice. + pub fn fork_choice_read_lock(&self) -> RwLockReadGuard> { + self.fork_choice.read() + } + + /// Access a write-lock for fork choice. + pub fn fork_choice_write_lock(&self) -> RwLockWriteGuard> { + self.fork_choice.write() + } +} + +impl BeaconChain { + /// Contains the "best block"; the head of the canonical `BeaconChain`. + /// + /// It is important to note that the `snapshot.beacon_state` returned may not match the present slot. It + /// is the state as it was when the head block was received, which could be some slots prior to + /// now. + pub fn head(&self) -> CachedHead { + self.canonical_head.cached_head() + } + + /// Apply a function to an `Arc`-clone of the canonical head snapshot. + /// + /// This method is a relic from an old implementation where the canonical head was not behind + /// an `Arc` and the canonical head lock had to be held whenever it was read. This method is + /// fine to be left here, it just seems a bit weird. + pub fn with_head( + &self, + f: impl FnOnce(&BeaconSnapshot) -> Result, + ) -> Result + where + E: From, + { + let head_snapshot = self.head_snapshot(); + f(&head_snapshot) + } + + /// Returns the beacon block root at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_beacon_block_root(&self) -> Hash256 { + self.canonical_head + .cached_head_read_lock() + .snapshot + .beacon_block_root + } + + /// Returns the slot of the highest block in the canonical chain. 
+ pub fn best_slot(&self) -> Slot { + self.canonical_head + .cached_head_read_lock() + .snapshot + .beacon_block + .slot() + } + + /// Returns an `Arc` of the `BeaconSnapshot` at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_snapshot(&self) -> Arc> { + self.canonical_head.cached_head_read_lock().snapshot.clone() + } + + /// Returns the beacon block at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_beacon_block(&self) -> Arc> { + self.canonical_head + .cached_head_read_lock() + .snapshot + .beacon_block + .clone() + } + + /// Returns a clone of the beacon state at the head of the canonical chain. + /// + /// Cloning the head state is expensive and should generally be avoided outside of tests. + /// + /// See `Self::head` for more information. + pub fn head_beacon_state_cloned(&self) -> BeaconState { + // Don't clone whilst holding the read-lock, take an Arc-clone to reduce lock contention. + let snapshot: Arc<_> = self.head_snapshot(); + snapshot + .beacon_state + .clone_with(CloneConfig::committee_caches_only()) + } + + /// Execute the fork choice algorithm and enthrone the result as the canonical head. + /// + /// This method replaces the old `BeaconChain::fork_choice` method. + pub async fn recompute_head_at_current_slot(self: &Arc) -> Result<(), Error> { + let current_slot = self.slot()?; + self.recompute_head_at_slot(current_slot).await + } + + /// Execute the fork choice algorithm and enthrone the result as the canonical head. + /// + /// The `current_slot` is specified rather than relying on the wall-clock slot. Using a + /// different slot to the wall-clock can be useful for pushing fork choice into the next slot + /// *just* before the start of the slot. This ensures that block production can use the correct + /// head value without being delayed. 
+ pub async fn recompute_head_at_slot(self: &Arc, current_slot: Slot) -> Result<(), Error> { + metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES); + + let chain = self.clone(); + match self + .spawn_blocking_handle( + move || chain.recompute_head_at_slot_internal(current_slot), + "recompute_head_internal", + ) + .await? + { + // Fork choice returned successfully and did not need to update the EL. + Ok(None) => Ok(()), + // Fork choice returned successfully and needed to update the EL. It has returned a + // join-handle from when it spawned some async tasks. We should await those tasks. + Ok(Some(join_handle)) => match join_handle.await { + // The async task completed successfully. + Ok(Some(())) => Ok(()), + // The async task did not complete successfully since the runtime is shutting down. + Ok(None) => { + debug!( + self.log, + "Did not update EL fork choice"; + "info" => "shutting down" + ); + Err(Error::RuntimeShutdown) + } + // The async task did not complete successfully, tokio returned an error. + Err(e) => { + error!( + self.log, + "Did not update EL fork choice"; + "error" => ?e + ); + Err(Error::TokioJoin(e)) + } + }, + // There was an error recomputing the head. + Err(e) => { + metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS); + Err(e) + } + } + } + + /// A non-async (blocking) function which recomputes the canonical head and spawns async tasks. + /// + /// This function performs long-running, heavy-lifting tasks which should not be performed on + /// the core `tokio` executor. + fn recompute_head_at_slot_internal( + self: &Arc, + current_slot: Slot, + ) -> Result>>, Error> { + let recompute_head_lock = self.canonical_head.recompute_head_lock.lock(); + + // Take a clone of the current ("old") head. + let old_cached_head = self.canonical_head.cached_head(); + + // Determine the current ("old") fork choice parameters. 
+ // + // It is important to read the `fork_choice_view` from the cached head rather than from fork + // choice, since the fork choice value might have changed between calls to this function. We + // are interested in the changes since we last cached the head values, not since fork choice + // was last run. + let old_view = ForkChoiceView { + head_block_root: old_cached_head.head_block_root(), + justified_checkpoint: old_cached_head.justified_checkpoint(), + finalized_checkpoint: old_cached_head.finalized_checkpoint(), + }; + + let mut fork_choice_write_lock = self.canonical_head.fork_choice_write_lock(); + + // Recompute the current head via the fork choice algorithm. + fork_choice_write_lock.get_head(current_slot, &self.spec)?; + + // Downgrade the fork choice write-lock to a read lock, without allowing access to any + // other writers. + let fork_choice_read_lock = RwLockWriteGuard::downgrade(fork_choice_write_lock); + + // Read the current head value from the fork choice algorithm. + let new_view = fork_choice_read_lock.cached_fork_choice_view(); + + // Check to ensure that the finalized block hasn't been marked as invalid. If it has, + // shut down Lighthouse. + let finalized_proto_block = fork_choice_read_lock.get_finalized_block()?; + check_finalized_payload_validity(self, &finalized_proto_block)?; + + // Sanity check the finalized checkpoint. + // + // The new finalized checkpoint must be either equal to or better than the previous + // finalized checkpoint. + check_against_finality_reversion(&old_view, &new_view)?; + + let new_head_proto_block = fork_choice_read_lock + .get_block(&new_view.head_block_root) + .ok_or(Error::HeadBlockMissingFromForkChoice( + new_view.head_block_root, + ))?; + + // Do not allow an invalid block to become the head. + // + // This check avoids the following infinite loop: + // + // 1. A new block is set as the head. + // 2. The EL is updated with the new head, and returns INVALID. + // 3. 
We call `process_invalid_execution_payload` and it calls this function. + // 4. This function elects an invalid block as the head. + // 5. GOTO 2 + // + // In theory, fork choice should never select an invalid head (i.e., step #3 is impossible). + // However, this check is cheap. + if new_head_proto_block.execution_status.is_invalid() { + return Err(Error::HeadHasInvalidPayload { + block_root: new_head_proto_block.root, + execution_status: new_head_proto_block.execution_status, + }); + } + + // Exit early if the head or justified/finalized checkpoints have not changed, there's + // nothing to do. + if new_view == old_view { + debug!( + self.log, + "No change in canonical head"; + "head" => ?new_view.head_block_root + ); + return Ok(None); + } + + // Get the parameters to update the execution layer since either the head or some finality + // parameters have changed. + let new_forkchoice_update_parameters = + fork_choice_read_lock.get_forkchoice_update_parameters(); + + perform_debug_logging::(&old_view, &new_view, &fork_choice_read_lock, &self.log); + + // Drop the read lock, it's no longer required and holding it any longer than necessary + // will just cause lock contention. + drop(fork_choice_read_lock); + + // If the head has changed, update `self.canonical_head`. + let new_cached_head = if new_view.head_block_root != old_view.head_block_root { + metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD); + + // Try and obtain the snapshot for `beacon_block_root` from the snapshot cache, falling + // back to a database read if that fails. + let new_snapshot = self + .snapshot_cache + .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .and_then(|snapshot_cache| { + snapshot_cache.get_cloned( + new_view.head_block_root, + CloneConfig::committee_caches_only(), + ) + }) + .map::, _>(Ok) + .unwrap_or_else(|| { + let beacon_block = self + .store + .get_full_block(&new_view.head_block_root)? 
+ .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; + + let beacon_state_root = beacon_block.state_root(); + let beacon_state: BeaconState = self + .get_state(&beacon_state_root, Some(beacon_block.slot()))? + .ok_or(Error::MissingBeaconState(beacon_state_root))?; + + Ok(BeaconSnapshot { + beacon_block: Arc::new(beacon_block), + beacon_block_root: new_view.head_block_root, + beacon_state, + }) + }) + .and_then(|mut snapshot| { + // Regardless of where we got the state from, attempt to build the committee + // caches. + snapshot + .beacon_state + .build_all_committee_caches(&self.spec) + .map_err(Into::into) + .map(|()| snapshot) + })?; + + let new_cached_head = CachedHead { + snapshot: Arc::new(new_snapshot), + justified_checkpoint: new_view.justified_checkpoint, + finalized_checkpoint: new_view.finalized_checkpoint, + head_hash: new_forkchoice_update_parameters.head_hash, + finalized_hash: new_forkchoice_update_parameters.finalized_hash, + }; + + let new_head = { + // Now the new snapshot has been obtained, take a write-lock on the cached head so + // we can update it quickly. + let mut cached_head_write_lock = self.canonical_head.cached_head_write_lock(); + // Enshrine the new head as the canonical cached head. + *cached_head_write_lock = new_cached_head; + // Take a clone of the cached head for later use. It is cloned whilst + // holding the write-lock to ensure we get exactly the head we just enshrined. + cached_head_write_lock.clone() + }; + + // Clear the early attester cache in case it conflicts with `self.canonical_head`. + self.early_attester_cache.clear(); + + new_head + } else { + let new_cached_head = CachedHead { + // The head hasn't changed, take a relatively cheap `Arc`-clone of the existing + // head. 
+ snapshot: old_cached_head.snapshot.clone(), + justified_checkpoint: new_view.justified_checkpoint, + finalized_checkpoint: new_view.finalized_checkpoint, + head_hash: new_forkchoice_update_parameters.head_hash, + finalized_hash: new_forkchoice_update_parameters.finalized_hash, + }; + + let mut cached_head_write_lock = self.canonical_head.cached_head_write_lock(); + + // Enshrine the new head as the canonical cached head. Whilst the head block hasn't + // changed, the FFG checkpoints must have changed. + *cached_head_write_lock = new_cached_head; + + // Take a clone of the cached head for later use. It is cloned whilst + // holding the write-lock to ensure we get exactly the head we just enshrined. + cached_head_write_lock.clone() + }; + + // Alias for readability. + let new_snapshot = &new_cached_head.snapshot; + let old_snapshot = &old_cached_head.snapshot; + + // If the head changed, perform some updates. + if new_snapshot.beacon_block_root != old_snapshot.beacon_block_root { + if let Err(e) = + self.after_new_head(&old_cached_head, &new_cached_head, new_head_proto_block) + { + crit!( + self.log, + "Error updating canonical head"; + "error" => ?e + ); + } + } + + // Drop the old cache head nice and early to try and free the memory as soon as possible. + drop(old_cached_head); + + // If the finalized checkpoint changed, perform some updates. + if new_view.finalized_checkpoint != old_view.finalized_checkpoint { + if let Err(e) = + self.after_finalization(&new_cached_head, new_view, finalized_proto_block) + { + crit!( + self.log, + "Error updating finalization"; + "error" => ?e + ); + } + } + + // The execution layer updates might attempt to take a write-lock on fork choice, so it's + // important to ensure the fork-choice lock isn't being held. + let el_update_handle = + spawn_execution_layer_updates(self.clone(), new_forkchoice_update_parameters)?; + + // We have completed recomputing the head and it's now valid for another process to do the + // same. 
+ drop(recompute_head_lock); + + Ok(Some(el_update_handle)) + } + + /// Perform updates to caches and other components after the canonical head has been changed. + fn after_new_head( + self: &Arc, + old_cached_head: &CachedHead, + new_cached_head: &CachedHead, + new_head_proto_block: ProtoBlock, + ) -> Result<(), Error> { + let old_snapshot = &old_cached_head.snapshot; + let new_snapshot = &new_cached_head.snapshot; + + // Detect and potentially report any re-orgs. + let reorg_distance = detect_reorg( + &old_snapshot.beacon_state, + old_snapshot.beacon_block_root, + &new_snapshot.beacon_state, + new_snapshot.beacon_block_root, + &self.spec, + &self.log, + ); + + // Determine if the new head is in a later epoch to the previous head. + let is_epoch_transition = old_snapshot + .beacon_block + .slot() + .epoch(T::EthSpec::slots_per_epoch()) + < new_snapshot + .beacon_state + .slot() + .epoch(T::EthSpec::slots_per_epoch()); + + // These fields are used for server-sent events. + let state_root = new_snapshot.beacon_state_root(); + let head_slot = new_snapshot.beacon_state.slot(); + let dependent_root = new_snapshot + .beacon_state + .proposer_shuffling_decision_root(self.genesis_block_root); + let prev_dependent_root = new_snapshot + .beacon_state + .attester_shuffling_decision_root(self.genesis_block_root, RelativeEpoch::Current); + + // Update the snapshot cache with the latest head value. + // + // This *could* be done inside `recompute_head`, however updating the head on the snapshot + // cache is not critical so we avoid placing it on a critical path. Note that this function + // will not return an error if the update fails, it will just log an error. 
+ self.snapshot_cache + .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .map(|mut snapshot_cache| { + snapshot_cache.update_head(new_snapshot.beacon_block_root); + }) + .unwrap_or_else(|| { + error!( + self.log, + "Failed to obtain cache write lock"; + "lock" => "snapshot_cache", + "task" => "update head" + ); + }); + + observe_head_block_delays( + &mut self.block_times_cache.write(), + &new_head_proto_block, + new_snapshot.beacon_block.message().proposer_index(), + new_snapshot + .beacon_block + .message() + .body() + .graffiti() + .as_utf8_lossy(), + &self.slot_clock, + self.event_handler.as_ref(), + &self.log, + ); + + if is_epoch_transition || reorg_distance.is_some() { + self.persist_head_and_fork_choice()?; + self.op_pool.prune_attestations(self.epoch()?); + } + + // Register server-sent-events for a new head. + if let Some(event_handler) = self + .event_handler + .as_ref() + .filter(|handler| handler.has_head_subscribers()) + { + match (dependent_root, prev_dependent_root) { + (Ok(current_duty_dependent_root), Ok(previous_duty_dependent_root)) => { + event_handler.register(EventKind::Head(SseHead { + slot: head_slot, + block: new_snapshot.beacon_block_root, + state: state_root, + current_duty_dependent_root, + previous_duty_dependent_root, + epoch_transition: is_epoch_transition, + })); + } + (Err(e), _) | (_, Err(e)) => { + warn!( + self.log, + "Unable to find dependent roots, cannot register head event"; + "error" => ?e + ); + } + } + } + + // Register a server-sent-event for a reorg (if necessary). 
+ if let Some(depth) = reorg_distance { + if let Some(event_handler) = self + .event_handler + .as_ref() + .filter(|handler| handler.has_reorg_subscribers()) + { + event_handler.register(EventKind::ChainReorg(SseChainReorg { + slot: head_slot, + depth: depth.as_u64(), + old_head_block: old_snapshot.beacon_block_root, + old_head_state: old_snapshot.beacon_state_root(), + new_head_block: new_snapshot.beacon_block_root, + new_head_state: new_snapshot.beacon_state_root(), + epoch: head_slot.epoch(T::EthSpec::slots_per_epoch()), + })); + } + } + + Ok(()) + } + + /// Perform updates to caches and other components after the finalized checkpoint has been + /// changed. + fn after_finalization( + self: &Arc, + new_cached_head: &CachedHead, + new_view: ForkChoiceView, + finalized_proto_block: ProtoBlock, + ) -> Result<(), Error> { + let new_snapshot = &new_cached_head.snapshot; + + self.op_pool + .prune_all(&new_snapshot.beacon_state, self.epoch()?); + + self.observed_block_producers.write().prune( + new_view + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + ); + + self.snapshot_cache + .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .map(|mut snapshot_cache| { + snapshot_cache.prune(new_view.finalized_checkpoint.epoch); + debug!( + self.log, + "Snapshot cache pruned"; + "new_len" => snapshot_cache.len(), + "remaining_roots" => ?snapshot_cache.beacon_block_roots(), + ); + }) + .unwrap_or_else(|| { + error!( + self.log, + "Failed to obtain cache write lock"; + "lock" => "snapshot_cache", + "task" => "prune" + ); + }); + + self.attester_cache + .prune_below(new_view.finalized_checkpoint.epoch); + + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_finalized_subscribers() { + event_handler.register(EventKind::FinalizedCheckpoint(SseFinalizedCheckpoint { + epoch: new_view.finalized_checkpoint.epoch, + block: new_view.finalized_checkpoint.root, + // Provide the state root of the latest finalized block, rather 
than the + // specific state root at the first slot of the finalized epoch (which + // might be a skip slot). + state: finalized_proto_block.state_root, + })); + } + } + + // The store migration task requires the *state at the slot of the finalized epoch*, + // rather than the state of the latest finalized block. These two values will only + // differ when the first slot of the finalized epoch is a skip slot. + // + // Use the `StateRootsIterator` directly rather than `BeaconChain::state_root_at_slot` + // to ensure we use the same state that we just set as the head. + let new_finalized_slot = new_view + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let new_finalized_state_root = process_results( + StateRootsIterator::new(&self.store, &new_snapshot.beacon_state), + |mut iter| { + iter.find_map(|(state_root, slot)| { + if slot == new_finalized_slot { + Some(state_root) + } else { + None + } + }) + }, + )? + .ok_or(Error::MissingFinalizedStateRoot(new_finalized_slot))?; + + self.store_migrator.process_finalization( + new_finalized_state_root.into(), + new_view.finalized_checkpoint, + self.head_tracker.clone(), + )?; + + Ok(()) + } + + /// Return a database operation for writing fork choice to disk. + pub fn persist_fork_choice_in_batch(&self) -> KeyValueStoreOp { + Self::persist_fork_choice_in_batch_standalone(&self.canonical_head.fork_choice_read_lock()) + } + + /// Return a database operation for writing fork choice to disk. + pub fn persist_fork_choice_in_batch_standalone( + fork_choice: &BeaconForkChoice, + ) -> KeyValueStoreOp { + let persisted_fork_choice = PersistedForkChoice { + fork_choice: fork_choice.to_persisted(), + fork_choice_store: fork_choice.fc_store().to_persisted(), + }; + persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY) + } +} + +/// Check to see if the `finalized_proto_block` has an invalid execution payload. If so, shut down +/// Lighthouse. 
+/// +/// ## Notes +/// +/// This function is called whilst holding a write-lock on the `canonical_head`. To ensure dead-lock +/// safety, **do not take any other locks inside this function**. +fn check_finalized_payload_validity( + chain: &BeaconChain, + finalized_proto_block: &ProtoBlock, +) -> Result<(), Error> { + if let ExecutionStatus::Invalid(block_hash) = finalized_proto_block.execution_status { + crit!( + chain.log, + "Finalized block has an invalid payload"; + "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \ + You may be on a hostile network.", + "block_hash" => ?block_hash + ); + let mut shutdown_sender = chain.shutdown_sender(); + shutdown_sender + .try_send(ShutdownReason::Failure( + "Finalized block has an invalid execution payload.", + )) + .map_err(Error::InvalidFinalizedPayloadShutdownError)?; + + // Exit now, the node is in an invalid state. + return Err(Error::InvalidFinalizedPayload { + finalized_root: finalized_proto_block.root, + execution_block_hash: block_hash, + }); + } + + Ok(()) +} + +/// Check to ensure that the transition from `old_view` to `new_view` will not revert finality. 
+fn check_against_finality_reversion( + old_view: &ForkChoiceView, + new_view: &ForkChoiceView, +) -> Result<(), Error> { + let finalization_equal = new_view.finalized_checkpoint == old_view.finalized_checkpoint; + let finalization_advanced = + new_view.finalized_checkpoint.epoch > old_view.finalized_checkpoint.epoch; + + if finalization_equal || finalization_advanced { + Ok(()) + } else { + Err(Error::RevertedFinalizedEpoch { + old: old_view.finalized_checkpoint, + new: new_view.finalized_checkpoint, + }) + } +} + +fn perform_debug_logging( + old_view: &ForkChoiceView, + new_view: &ForkChoiceView, + fork_choice: &BeaconForkChoice, + log: &Logger, +) { + if new_view.head_block_root != old_view.head_block_root { + debug!( + log, + "Fork choice updated head"; + "new_head_weight" => ?fork_choice + .get_block_weight(&new_view.head_block_root), + "new_head" => ?new_view.head_block_root, + "old_head_weight" => ?fork_choice + .get_block_weight(&old_view.head_block_root), + "old_head" => ?old_view.head_block_root, + ) + } + if new_view.justified_checkpoint != old_view.justified_checkpoint { + debug!( + log, + "Fork choice justified"; + "new_root" => ?new_view.justified_checkpoint.root, + "new_epoch" => new_view.justified_checkpoint.epoch, + "old_root" => ?old_view.justified_checkpoint.root, + "old_epoch" => old_view.justified_checkpoint.epoch, + ) + } + if new_view.finalized_checkpoint != old_view.finalized_checkpoint { + debug!( + log, + "Fork choice finalized"; + "new_root" => ?new_view.finalized_checkpoint.root, + "new_epoch" => new_view.finalized_checkpoint.epoch, + "old_root" => ?old_view.finalized_checkpoint.root, + "old_epoch" => old_view.finalized_checkpoint.epoch, + ) + } +} + +fn spawn_execution_layer_updates( + chain: Arc>, + forkchoice_update_params: ForkchoiceUpdateParameters, +) -> Result>, Error> { + let current_slot = chain + .slot_clock + .now_or_genesis() + .ok_or(Error::UnableToReadSlot)?; + + chain + .task_executor + .clone() + .spawn_handle( + async 
move { + // Avoids raising an error before Bellatrix. + // + // See `Self::prepare_beacon_proposer` for more detail. + if chain.slot_is_prior_to_bellatrix(current_slot + 1) { + return; + } + + if let Err(e) = chain + .update_execution_engine_forkchoice(current_slot, forkchoice_update_params) + .await + { + crit!( + chain.log, + "Failed to update execution head"; + "error" => ?e + ); + } + + // Update the mechanism for preparing for block production on the execution layer. + // + // Performing this call immediately after `update_execution_engine_forkchoice_blocking` + // might result in two calls to fork choice updated, one *without* payload attributes and + // then a second *with* payload attributes. + // + // This seems OK. It's not a significant waste of EL<>CL bandwidth or resources, as far as I + // know. + if let Err(e) = chain.prepare_beacon_proposer(current_slot).await { + crit!( + chain.log, + "Failed to prepare proposers after fork choice"; + "error" => ?e + ); + } + }, + "update_el_forkchoice", + ) + .ok_or(Error::RuntimeShutdown) +} + +/// Attempt to detect if the new head is not on the same chain as the previous block +/// (i.e., a re-org). +/// +/// Note: this will declare a re-org if we skip `SLOTS_PER_HISTORICAL_ROOT` blocks +/// between calls to fork choice without swapping between chains. This seems like an +/// extreme-enough scenario that a warning is fine. 
+fn detect_reorg( + old_state: &BeaconState, + old_block_root: Hash256, + new_state: &BeaconState, + new_block_root: Hash256, + spec: &ChainSpec, + log: &Logger, +) -> Option { + let is_reorg = new_state + .get_block_root(old_state.slot()) + .map_or(true, |root| *root != old_block_root); + + if is_reorg { + let reorg_distance = + match find_reorg_slot(old_state, old_block_root, new_state, new_block_root, spec) { + Ok(slot) => old_state.slot().saturating_sub(slot), + Err(e) => { + warn!( + log, + "Could not find re-org depth"; + "error" => format!("{:?}", e), + ); + return None; + } + }; + + metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT); + metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT_INTEROP); + warn!( + log, + "Beacon chain re-org"; + "previous_head" => ?old_block_root, + "previous_slot" => old_state.slot(), + "new_head" => ?new_block_root, + "new_slot" => new_state.slot(), + "reorg_distance" => reorg_distance, + ); + + Some(reorg_distance) + } else { + None + } +} + +/// Iterate through the current chain to find the slot intersecting with the given beacon state. +/// The maximum depth this will search is `SLOTS_PER_HISTORICAL_ROOT`, and if that depth is reached +/// and no intersection is found, the finalized slot will be returned. +pub fn find_reorg_slot( + old_state: &BeaconState, + old_block_root: Hash256, + new_state: &BeaconState, + new_block_root: Hash256, + spec: &ChainSpec, +) -> Result { + // The earliest slot for which the two chains may have a common history. + let lowest_slot = std::cmp::min(new_state.slot(), old_state.slot()); + + // Create an iterator across `$state`, assuming that the block at `$state.slot` has the + // block root of `$block_root`. + // + // The iterator will be skipped until the next value returns `lowest_slot`. + // + // This is a macro instead of a function or closure due to the complex types involved + // in all the iterator wrapping. + macro_rules! 
aligned_roots_iter { + ($state: ident, $block_root: ident) => { + std::iter::once(Ok(($state.slot(), $block_root))) + .chain($state.rev_iter_block_roots(spec)) + .skip_while(|result| { + result + .as_ref() + .map_or(false, |(slot, _)| *slot > lowest_slot) + }) + }; + } + + // Create iterators across old/new roots where iterators both start at the same slot. + let mut new_roots = aligned_roots_iter!(new_state, new_block_root); + let mut old_roots = aligned_roots_iter!(old_state, old_block_root); + + // Whilst *both* of the iterators are still returning values, try and find a common + // ancestor between them. + while let (Some(old), Some(new)) = (old_roots.next(), new_roots.next()) { + let (old_slot, old_root) = old?; + let (new_slot, new_root) = new?; + + // Sanity check to detect programming errors. + if old_slot != new_slot { + return Err(Error::InvalidReorgSlotIter { new_slot, old_slot }); + } + + if old_root == new_root { + // A common ancestor has been found. + return Ok(old_slot); + } + } + + // If no common ancestor is found, declare that the re-org happened at the previous + // finalized slot. + // + // Sometimes this will result in the return slot being *lower* than the actual reorg + // slot. However, assuming we don't re-org through a finalized slot, it will never be + // *higher*. + // + // We provide this potentially-inaccurate-but-safe information to avoid onerous + // database reads during times of deep reorgs. 
+ Ok(old_state + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch())) +} + +fn observe_head_block_delays( + block_times_cache: &mut BlockTimesCache, + head_block: &ProtoBlock, + head_block_proposer_index: u64, + head_block_graffiti: String, + slot_clock: &S, + event_handler: Option<&ServerSentEventHandler>, + log: &Logger, +) { + let block_time_set_as_head = timestamp_now(); + let head_block_root = head_block.root; + let head_block_slot = head_block.slot; + + // Calculate the total delay between the start of the slot and when it was set as head. + let block_delay_total = get_slot_delay_ms(block_time_set_as_head, head_block_slot, slot_clock); + + // Do not write to the cache for blocks older than 2 epochs, this helps reduce writes to + // the cache during sync. + if block_delay_total < slot_clock.slot_duration() * 64 { + block_times_cache.set_time_set_as_head( + head_block_root, + head_block_slot, + block_time_set_as_head, + ); + } + + // If a block comes in from over 4 slots ago, it is most likely a block from sync. + let block_from_sync = block_delay_total > slot_clock.slot_duration() * 4; + + // Determine whether the block has been set as head too late for proper attestation + // production. + let late_head = block_delay_total >= slot_clock.unagg_attestation_production_delay(); + + // Do not store metrics if the block was > 4 slots old, this helps prevent noise during + // sync. + if !block_from_sync { + // Observe the total block delay. This is the delay between the time the slot started + // and when the block was set as head. + metrics::observe_duration( + &metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME, + block_delay_total, + ); + + // Observe the delay between when we imported the block and when we set the block as + // head. 
+ let block_delays = block_times_cache.get_block_delays( + head_block_root, + slot_clock + .start_of(head_block_slot) + .unwrap_or_else(|| Duration::from_secs(0)), + ); + + metrics::observe_duration( + &metrics::BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME, + block_delays + .observed + .unwrap_or_else(|| Duration::from_secs(0)), + ); + + metrics::observe_duration( + &metrics::BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME, + block_delays + .set_as_head + .unwrap_or_else(|| Duration::from_secs(0)), + ); + + // If the block was enshrined as head too late for attestations to be created for it, + // log a debug warning and increment a metric. + if late_head { + metrics::inc_counter(&metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_EXCEEDED_TOTAL); + debug!( + log, + "Delayed head block"; + "block_root" => ?head_block_root, + "proposer_index" => head_block_proposer_index, + "slot" => head_block_slot, + "block_delay" => ?block_delay_total, + "observed_delay" => ?block_delays.observed, + "imported_delay" => ?block_delays.imported, + "set_as_head_delay" => ?block_delays.set_as_head, + ); + } + } + + if let Some(event_handler) = event_handler { + if !block_from_sync && late_head && event_handler.has_late_head_subscribers() { + let peer_info = block_times_cache.get_peer_info(head_block_root); + let block_delays = block_times_cache.get_block_delays( + head_block_root, + slot_clock + .start_of(head_block_slot) + .unwrap_or_else(|| Duration::from_secs(0)), + ); + event_handler.register(EventKind::LateHead(SseLateHead { + slot: head_block_slot, + block: head_block_root, + peer_id: peer_info.id, + peer_client: peer_info.client, + proposer_index: head_block_proposer_index, + proposer_graffiti: head_block_graffiti, + block_delay: block_delay_total, + observed_delay: block_delays.observed, + imported_delay: block_delays.imported, + set_as_head_delay: block_delays.set_as_head, + })); + } + } +} diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs 
b/beacon_node/beacon_chain/src/early_attester_cache.rs index f589585f8a..62b584968f 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -4,6 +4,7 @@ use crate::{ }; use parking_lot::RwLock; use proto_array::Block as ProtoBlock; +use std::sync::Arc; use types::*; pub struct CacheItem { @@ -18,7 +19,7 @@ pub struct CacheItem { /* * Values used to make the block available. */ - block: SignedBeaconBlock, + block: Arc>, proto_block: ProtoBlock, } @@ -48,7 +49,7 @@ impl EarlyAttesterCache { pub fn add_head_block( &self, beacon_block_root: Hash256, - block: SignedBeaconBlock, + block: Arc>, proto_block: ProtoBlock, state: &BeaconState, spec: &ChainSpec, @@ -146,7 +147,7 @@ impl EarlyAttesterCache { } /// Returns the block, if `block_root` matches the cached item. - pub fn get_block(&self, block_root: Hash256) -> Option> { + pub fn get_block(&self, block_root: Hash256) -> Option>> { self.item .read() .as_ref() diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 834823992a..d3337dfafe 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -45,8 +45,8 @@ pub enum BeaconChainError { UnableToReadSlot, UnableToComputeTimeAtSlot, RevertedFinalizedEpoch { - previous_epoch: Epoch, - new_epoch: Epoch, + old: Checkpoint, + new: Checkpoint, }, SlotClockDidNotStart, NoStateForSlot(Slot), @@ -161,6 +161,7 @@ pub enum BeaconChainError { BlockRewardSyncError, HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), + HeadBlockMissingFromForkChoice(Hash256), InvalidFinalizedPayload { finalized_root: Hash256, execution_block_hash: ExecutionBlockHash, @@ -184,11 +185,19 @@ pub enum BeaconChainError { beacon_block_root: Hash256, }, RuntimeShutdown, + TokioJoin(tokio::task::JoinError), ProcessInvalidExecutionPayload(JoinError), ForkChoiceSignalOutOfOrder { current: Slot, latest: Slot, }, + 
ForkchoiceUpdateParamsMissing, + HeadHasInvalidPayload { + block_root: Hash256, + execution_status: ExecutionStatus, + }, + AttestationHeadNotInForkChoice(Hash256), + MissingPersistedForkChoice, } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -214,7 +223,6 @@ easy_from_to!(BlockReplayError, BeaconChainError); #[derive(Debug)] pub enum BlockProductionError { - UnableToGetHeadInfo(BeaconChainError), UnableToGetBlockRootFromState, UnableToReadSlot, UnableToProduceAtSlot(Slot), @@ -239,6 +247,11 @@ pub enum BlockProductionError { MissingFinalizedBlock(Hash256), BlockTooLarge(usize), ForkChoiceError(BeaconChainError), + ShuttingDown, + MissingSyncAggregate, + MissingExecutionPayload, + TokioJoin(tokio::task::JoinError), + BeaconChain(BeaconChainError), } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 7085fc6500..747b8a468d 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -21,8 +21,59 @@ use state_processing::per_block_processing::{ partially_verify_execution_payload, }; use std::sync::Arc; +use tokio::task::JoinHandle; use types::*; +pub type PreparePayloadResult = Result; +pub type PreparePayloadHandle = JoinHandle>>; + +/// Used to await the result of executing payload with a remote EE. +pub struct PayloadNotifier { + pub chain: Arc>, + pub block: Arc>, + payload_verification_status: Option, +} + +impl PayloadNotifier { + pub fn new( + chain: Arc>, + block: Arc>, + state: &BeaconState, + ) -> Result> { + let payload_verification_status = if is_execution_enabled(state, block.message().body()) { + // Perform the initial stages of payload verification. + // + // We will duplicate these checks again during `per_block_processing`, however these checks + // are cheap and doing them here ensures we protect the execution engine from junk. 
+ partially_verify_execution_payload( + state, + block.message().execution_payload()?, + &chain.spec, + ) + .map_err(BlockError::PerBlockProcessingError)?; + None + } else { + Some(PayloadVerificationStatus::Irrelevant) + }; + + Ok(Self { + chain, + block, + payload_verification_status, + }) + } + + pub async fn notify_new_payload( + self, + ) -> Result> { + if let Some(precomputed_status) = self.payload_verification_status { + Ok(precomputed_status) + } else { + notify_new_payload(&self.chain, self.block.message()).await + } + } +} + /// Verify that `execution_payload` contained by `block` is considered valid by an execution /// engine. /// @@ -32,31 +83,20 @@ use types::*; /// contains a few extra checks by running `partially_verify_execution_payload` first: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#notify_new_payload -pub fn notify_new_payload( +async fn notify_new_payload<'a, T: BeaconChainTypes>( chain: &Arc>, - state: &BeaconState, - block: BeaconBlockRef, + block: BeaconBlockRef<'a, T::EthSpec>, ) -> Result> { - if !is_execution_enabled(state, block.body()) { - return Ok(PayloadVerificationStatus::Irrelevant); - } - let execution_payload = block.execution_payload()?; - // Perform the initial stages of payload verification. - // - // We will duplicate these checks again during `per_block_processing`, however these checks - // are cheap and doing them here ensures we protect the execution payload from junk. 
- partially_verify_execution_payload(state, execution_payload, &chain.spec) - .map_err(BlockError::PerBlockProcessingError)?; - let execution_layer = chain .execution_layer .as_ref() .ok_or(ExecutionPayloadError::NoExecutionConnection)?; - let new_payload_response = execution_layer.block_on(|execution_layer| { - execution_layer.notify_new_payload(&execution_payload.execution_payload) - }); + + let new_payload_response = execution_layer + .notify_new_payload(&execution_payload.execution_payload) + .await; match new_payload_response { Ok(status) => match status { @@ -70,13 +110,13 @@ pub fn notify_new_payload( // This block has not yet been applied to fork choice, so the latest block that was // imported to fork choice was the parent. let latest_root = block.parent_root(); - chain.process_invalid_execution_payload( - &InvalidationOperation::InvalidateMany { + chain + .process_invalid_execution_payload(&InvalidationOperation::InvalidateMany { head_block_root: latest_root, always_invalidate_head: false, latest_valid_ancestor: latest_valid_hash, - }, - )?; + }) + .await?; Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()) } @@ -103,9 +143,9 @@ pub fn notify_new_payload( /// Equivalent to the `validate_merge_block` function in the merge Fork Choice Changes: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/fork-choice.md#validate_merge_block -pub fn validate_merge_block( - chain: &BeaconChain, - block: BeaconBlockRef, +pub async fn validate_merge_block<'a, T: BeaconChainTypes>( + chain: &Arc>, + block: BeaconBlockRef<'a, T::EthSpec>, ) -> Result<(), BlockError> { let spec = &chain.spec; let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); @@ -137,9 +177,8 @@ pub fn validate_merge_block( .ok_or(ExecutionPayloadError::NoExecutionConnection)?; let is_valid_terminal_pow_block = execution_layer - .block_on(|execution_layer| { - execution_layer.is_valid_terminal_pow_block_hash(execution_payload.parent_hash(), spec) 
- }) + .is_valid_terminal_pow_block_hash(execution_payload.parent_hash(), spec) + .await .map_err(ExecutionPayloadError::from)?; match is_valid_terminal_pow_block { @@ -149,23 +188,7 @@ pub fn validate_merge_block( } .into()), None => { - let current_slot = chain - .slot_clock - .now() - .ok_or(BeaconChainError::UnableToReadSlot)?; - - // Ensure the block is a candidate for optimistic import. - if chain - .fork_choice - .read() - .is_optimistic_candidate_block( - current_slot, - block.slot(), - &block.parent_root(), - &chain.spec, - ) - .map_err(BeaconChainError::from)? - { + if is_optimistic_candidate_block(chain, block.slot(), block.parent_root()).await? { debug!( chain.log, "Optimistically accepting terminal block"; @@ -180,6 +203,36 @@ pub fn validate_merge_block( } } +/// Check to see if a block with the given parameters is valid to be imported optimistically. +pub async fn is_optimistic_candidate_block( + chain: &Arc>, + block_slot: Slot, + block_parent_root: Hash256, +) -> Result { + let current_slot = chain.slot()?; + let inner_chain = chain.clone(); + + // Use a blocking task to check if the block is an optimistic candidate. Interacting + // with the `fork_choice` lock in an async task can block the core executor. + chain + .spawn_blocking_handle( + move || { + inner_chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_candidate_block( + current_slot, + block_slot, + &block_parent_root, + &inner_chain.spec, + ) + }, + "validate_merge_block_optimistic_candidate", + ) + .await? 
+ .map_err(BeaconChainError::from) +} + /// Validate the gossip block's execution_payload according to the checks described here: /// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#beacon_block pub fn validate_execution_payload_for_gossip( @@ -243,35 +296,52 @@ pub fn validate_execution_payload_for_gossip( /// Equivalent to the `get_execution_payload` function in the Validator Guide: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal -pub fn get_execution_payload>( - chain: &BeaconChain, +pub fn get_execution_payload< + T: BeaconChainTypes, + Payload: ExecPayload + Default + Send + 'static, +>( + chain: Arc>, state: &BeaconState, + finalized_checkpoint: Checkpoint, proposer_index: u64, pubkey: Option, -) -> Result { - Ok( - prepare_execution_payload_blocking::(chain, state, proposer_index, pubkey)? - .unwrap_or_default(), - ) -} +) -> Result, BlockProductionError> { + // Compute all required values from the `state` now to avoid needing to pass it into a spawned + // task. + let spec = &chain.spec; + let slot = state.slot(); + let current_epoch = state.current_epoch(); + let is_merge_transition_complete = is_merge_transition_complete(state); + let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; + let random = *state.get_randao_mix(current_epoch)?; + let latest_execution_payload_header_block_hash = + state.latest_execution_payload_header()?.block_hash; -/// Wraps the async `prepare_execution_payload` function as a blocking task. -pub fn prepare_execution_payload_blocking>( - chain: &BeaconChain, - state: &BeaconState, - proposer_index: u64, - pubkey: Option, -) -> Result, BlockProductionError> { - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BlockProductionError::ExecutionLayerMissing)?; + // Spawn a task to obtain the execution payload from the EL via a series of async calls. 
The + // `join_handle` can be used to await the result of the function. + let join_handle = chain + .task_executor + .clone() + .spawn_handle( + async move { + prepare_execution_payload::( + &chain, + slot, + is_merge_transition_complete, + timestamp, + random, + finalized_checkpoint, + proposer_index, + pubkey, + latest_execution_payload_header_block_hash, + ) + .await + }, + "get_execution_payload", + ) + .ok_or(BlockProductionError::ShuttingDown)?; - execution_layer - .block_on_generic(|_| async { - prepare_execution_payload::(chain, state, proposer_index, pubkey).await - }) - .map_err(BlockProductionError::BlockingFailed)? + Ok(join_handle) } /// Prepares an execution payload for inclusion in a block. @@ -288,25 +358,38 @@ pub fn prepare_execution_payload_blocking>( - chain: &BeaconChain, - state: &BeaconState, +#[allow(clippy::too_many_arguments)] +pub async fn prepare_execution_payload( + chain: &Arc>, + slot: Slot, + is_merge_transition_complete: bool, + timestamp: u64, + random: Hash256, + finalized_checkpoint: Checkpoint, proposer_index: u64, pubkey: Option, -) -> Result, BlockProductionError> { + latest_execution_payload_header_block_hash: ExecutionBlockHash, +) -> Result +where + T: BeaconChainTypes, + Payload: ExecPayload + Default, +{ + let current_epoch = slot.epoch(T::EthSpec::slots_per_epoch()); let spec = &chain.spec; let execution_layer = chain .execution_layer .as_ref() .ok_or(BlockProductionError::ExecutionLayerMissing)?; - let parent_hash = if !is_merge_transition_complete(state) { + let parent_hash = if !is_merge_transition_complete { let is_terminal_block_hash_set = spec.terminal_block_hash != ExecutionBlockHash::zero(); let is_activation_epoch_reached = - state.current_epoch() >= spec.terminal_block_hash_activation_epoch; + current_epoch >= spec.terminal_block_hash_activation_epoch; if is_terminal_block_hash_set && !is_activation_epoch_reached { - return Ok(None); + // Use the "empty" payload if there's a terminal block hash, but we haven't 
reached the + // terminal block epoch yet. + return Ok(<_>::default()); } let terminal_pow_block_hash = execution_layer @@ -317,36 +400,55 @@ pub async fn prepare_execution_payload::default()); } } else { - state.latest_execution_payload_header()?.block_hash + latest_execution_payload_header_block_hash }; - let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; - let random = *state.get_randao_mix(state.current_epoch())?; - let finalized_root = state.finalized_checkpoint().root; + // Try to obtain the finalized proto block from fork choice. + // + // Use a blocking task to interact with the `fork_choice` lock otherwise we risk blocking the + // core `tokio` executor. + let inner_chain = chain.clone(); + let finalized_proto_block = chain + .spawn_blocking_handle( + move || { + inner_chain + .canonical_head + .fork_choice_read_lock() + .get_block(&finalized_checkpoint.root) + }, + "prepare_execution_payload_finalized_hash", + ) + .await + .map_err(BlockProductionError::BeaconChain)?; // The finalized block hash is not included in the specification, however we provide this // parameter so that the execution layer can produce a payload id if one is not already known // (e.g., due to a recent reorg). - let finalized_block_hash = - if let Some(block) = chain.fork_choice.read().get_block(&finalized_root) { - block.execution_status.block_hash() - } else { - chain - .store - .get_blinded_block(&finalized_root) - .map_err(BlockProductionError::FailedToReadFinalizedBlock)? - .ok_or(BlockProductionError::MissingFinalizedBlock(finalized_root))? - .message() - .body() - .execution_payload() - .ok() - .map(|ep| ep.block_hash()) - }; + let finalized_block_hash = if let Some(block) = finalized_proto_block { + block.execution_status.block_hash() + } else { + chain + .store + .get_blinded_block(&finalized_checkpoint.root) + .map_err(BlockProductionError::FailedToReadFinalizedBlock)? 
+ .ok_or(BlockProductionError::MissingFinalizedBlock( + finalized_checkpoint.root, + ))? + .message() + .body() + .execution_payload() + .ok() + .map(|ep| ep.block_hash()) + }; // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. + // + // This future is not executed here, it's up to the caller to await it. let execution_payload = execution_layer .get_payload::( parent_hash, @@ -355,10 +457,10 @@ pub async fn prepare_execution_payload, Cold: It head_block_root: Hash256, head_state: &BeaconState, store: Arc>, + current_slot: Option, spec: &ChainSpec, ) -> Result, E>, String> { // Fetch finalized block. @@ -138,7 +139,7 @@ pub fn reset_fork_choice_to_finalization, Cold: It })?; let finalized_snapshot = BeaconSnapshot { beacon_block_root: finalized_block_root, - beacon_block: finalized_block, + beacon_block: Arc::new(finalized_block), beacon_state: finalized_state, }; @@ -149,6 +150,8 @@ pub fn reset_fork_choice_to_finalization, Cold: It finalized_block_root, &finalized_snapshot.beacon_block, &finalized_snapshot.beacon_state, + current_slot, + spec, ) .map_err(|e| format!("Unable to reset fork choice for revert: {:?}", e))?; @@ -180,11 +183,10 @@ pub fn reset_fork_choice_to_finalization, Cold: It // This scenario is so rare that it seems OK to double-verify some blocks. let payload_verification_status = PayloadVerificationStatus::Optimistic; - let (block, _) = block.deconstruct(); fork_choice .on_block( block.slot(), - &block, + block.message(), block.canonical_root(), // Reward proposer boost. We are reinforcing the canonical chain. 
Duration::from_secs(0), diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 1891362ebb..cc45a6bb9a 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -7,6 +7,7 @@ use state_processing::{ }; use std::borrow::Cow; use std::iter; +use std::sync::Arc; use std::time::Duration; use store::{chunked_vector::BlockRoots, AnchorInfo, ChunkWriter, KeyValueStore}; use types::{Hash256, SignedBlindedBeaconBlock, Slot}; @@ -58,7 +59,7 @@ impl BeaconChain { /// Return the number of blocks successfully imported. pub fn import_historical_block_batch( &self, - blocks: Vec>, + blocks: Vec>>, ) -> Result { let anchor_info = self .store diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 579020b1d1..b82b690d20 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -9,6 +9,7 @@ pub mod block_reward; mod block_times_cache; mod block_verification; pub mod builder; +pub mod canonical_head; pub mod chain_config; mod early_attester_cache; mod errors; @@ -42,8 +43,8 @@ mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - ForkChoiceError, HeadInfo, HeadSafetyStatus, ProduceBlockVerification, StateSkipConfig, - WhenSlotSkipped, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + ForkChoiceError, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; pub use self::beacon_snapshot::BeaconSnapshot; pub use self::chain_config::ChainConfig; @@ -52,8 +53,10 @@ pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; pub use 
block_verification::{BlockError, ExecutionPayloadError, GossipVerifiedBlock}; +pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::ServerSentEventHandler; +pub use fork_choice::ExecutionStatus; pub use metrics::scrape_for_metrics; pub use parking_lot; pub use slot_clock; diff --git a/beacon_node/beacon_chain/src/proposer_prep_service.rs b/beacon_node/beacon_chain/src/proposer_prep_service.rs index 18abbc8c5b..9cd177b340 100644 --- a/beacon_node/beacon_chain/src/proposer_prep_service.rs +++ b/beacon_node/beacon_chain/src/proposer_prep_service.rs @@ -51,9 +51,7 @@ async fn proposer_prep_service( executor.spawn( async move { if let Ok(current_slot) = inner_chain.slot() { - if let Err(e) = inner_chain - .prepare_beacon_proposer_async(current_slot) - .await + if let Err(e) = inner_chain.prepare_beacon_proposer(current_slot).await { error!( inner_chain.log, diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 8fb4f82bed..a48f1d3756 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -7,6 +7,7 @@ mod types; use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; +use crate::types::ChainSpec; use slog::{warn, Logger}; use std::path::Path; use std::sync::Arc; @@ -21,6 +22,7 @@ pub fn migrate_schema( from: SchemaVersion, to: SchemaVersion, log: Logger, + spec: &ChainSpec, ) -> Result<(), StoreError> { match (from, to) { // Migrating from the current schema version to iself is always OK, a no-op. @@ -28,8 +30,8 @@ pub fn migrate_schema( // Upgrade across multiple versions by recursively migrating one step at a time. 
(_, _) if from.as_u64() + 1 < to.as_u64() => { let next = SchemaVersion(from.as_u64() + 1); - migrate_schema::(db.clone(), datadir, from, next, log.clone())?; - migrate_schema::(db, datadir, next, to, log) + migrate_schema::(db.clone(), datadir, from, next, log.clone(), spec)?; + migrate_schema::(db, datadir, next, to, log, spec) } // @@ -89,6 +91,7 @@ pub fn migrate_schema( migration_schema_v7::update_with_reinitialized_fork_choice::( &mut persisted_fork_choice_v7, db.clone(), + spec, ) .map_err(StoreError::SchemaMigrationError)?; } diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs index 4cede798ea..9222266ba9 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs @@ -3,8 +3,7 @@ use crate::beacon_chain::BeaconChainTypes; use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7}; use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; use crate::schema_change::types::{ProtoNodeV6, SszContainerV6, SszContainerV7}; -use crate::types::{Checkpoint, Epoch, Hash256}; -use crate::types::{EthSpec, Slot}; +use crate::types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; use crate::{BeaconForkChoiceStore, BeaconSnapshot}; use fork_choice::ForkChoice; use proto_array::{core::ProtoNode, core::SszContainer, ProtoArrayForkChoice}; @@ -25,6 +24,7 @@ four_byte_option_impl!(four_byte_option_usize, usize); pub(crate) fn update_with_reinitialized_fork_choice( persisted_fork_choice: &mut PersistedForkChoiceV7, db: Arc>, + spec: &ChainSpec, ) -> Result<(), String> { let anchor_block_root = persisted_fork_choice .fork_choice_store @@ -39,7 +39,7 @@ pub(crate) fn update_with_reinitialized_fork_choice( .map_err(|e| format!("{:?}", e))? 
.ok_or_else(|| "Missing anchor beacon state".to_string())?; let snapshot = BeaconSnapshot { - beacon_block: anchor_block, + beacon_block: Arc::new(anchor_block), beacon_block_root: anchor_block_root, beacon_state: anchor_state, }; @@ -49,6 +49,10 @@ pub(crate) fn update_with_reinitialized_fork_choice( anchor_block_root, &snapshot.beacon_block, &snapshot.beacon_state, + // Don't provide the current slot here, just use what's in the store. We don't need to know + // the head here, plus it's nice to avoid mutating fork choice during this process. + None, + spec, ) .map_err(|e| format!("{:?}", e))?; persisted_fork_choice.fork_choice = fork_choice.to_persisted(); diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index 5a287daf0f..0bbd4419b9 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -47,6 +47,12 @@ impl ShufflingCache { } } +impl Default for ShufflingCache { + fn default() -> Self { + Self::new() + } +} + /// Contains the shuffling IDs for a beacon block. 
pub struct BlockShufflingIds { pub current: AttestationShufflingId, diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index d5b41366cc..40b73451cb 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -1,6 +1,7 @@ use crate::BeaconSnapshot; use itertools::process_results; use std::cmp; +use std::sync::Arc; use std::time::Duration; use types::{ beacon_state::CloneConfig, BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, @@ -33,7 +34,7 @@ impl From> for PreProcessingSnapshot { Self { pre_state: snapshot.beacon_state, beacon_state_root, - beacon_block: snapshot.beacon_block.into(), + beacon_block: snapshot.beacon_block.clone_as_blinded(), beacon_block_root: snapshot.beacon_block_root, } } @@ -63,7 +64,7 @@ impl CacheItem { Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none()); PreProcessingSnapshot { - beacon_block: self.beacon_block.into(), + beacon_block: self.beacon_block.clone_as_blinded(), beacon_block_root: self.beacon_block_root, pre_state: self.pre_state.unwrap_or(self.beacon_state), beacon_state_root, @@ -76,7 +77,7 @@ impl CacheItem { Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none()); PreProcessingSnapshot { - beacon_block: self.beacon_block.clone().into(), + beacon_block: self.beacon_block.clone_as_blinded(), beacon_block_root: self.beacon_block_root, pre_state: self .pre_state @@ -116,7 +117,7 @@ pub enum StateAdvance { /// The item stored in the `SnapshotCache`. pub struct CacheItem { - beacon_block: SignedBeaconBlock, + beacon_block: Arc>, beacon_block_root: Hash256, /// This state is equivalent to `self.beacon_block.state_root()`. 
beacon_state: BeaconState, @@ -185,7 +186,7 @@ impl SnapshotCache { ) { let parent_root = snapshot.beacon_block.message().parent_root(); let item = CacheItem { - beacon_block: snapshot.beacon_block, + beacon_block: snapshot.beacon_block.clone(), beacon_block_root: snapshot.beacon_block_root, beacon_state: snapshot.beacon_state, pre_state, @@ -384,7 +385,7 @@ mod test { fn get_snapshot(i: u64) -> BeaconSnapshot { let spec = MainnetEthSpec::default_spec(); - let beacon_state = get_harness().chain.head_beacon_state().unwrap(); + let beacon_state = get_harness().chain.head_beacon_state_cloned(); let signed_beacon_block = SignedBeaconBlock::from_block( BeaconBlock::empty(&spec), @@ -395,7 +396,7 @@ mod test { BeaconSnapshot { beacon_state, - beacon_block: signed_beacon_block, + beacon_block: Arc::new(signed_beacon_block), beacon_block_root: Hash256::from_low_u64_be(i), } } diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 030507a83a..5abec98877 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -213,16 +213,14 @@ async fn state_advance_timer( let log = log.clone(); let beacon_chain = beacon_chain.clone(); let next_slot = current_slot + 1; - executor.spawn_blocking( - move || { + executor.spawn( + async move { // Don't run fork choice during sync. - if beacon_chain.best_slot().map_or(true, |head_slot| { - head_slot + MAX_FORK_CHOICE_DISTANCE < current_slot - }) { + if beacon_chain.best_slot() + MAX_FORK_CHOICE_DISTANCE < current_slot { return; } - if let Err(e) = beacon_chain.fork_choice_at_slot(next_slot) { + if let Err(e) = beacon_chain.recompute_head_at_slot(next_slot).await { warn!( log, "Error updating fork choice for next slot"; @@ -231,17 +229,24 @@ async fn state_advance_timer( ); } - // Signal block proposal for the next slot (if it happens to be waiting). 
- if let Some(tx) = &beacon_chain.fork_choice_signal_tx { - if let Err(e) = tx.notify_fork_choice_complete(next_slot) { - warn!( - log, - "Error signalling fork choice waiter"; - "error" => ?e, - "slot" => next_slot, - ); - } - } + // Use a blocking task to avoid blocking the core executor whilst waiting for locks + // in `ForkChoiceSignalTx`. + beacon_chain.task_executor.clone().spawn_blocking( + move || { + // Signal block proposal for the next slot (if it happens to be waiting). + if let Some(tx) = &beacon_chain.fork_choice_signal_tx { + if let Err(e) = tx.notify_fork_choice_complete(next_slot) { + warn!( + log, + "Error signalling fork choice waiter"; + "error" => ?e, + "slot" => next_slot, + ); + } + } + }, + "fork_choice_advance_signal_tx", + ); }, "fork_choice_advance", ); @@ -264,7 +269,7 @@ fn advance_head( // // Fork-choice is not run *before* this function to avoid unnecessary calls whilst syncing. { - let head_slot = beacon_chain.head_info()?.slot; + let head_slot = beacon_chain.best_slot(); // Don't run this when syncing or if lagging too far behind. 
if head_slot + MAX_ADVANCE_DISTANCE < current_slot { @@ -275,7 +280,7 @@ fn advance_head( } } - let head_root = beacon_chain.head_info()?.block_root; + let head_root = beacon_chain.head_beacon_block_root(); let (head_slot, head_state_root, mut state) = match beacon_chain .snapshot_cache diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 980de25cf3..62765c2222 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -515,13 +515,38 @@ where } pub fn get_current_state(&self) -> BeaconState { - self.chain.head().unwrap().beacon_state + self.chain.head_beacon_state_cloned() } pub fn get_current_state_and_root(&self) -> (BeaconState, Hash256) { - let head = self.chain.head().unwrap(); + let head = self.chain.head_snapshot(); let state_root = head.beacon_state_root(); - (head.beacon_state, state_root) + ( + head.beacon_state.clone_with_only_committee_caches(), + state_root, + ) + } + + pub fn head_slot(&self) -> Slot { + self.chain.canonical_head.cached_head().head_slot() + } + + pub fn head_block_root(&self) -> Hash256 { + self.chain.canonical_head.cached_head().head_block_root() + } + + pub fn finalized_checkpoint(&self) -> Checkpoint { + self.chain + .canonical_head + .cached_head() + .finalized_checkpoint() + } + + pub fn justified_checkpoint(&self) -> Checkpoint { + self.chain + .canonical_head + .cached_head() + .justified_checkpoint() } pub fn get_current_slot(&self) -> Slot { @@ -565,7 +590,7 @@ where state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() } - pub fn make_block( + pub async fn make_block( &self, mut state: BeaconState, slot: Slot, @@ -599,6 +624,7 @@ where Some(graffiti), ProduceBlockVerification::VerifyRandao, ) + .await .unwrap(); let signed_block = block.sign( @@ -613,7 +639,7 @@ where /// Useful for the `per_block_processing` tests. 
Creates a block, and returns the state after /// caches are built but before the generated block is processed. - pub fn make_block_return_pre_state( + pub async fn make_block_return_pre_state( &self, mut state: BeaconState, slot: Slot, @@ -649,6 +675,7 @@ where Some(graffiti), ProduceBlockVerification::VerifyRandao, ) + .await .unwrap(); let signed_block = block.sign( @@ -1098,11 +1125,11 @@ where let mut attestation_2 = attestation_1.clone(); attestation_2.data.index += 1; + let fork = self.chain.canonical_head.cached_head().head_fork(); for attestation in &mut [&mut attestation_1, &mut attestation_2] { for &i in &attestation.attesting_indices { let sk = &self.validator_keypairs[i as usize].sk; - let fork = self.chain.head_info().unwrap().fork; let genesis_validators_root = self.chain.genesis_validators_root; let domain = self.chain.spec.get_domain( @@ -1156,11 +1183,11 @@ where attestation_2.data.index += 1; + let fork = self.chain.canonical_head.cached_head().head_fork(); for attestation in &mut [&mut attestation_1, &mut attestation_2] { for &i in &attestation.attesting_indices { let sk = &self.validator_keypairs[i as usize].sk; - let fork = self.chain.head_info().unwrap().fork; let genesis_validators_root = self.chain.genesis_validators_root; let domain = self.chain.spec.get_domain( @@ -1182,19 +1209,14 @@ where } pub fn make_proposer_slashing(&self, validator_index: u64) -> ProposerSlashing { - let mut block_header_1 = self - .chain - .head_beacon_block() - .unwrap() - .message() - .block_header(); + let mut block_header_1 = self.chain.head_beacon_block().message().block_header(); block_header_1.proposer_index = validator_index; let mut block_header_2 = block_header_1.clone(); block_header_2.state_root = Hash256::zero(); let sk = &self.validator_keypairs[validator_index as usize].sk; - let fork = self.chain.head_info().unwrap().fork; + let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = 
self.chain.genesis_validators_root; let mut signed_block_headers = vec![block_header_1, block_header_2] @@ -1212,7 +1234,7 @@ where pub fn make_voluntary_exit(&self, validator_index: u64, epoch: Epoch) -> SignedVoluntaryExit { let sk = &self.validator_keypairs[validator_index as usize].sk; - let fork = self.chain.head_info().unwrap().fork; + let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; VoluntaryExit { @@ -1235,7 +1257,7 @@ where /// Create a new block, apply `block_modifier` to it, sign it and return it. /// /// The state returned is a pre-block state at the same slot as the produced block. - pub fn make_block_with_modifier( + pub async fn make_block_with_modifier( &self, state: BeaconState, slot: Slot, @@ -1244,7 +1266,7 @@ where assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); - let (block, state) = self.make_block_return_pre_state(state, slot); + let (block, state) = self.make_block_return_pre_state(state, slot).await; let (mut block, _) = block.deconstruct(); block_modifier(&mut block); @@ -1332,23 +1354,25 @@ where (deposits, state) } - pub fn process_block( + pub async fn process_block( &self, slot: Slot, block: SignedBeaconBlock, ) -> Result> { self.set_current_slot(slot); - let block_hash: SignedBeaconBlockHash = self.chain.process_block(block)?.into(); - self.chain.fork_choice()?; + let block_hash: SignedBeaconBlockHash = + self.chain.process_block(Arc::new(block)).await?.into(); + self.chain.recompute_head_at_current_slot().await?; Ok(block_hash) } - pub fn process_block_result( + pub async fn process_block_result( &self, block: SignedBeaconBlock, ) -> Result> { - let block_hash: SignedBeaconBlockHash = self.chain.process_block(block)?.into(); - self.chain.fork_choice().unwrap(); + let block_hash: SignedBeaconBlockHash = + self.chain.process_block(Arc::new(block)).await?.into(); + self.chain.recompute_head_at_current_slot().await?; 
Ok(block_hash) } @@ -1403,14 +1427,14 @@ where self.chain.slot_clock.set_slot(slot.into()); } - pub fn add_block_at_slot( + pub async fn add_block_at_slot( &self, slot: Slot, state: BeaconState, ) -> Result<(SignedBeaconBlockHash, SignedBeaconBlock, BeaconState), BlockError> { self.set_current_slot(slot); - let (block, new_state) = self.make_block(state, slot); - let block_hash = self.process_block(slot, block.clone())?; + let (block, new_state) = self.make_block(state, slot).await; + let block_hash = self.process_block(slot, block.clone()).await?; Ok((block_hash, block, new_state)) } @@ -1427,19 +1451,19 @@ where self.process_attestations(attestations); } - pub fn add_attested_block_at_slot( + pub async fn add_attested_block_at_slot( &self, slot: Slot, state: BeaconState, state_root: Hash256, validators: &[usize], ) -> Result<(SignedBeaconBlockHash, BeaconState), BlockError> { - let (block_hash, block, state) = self.add_block_at_slot(slot, state)?; + let (block_hash, block, state) = self.add_block_at_slot(slot, state).await?; self.attest_block(&state, state_root, block_hash, &block, validators); Ok((block_hash, state)) } - pub fn add_attested_blocks_at_slots( + pub async fn add_attested_blocks_at_slots( &self, state: BeaconState, state_root: Hash256, @@ -1448,9 +1472,10 @@ where ) -> AddBlocksResult { assert!(!slots.is_empty()); self.add_attested_blocks_at_slots_given_lbh(state, state_root, slots, validators, None) + .await } - fn add_attested_blocks_at_slots_given_lbh( + async fn add_attested_blocks_at_slots_given_lbh( &self, mut state: BeaconState, state_root: Hash256, @@ -1467,6 +1492,7 @@ where for slot in slots { let (block_hash, new_state) = self .add_attested_block_at_slot(*slot, state, state_root, validators) + .await .unwrap(); state = new_state; block_hash_from_slot.insert(*slot, block_hash); @@ -1488,7 +1514,7 @@ where /// epoch at a time. /// /// Chains is a vec of `(state, slots, validators)` tuples. 
- pub fn add_blocks_on_multiple_chains( + pub async fn add_blocks_on_multiple_chains( &self, chains: Vec<(BeaconState, Vec, Vec)>, ) -> Vec> { @@ -1547,7 +1573,8 @@ where &epoch_slots, &validators, Some(head_block), - ); + ) + .await; block_hashes.extend(new_block_hashes); state_hashes.extend(new_state_hashes); @@ -1596,18 +1623,18 @@ where /// Deprecated: Use make_block() instead /// /// Returns a newly created block, signed by the proposer for the given slot. - pub fn build_block( + pub async fn build_block( &self, state: BeaconState, slot: Slot, _block_strategy: BlockStrategy, ) -> (SignedBeaconBlock, BeaconState) { - self.make_block(state, slot) + self.make_block(state, slot).await } /// Uses `Self::extend_chain` to build the chain out to the `target_slot`. - pub fn extend_to_slot(&self, target_slot: Slot) -> Hash256 { - if self.chain.slot().unwrap() == self.chain.head_info().unwrap().slot { + pub async fn extend_to_slot(&self, target_slot: Slot) -> Hash256 { + if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() { self.advance_slot(); } @@ -1618,7 +1645,7 @@ where .checked_add(1) .unwrap(); - self.extend_slots(num_slots) + self.extend_slots(num_slots).await } /// Uses `Self::extend_chain` to `num_slots` blocks. @@ -1627,8 +1654,8 @@ where /// /// - BlockStrategy::OnCanonicalHead, /// - AttestationStrategy::AllValidators, - pub fn extend_slots(&self, num_slots: usize) -> Hash256 { - if self.chain.slot().unwrap() == self.chain.head_info().unwrap().slot { + pub async fn extend_slots(&self, num_slots: usize) -> Hash256 { + if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() { self.advance_slot(); } @@ -1637,6 +1664,7 @@ where BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, ) + .await } /// Deprecated: Use add_attested_blocks_at_slots() instead @@ -1650,7 +1678,7 @@ where /// /// The `attestation_strategy` dictates which validators will attest to the newly created /// blocks. 
- pub fn extend_chain( + pub async fn extend_chain( &self, num_blocks: usize, block_strategy: BlockStrategy, @@ -1685,8 +1713,9 @@ where AttestationStrategy::SomeValidators(vals) => vals, }; let state_root = state.update_tree_hash_cache().unwrap(); - let (_, _, last_produced_block_hash, _) = - self.add_attested_blocks_at_slots(state, state_root, &slots, &validators); + let (_, _, last_produced_block_hash, _) = self + .add_attested_blocks_at_slots(state, state_root, &slots, &validators) + .await; last_produced_block_hash.into() } @@ -1700,41 +1729,40 @@ where /// then built `faulty_fork_blocks`. /// /// Returns `(honest_head, faulty_head)`, the roots of the blocks at the top of each chain. - pub fn generate_two_forks_by_skipping_a_block( + pub async fn generate_two_forks_by_skipping_a_block( &self, honest_validators: &[usize], faulty_validators: &[usize], honest_fork_blocks: usize, faulty_fork_blocks: usize, ) -> (Hash256, Hash256) { - let initial_head_slot = self - .chain - .head() - .expect("should get head") - .beacon_block - .slot(); + let initial_head_slot = self.chain.head_snapshot().beacon_block.slot(); // Move to the next slot so we may produce some more blocks on the head. self.advance_slot(); // Extend the chain with blocks where only honest validators agree. - let honest_head = self.extend_chain( - honest_fork_blocks, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(honest_validators.to_vec()), - ); + let honest_head = self + .extend_chain( + honest_fork_blocks, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(honest_validators.to_vec()), + ) + .await; // Go back to the last block where all agreed, and build blocks upon it where only faulty nodes // agree. - let faulty_head = self.extend_chain( - faulty_fork_blocks, - BlockStrategy::ForkCanonicalChainAt { - previous_slot: initial_head_slot, - // `initial_head_slot + 2` means one slot is skipped. 
- first_slot: initial_head_slot + 2, - }, - AttestationStrategy::SomeValidators(faulty_validators.to_vec()), - ); + let faulty_head = self + .extend_chain( + faulty_fork_blocks, + BlockStrategy::ForkCanonicalChainAt { + previous_slot: initial_head_slot, + // `initial_head_slot + 2` means one slot is skipped. + first_slot: initial_head_slot + 2, + }, + AttestationStrategy::SomeValidators(faulty_validators.to_vec()), + ) + .await; assert_ne!(honest_head, faulty_head, "forks should be distinct"); diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index b1d1f71d6c..85e4f1f093 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -3,6 +3,7 @@ use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; use beacon_chain::{StateSkipConfig, WhenSlotSkipped}; use lazy_static::lazy_static; +use std::sync::Arc; use tree_hash::TreeHash; use types::{AggregateSignature, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot}; @@ -17,8 +18,8 @@ lazy_static! { /// attestation at each slot from genesis through to three epochs past the head. /// /// It checks the produced attestation against some locally computed values. 
-#[test] -fn produces_attestations() { +#[tokio::test] +async fn produces_attestations() { let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 4; let additional_slots_tested = MainnetEthSpec::slots_per_epoch() * 3; @@ -37,11 +38,13 @@ fn produces_attestations() { if slot > 0 && slot <= num_blocks_produced { harness.advance_slot(); - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; } let slot = Slot::from(slot); @@ -129,10 +132,20 @@ fn produces_attestations() { assert_eq!(data.target.root, target_root, "bad target root"); let early_attestation = { - let proto_block = chain.fork_choice.read().get_block(&block_root).unwrap(); + let proto_block = chain + .canonical_head + .fork_choice_read_lock() + .get_block(&block_root) + .unwrap(); chain .early_attester_cache - .add_head_block(block_root, block.clone(), proto_block, &state, &chain.spec) + .add_head_block( + block_root, + Arc::new(block.clone()), + proto_block, + &state, + &chain.spec, + ) .unwrap(); chain .early_attester_cache @@ -151,8 +164,8 @@ fn produces_attestations() { /// Ensures that the early attester cache wont create an attestation to a block in a later slot than /// the one requested. 
-#[test] -fn early_attester_cache_old_request() { +#[tokio::test] +async fn early_attester_cache_old_request() { let harness = BeaconChainHarness::builder(MainnetEthSpec) .default_spec() .keypairs(KEYPAIRS[..].to_vec()) @@ -162,18 +175,20 @@ fn early_attester_cache_old_request() { harness.advance_slot(); - harness.extend_chain( - 2, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!(head.beacon_block.slot(), 2); let head_proto_block = harness .chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&head.beacon_block_root) .unwrap(); diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 2fe8818a9a..6a9e604793 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -56,7 +56,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness( chain: &BeaconChain, ) -> (Attestation, usize, usize, SecretKey, SubnetId) { - let head = chain.head().expect("should get head"); + let head = chain.head_snapshot(); let current_slot = chain.slot().expect("should get slot"); let mut valid_attestation = chain @@ -106,7 +106,8 @@ fn get_valid_aggregated_attestation( chain: &BeaconChain, aggregate: Attestation, ) -> (SignedAggregateAndProof, usize, SecretKey) { - let state = &chain.head().expect("should get head").beacon_state; + let head = chain.head_snapshot(); + let state = &head.beacon_state; let current_slot = chain.slot().expect("should get slot"); let committee = state @@ -155,7 +156,8 @@ fn get_non_aggregator( chain: &BeaconChain, aggregate: &Attestation, ) -> (usize, SecretKey) { - let state = &chain.head().expect("should get 
head").beacon_state; + let head = chain.head_snapshot(); + let state = &head.beacon_state; let current_slot = chain.slot().expect("should get slot"); let committee = state @@ -213,15 +215,17 @@ struct GossipTester { } impl GossipTester { - pub fn new() -> Self { + pub async fn new() -> Self { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. - harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Advance into a slot where there have not been blocks or attestations produced. harness.advance_slot(); @@ -395,9 +399,10 @@ impl GossipTester { } } /// Tests verification of `SignedAggregateAndProof` from the gossip network. -#[test] -fn aggregated_gossip_verification() { +#[tokio::test] +async fn aggregated_gossip_verification() { GossipTester::new() + .await /* * The following two tests ensure: * @@ -511,8 +516,7 @@ fn aggregated_gossip_verification() { let committee_len = tester .harness .chain - .head() - .unwrap() + .head_snapshot() .beacon_state .get_beacon_committee(tester.slot(), a.message.aggregate.data.index) .expect("should get committees") @@ -612,7 +616,7 @@ fn aggregated_gossip_verification() { tester.valid_aggregate.message.aggregate.clone(), None, &sk, - &chain.head_info().unwrap().fork, + &chain.canonical_head.cached_head().head_fork(), chain.genesis_validators_root, &chain.spec, ) @@ -669,9 +673,10 @@ fn aggregated_gossip_verification() { } /// Tests the verification conditions for an unaggregated attestation on the gossip network. 
-#[test] -fn unaggregated_gossip_verification() { +#[tokio::test] +async fn unaggregated_gossip_verification() { GossipTester::new() + .await /* * The following test ensures: * @@ -684,8 +689,7 @@ fn unaggregated_gossip_verification() { a.data.index = tester .harness .chain - .head() - .unwrap() + .head_snapshot() .beacon_state .get_committee_count_at_slot(a.data.slot) .unwrap() @@ -924,16 +928,18 @@ fn unaggregated_gossip_verification() { /// Ensures that an attestation that skips epochs can still be processed. /// /// This also checks that we can do a state lookup if we don't get a hit from the shuffling cache. -#[test] -fn attestation_that_skips_epochs() { +#[tokio::test] +async fn attestation_that_skips_epochs() { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. - harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 3 + 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(vec![]), - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; let current_slot = harness.chain.slot().expect("should get slot"); let current_epoch = harness.chain.epoch().expect("should get epoch"); @@ -992,16 +998,18 @@ fn attestation_that_skips_epochs() { .expect("should gossip verify attestation that skips slots"); } -#[test] -fn attestation_to_finalized_block() { +#[tokio::test] +async fn attestation_to_finalized_block() { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. 
- harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 4 + 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 4 + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; let finalized_checkpoint = harness .chain @@ -1067,16 +1075,18 @@ fn attestation_to_finalized_block() { .contains(earlier_block_root)); } -#[test] -fn verify_aggregate_for_gossip_doppelganger_detection() { +#[tokio::test] +async fn verify_aggregate_for_gossip_doppelganger_detection() { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. - harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Advance into a slot where there have not been blocks or attestations produced. harness.advance_slot(); @@ -1124,16 +1134,18 @@ fn verify_aggregate_for_gossip_doppelganger_detection() { .expect("should check if gossip aggregator was observed")); } -#[test] -fn verify_attestation_for_gossip_doppelganger_detection() { +#[tokio::test] +async fn verify_attestation_for_gossip_doppelganger_detection() { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. - harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Advance into a slot where there have not been blocks or attestations produced. 
harness.advance_slot(); diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index ca65b05fd8..4b3e1e72fe 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -27,19 +27,18 @@ const BLOCK_INDICES: &[usize] = &[0, 1, 32, 64, 68 + 1, 129, CHAIN_SEGMENT_LENGT lazy_static! { /// A cached set of keys. static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); - - /// A cached set of valid blocks - static ref CHAIN_SEGMENT: Vec> = get_chain_segment(); } -fn get_chain_segment() -> Vec> { +async fn get_chain_segment() -> Vec> { let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - CHAIN_SEGMENT_LENGTH, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + CHAIN_SEGMENT_LENGTH, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness .chain @@ -50,11 +49,14 @@ fn get_chain_segment() -> Vec> { let full_block = harness .chain .store - .make_full_block(&snapshot.beacon_block_root, snapshot.beacon_block) + .make_full_block( + &snapshot.beacon_block_root, + snapshot.beacon_block.as_ref().clone(), + ) .unwrap(); BeaconSnapshot { beacon_block_root: snapshot.beacon_block_root, - beacon_block: full_block, + beacon_block: Arc::new(full_block), beacon_state: snapshot.beacon_state, } }) @@ -75,8 +77,8 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness Vec> { - CHAIN_SEGMENT +fn chain_segment_blocks(chain_segment: &[BeaconSnapshot]) -> Vec>> { + chain_segment .iter() .map(|snapshot| snapshot.beacon_block.clone()) .collect() @@ -110,13 +112,13 @@ fn update_proposal_signatures( .get(proposer_index) .expect("proposer keypair should be available"); - let (block, _) = snapshot.beacon_block.clone().deconstruct(); - snapshot.beacon_block = block.sign( + let (block, _) = 
snapshot.beacon_block.as_ref().clone().deconstruct(); + snapshot.beacon_block = Arc::new(block.sign( &keypair.sk, &state.fork(), state.genesis_validators_root(), spec, - ); + )); } } @@ -124,17 +126,18 @@ fn update_parent_roots(snapshots: &mut [BeaconSnapshot]) { for i in 0..snapshots.len() { let root = snapshots[i].beacon_block.canonical_root(); if let Some(child) = snapshots.get_mut(i + 1) { - let (mut block, signature) = child.beacon_block.clone().deconstruct(); + let (mut block, signature) = child.beacon_block.as_ref().clone().deconstruct(); *block.parent_root_mut() = root; - child.beacon_block = SignedBeaconBlock::from_block(block, signature) + child.beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)) } } } -#[test] -fn chain_segment_full_segment() { +#[tokio::test] +async fn chain_segment_full_segment() { let harness = get_harness(VALIDATOR_COUNT); - let blocks = chain_segment_blocks(); + let chain_segment = get_chain_segment().await; + let blocks = chain_segment_blocks(&chain_segment); harness .chain @@ -145,33 +148,36 @@ fn chain_segment_full_segment() { harness .chain .process_chain_segment(vec![]) + .await .into_block_error() .expect("should import empty chain segment"); harness .chain .process_chain_segment(blocks.clone()) + .await .into_block_error() .expect("should import chain segment"); - harness.chain.fork_choice().expect("should run fork choice"); + harness + .chain + .recompute_head_at_current_slot() + .await + .expect("should run fork choice"); assert_eq!( - harness - .chain - .head_info() - .expect("should get harness b head") - .block_root, + harness.head_block_root(), blocks.last().unwrap().canonical_root(), "harness should have last block as head" ); } -#[test] -fn chain_segment_varying_chunk_size() { +#[tokio::test] +async fn chain_segment_varying_chunk_size() { for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] { let harness = get_harness(VALIDATOR_COUNT); - let blocks = chain_segment_blocks(); + let chain_segment = 
get_chain_segment().await; + let blocks = chain_segment_blocks(&chain_segment); harness .chain @@ -182,36 +188,39 @@ fn chain_segment_varying_chunk_size() { harness .chain .process_chain_segment(chunk.to_vec()) + .await .into_block_error() .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); } - harness.chain.fork_choice().expect("should run fork choice"); + harness + .chain + .recompute_head_at_current_slot() + .await + .expect("should run fork choice"); assert_eq!( - harness - .chain - .head_info() - .expect("should get harness b head") - .block_root, + harness.head_block_root(), blocks.last().unwrap().canonical_root(), "harness should have last block as head" ); } } -#[test] -fn chain_segment_non_linear_parent_roots() { +#[tokio::test] +async fn chain_segment_non_linear_parent_roots() { let harness = get_harness(VALIDATOR_COUNT); + let chain_segment = get_chain_segment().await; + harness .chain .slot_clock - .set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + .set_slot(chain_segment.last().unwrap().beacon_block.slot().as_u64()); /* * Test with a block removed. */ - let mut blocks = chain_segment_blocks(); + let mut blocks = chain_segment_blocks(&chain_segment); blocks.remove(2); assert!( @@ -219,6 +228,7 @@ fn chain_segment_non_linear_parent_roots() { harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::NonLinearParentRoots) ), @@ -228,16 +238,17 @@ fn chain_segment_non_linear_parent_roots() { /* * Test with a modified parent root. 
*/ - let mut blocks = chain_segment_blocks(); - let (mut block, signature) = blocks[3].clone().deconstruct(); + let mut blocks = chain_segment_blocks(&chain_segment); + let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); *block.parent_root_mut() = Hash256::zero(); - blocks[3] = SignedBeaconBlock::from_block(block, signature); + blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); assert!( matches!( harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::NonLinearParentRoots) ), @@ -245,28 +256,30 @@ fn chain_segment_non_linear_parent_roots() { ); } -#[test] -fn chain_segment_non_linear_slots() { +#[tokio::test] +async fn chain_segment_non_linear_slots() { let harness = get_harness(VALIDATOR_COUNT); + let chain_segment = get_chain_segment().await; harness .chain .slot_clock - .set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + .set_slot(chain_segment.last().unwrap().beacon_block.slot().as_u64()); /* * Test where a child is lower than the parent. */ - let mut blocks = chain_segment_blocks(); - let (mut block, signature) = blocks[3].clone().deconstruct(); + let mut blocks = chain_segment_blocks(&chain_segment); + let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); *block.slot_mut() = Slot::new(0); - blocks[3] = SignedBeaconBlock::from_block(block, signature); + blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); assert!( matches!( harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::NonLinearSlots) ), @@ -277,16 +290,17 @@ fn chain_segment_non_linear_slots() { * Test where a child is equal to the parent. 
*/ - let mut blocks = chain_segment_blocks(); - let (mut block, signature) = blocks[3].clone().deconstruct(); + let mut blocks = chain_segment_blocks(&chain_segment); + let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); *block.slot_mut() = blocks[2].slot(); - blocks[3] = SignedBeaconBlock::from_block(block, signature); + blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); assert!( matches!( harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::NonLinearSlots) ), @@ -294,7 +308,8 @@ fn chain_segment_non_linear_slots() { ); } -fn assert_invalid_signature( +async fn assert_invalid_signature( + chain_segment: &[BeaconSnapshot], harness: &BeaconChainHarness>, block_index: usize, snapshots: &[BeaconSnapshot], @@ -311,6 +326,7 @@ fn assert_invalid_signature( harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::InvalidSignature) ), @@ -319,19 +335,20 @@ fn assert_invalid_signature( ); // Ensure the block will be rejected if imported on its own (without gossip checking). - let ancestor_blocks = CHAIN_SEGMENT + let ancestor_blocks = chain_segment .iter() .take(block_index) .map(|snapshot| snapshot.beacon_block.clone()) .collect(); // We don't care if this fails, we just call this to ensure that all prior blocks have been // imported prior to this test. - let _ = harness.chain.process_chain_segment(ancestor_blocks); + let _ = harness.chain.process_chain_segment(ancestor_blocks).await; assert!( matches!( harness .chain - .process_block(snapshots[block_index].beacon_block.clone()), + .process_block(snapshots[block_index].beacon_block.clone()) + .await, Err(BlockError::InvalidSignature) ), "should not import individual block with an invalid {} signature", @@ -346,25 +363,34 @@ fn assert_invalid_signature( // slot) tuple. 
} -fn get_invalid_sigs_harness() -> BeaconChainHarness> { +async fn get_invalid_sigs_harness( + chain_segment: &[BeaconSnapshot], +) -> BeaconChainHarness> { let harness = get_harness(VALIDATOR_COUNT); harness .chain .slot_clock - .set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + .set_slot(chain_segment.last().unwrap().beacon_block.slot().as_u64()); harness } -#[test] -fn invalid_signature_gossip_block() { +#[tokio::test] +async fn invalid_signature_gossip_block() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { // Ensure the block will be rejected if imported on its own (without gossip checking). - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (block, _) = snapshots[block_index].beacon_block.clone().deconstruct(); - snapshots[block_index].beacon_block = - SignedBeaconBlock::from_block(block.clone(), junk_signature()); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (block, _) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); + snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block( + block.clone(), + junk_signature(), + )); // Import all the ancestors before the `block_index` block. 
- let ancestor_blocks = CHAIN_SEGMENT + let ancestor_blocks = chain_segment .iter() .take(block_index) .map(|snapshot| snapshot.beacon_block.clone()) @@ -372,13 +398,18 @@ fn invalid_signature_gossip_block() { harness .chain .process_chain_segment(ancestor_blocks) + .await .into_block_error() .expect("should import all blocks prior to the one being tested"); assert!( matches!( harness .chain - .process_block(SignedBeaconBlock::from_block(block, junk_signature())), + .process_block(Arc::new(SignedBeaconBlock::from_block( + block, + junk_signature() + ))) + .await, Err(BlockError::InvalidSignature) ), "should not import individual block with an invalid gossip signature", @@ -386,14 +417,21 @@ fn invalid_signature_gossip_block() { } } -#[test] -fn invalid_signature_block_proposal() { +#[tokio::test] +async fn invalid_signature_block_proposal() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (block, _) = snapshots[block_index].beacon_block.clone().deconstruct(); - snapshots[block_index].beacon_block = - SignedBeaconBlock::from_block(block.clone(), junk_signature()); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (block, _) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); + snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block( + block.clone(), + junk_signature(), + )); let blocks = snapshots .iter() .map(|snapshot| snapshot.beacon_block.clone()) @@ -404,6 +442,7 @@ fn invalid_signature_block_proposal() { harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::InvalidSignature) ), @@ -412,26 +451,37 @@ fn invalid_signature_block_proposal() { } } -#[test] -fn invalid_signature_randao_reveal() { +#[tokio::test] +async fn invalid_signature_randao_reveal() { + let chain_segment = 
get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); *block.body_mut().randao_reveal_mut() = junk_signature(); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "randao"); + assert_invalid_signature(&chain_segment, &harness, block_index, &snapshots, "randao").await; } } -#[test] -fn invalid_signature_proposer_slashing() { +#[tokio::test] +async fn invalid_signature_proposer_slashing() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); let proposer_slashing = ProposerSlashing { signed_header_1: SignedBeaconBlockHeader { message: block.block_header(), @@ -447,18 +497,27 @@ fn invalid_signature_proposer_slashing() { .proposer_slashings_mut() .push(proposer_slashing) .expect("should update proposer slashing"); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + 
Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "proposer slashing"); + assert_invalid_signature( + &chain_segment, + &harness, + block_index, + &snapshots, + "proposer slashing", + ) + .await; } } -#[test] -fn invalid_signature_attester_slashing() { +#[tokio::test] +async fn invalid_signature_attester_slashing() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); let indexed_attestation = IndexedAttestation { attesting_indices: vec![0].into(), data: AttestationData { @@ -480,33 +539,58 @@ fn invalid_signature_attester_slashing() { attestation_1: indexed_attestation.clone(), attestation_2: indexed_attestation, }; - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); block .body_mut() .attester_slashings_mut() .push(attester_slashing) .expect("should update attester slashing"); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "attester slashing"); + assert_invalid_signature( + &chain_segment, + &harness, + block_index, + &snapshots, + "attester slashing", + ) + .await; } } -#[test] -fn invalid_signature_attestation() { +#[tokio::test] +async fn invalid_signature_attestation() { + let chain_segment = get_chain_segment().await; let mut 
checked_attestation = false; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); if let Some(attestation) = block.body_mut().attestations_mut().get_mut(0) { attestation.signature = junk_aggregate_signature(); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "attestation"); + assert_invalid_signature( + &chain_segment, + &harness, + block_index, + &snapshots, + "attestation", + ) + .await; checked_attestation = true; } } @@ -517,12 +601,13 @@ fn invalid_signature_attestation() { ) } -#[test] -fn invalid_signature_deposit() { +#[tokio::test] +async fn invalid_signature_deposit() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { // Note: an invalid deposit signature is permitted! 
- let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); let deposit = Deposit { proof: vec![Hash256::zero(); DEPOSIT_TREE_DEPTH + 1].into(), data: DepositData { @@ -532,13 +617,18 @@ fn invalid_signature_deposit() { signature: junk_signature().into(), }, }; - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); block .body_mut() .deposits_mut() .push(deposit) .expect("should update deposit"); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); let blocks = snapshots @@ -550,6 +640,7 @@ fn invalid_signature_deposit() { harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::InvalidSignature) ), @@ -558,13 +649,18 @@ fn invalid_signature_deposit() { } } -#[test] -fn invalid_signature_exit() { +#[tokio::test] +async fn invalid_signature_exit() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); let epoch = snapshots[block_index].beacon_state.current_epoch(); - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); block .body_mut() .voluntary_exits_mut() @@ -576,10 +672,18 @@ fn invalid_signature_exit() { signature: junk_signature(), }) .expect("should 
update deposit"); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "voluntary exit"); + assert_invalid_signature( + &chain_segment, + &harness, + block_index, + &snapshots, + "voluntary exit", + ) + .await; } } @@ -590,27 +694,30 @@ fn unwrap_err(result: Result) -> E { } } -#[test] -fn block_gossip_verification() { +#[tokio::test] +async fn block_gossip_verification() { let harness = get_harness(VALIDATOR_COUNT); + let chain_segment = get_chain_segment().await; let block_index = CHAIN_SEGMENT_LENGTH - 2; harness .chain .slot_clock - .set_slot(CHAIN_SEGMENT[block_index].beacon_block.slot().as_u64()); + .set_slot(chain_segment[block_index].beacon_block.slot().as_u64()); // Import the ancestors prior to the block we're testing. - for snapshot in &CHAIN_SEGMENT[0..block_index] { + for snapshot in &chain_segment[0..block_index] { let gossip_verified = harness .chain .verify_block_for_gossip(snapshot.beacon_block.clone()) + .await .expect("should obtain gossip verified block"); harness .chain .process_block(gossip_verified) + .await .expect("should import valid gossip verified block"); } @@ -624,15 +731,16 @@ fn block_gossip_verification() { * future blocks for processing at the appropriate slot). 
*/ - let (mut block, signature) = CHAIN_SEGMENT[block_index] + let (mut block, signature) = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct(); let expected_block_slot = block.slot() + 1; *block.slot_mut() = expected_block_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::FutureSlot { present_slot, block_slot, @@ -654,21 +762,19 @@ fn block_gossip_verification() { * nodes, etc). */ - let (mut block, signature) = CHAIN_SEGMENT[block_index] + let (mut block, signature) = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct(); let expected_finalized_slot = harness - .chain - .head_info() - .expect("should get head info") - .finalized_checkpoint + .finalized_checkpoint() .epoch .start_slot(E::slots_per_epoch()); *block.slot_mut() = expected_finalized_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::WouldRevertFinalizedSlot { block_slot, finalized_slot, @@ -687,8 +793,9 @@ fn block_gossip_verification() { * proposer_index pubkey. */ - let block = CHAIN_SEGMENT[block_index] + let block = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct() .0; @@ -697,10 +804,11 @@ fn block_gossip_verification() { unwrap_err( harness .chain - .verify_block_for_gossip(SignedBeaconBlock::from_block( + .verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block( block, junk_signature() - )) + ))) + .await ), BlockError::ProposalSignatureInvalid ), @@ -715,15 +823,16 @@ fn block_gossip_verification() { * The block's parent (defined by block.parent_root) passes validation. 
*/ - let (mut block, signature) = CHAIN_SEGMENT[block_index] + let (mut block, signature) = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct(); let parent_root = Hash256::from_low_u64_be(42); *block.parent_root_mut() = parent_root; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::ParentUnknown(block) if block.parent_root() == parent_root ), @@ -740,15 +849,16 @@ fn block_gossip_verification() { * store.finalized_checkpoint.root */ - let (mut block, signature) = CHAIN_SEGMENT[block_index] + let (mut block, signature) = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct(); - let parent_root = CHAIN_SEGMENT[0].beacon_block_root; + let parent_root = chain_segment[0].beacon_block_root; *block.parent_root_mut() = parent_root; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::NotFinalizedDescendant { block_parent_root } if block_parent_root == parent_root ), @@ -766,8 +876,9 @@ fn block_gossip_verification() { * processing while proposers for the block's branch are calculated. 
*/ - let mut block = CHAIN_SEGMENT[block_index] + let mut block = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct() .0; @@ -779,13 +890,13 @@ fn block_gossip_verification() { *block.proposer_index_mut() = other_proposer; let block = block.sign( &generate_deterministic_keypair(other_proposer as usize).sk, - &harness.chain.head_info().unwrap().fork, + &harness.chain.canonical_head.cached_head().head_fork(), harness.chain.genesis_validators_root, &harness.chain.spec, ); assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(block.clone())), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), BlockError::IncorrectBlockProposer { block, local_shuffling, @@ -797,7 +908,7 @@ fn block_gossip_verification() { // Check to ensure that we registered this is a valid block from this proposer. assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(block.clone())), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), BlockError::RepeatProposal { proposer, slot, @@ -807,9 +918,9 @@ fn block_gossip_verification() { "should register any valid signature against the proposer, even if the block failed later verification" ); - let block = CHAIN_SEGMENT[block_index].beacon_block.clone(); + let block = chain_segment[block_index].beacon_block.clone(); assert!( - harness.chain.verify_block_for_gossip(block).is_ok(), + harness.chain.verify_block_for_gossip(block).await.is_ok(), "the valid block should be processed" ); @@ -822,12 +933,13 @@ fn block_gossip_verification() { * signed_beacon_block.message.slot. 
*/ - let block = CHAIN_SEGMENT[block_index].beacon_block.clone(); + let block = chain_segment[block_index].beacon_block.clone(); assert!( matches!( harness .chain .verify_block_for_gossip(block.clone()) + .await .err() .expect("should error when processing known block"), BlockError::RepeatProposal { @@ -840,8 +952,8 @@ fn block_gossip_verification() { ); } -#[test] -fn verify_block_for_gossip_slashing_detection() { +#[tokio::test] +async fn verify_block_for_gossip_slashing_detection() { let slasher_dir = tempdir().unwrap(); let slasher = Arc::new( Slasher::open(SlasherConfig::new(slasher_dir.path().into()), test_logger()).unwrap(), @@ -858,12 +970,21 @@ fn verify_block_for_gossip_slashing_detection() { harness.advance_slot(); let state = harness.get_current_state(); - let (block1, _) = harness.make_block(state.clone(), Slot::new(1)); - let (block2, _) = harness.make_block(state, Slot::new(1)); + let (block1, _) = harness.make_block(state.clone(), Slot::new(1)).await; + let (block2, _) = harness.make_block(state, Slot::new(1)).await; - let verified_block = harness.chain.verify_block_for_gossip(block1).unwrap(); - harness.chain.process_block(verified_block).unwrap(); - unwrap_err(harness.chain.verify_block_for_gossip(block2)); + let verified_block = harness + .chain + .verify_block_for_gossip(Arc::new(block1)) + .await + .unwrap(); + harness.chain.process_block(verified_block).await.unwrap(); + unwrap_err( + harness + .chain + .verify_block_for_gossip(Arc::new(block2)) + .await, + ); // Slasher should have been handed the two conflicting blocks and crafted a slashing. 
slasher.process_queued(Epoch::new(0)).unwrap(); @@ -875,16 +996,20 @@ fn verify_block_for_gossip_slashing_detection() { slasher_dir.close().unwrap(); } -#[test] -fn verify_block_for_gossip_doppelganger_detection() { +#[tokio::test] +async fn verify_block_for_gossip_doppelganger_detection() { let harness = get_harness(VALIDATOR_COUNT); let state = harness.get_current_state(); - let (block, _) = harness.make_block(state.clone(), Slot::new(1)); + let (block, _) = harness.make_block(state.clone(), Slot::new(1)).await; - let verified_block = harness.chain.verify_block_for_gossip(block).unwrap(); + let verified_block = harness + .chain + .verify_block_for_gossip(Arc::new(block)) + .await + .unwrap(); let attestations = verified_block.block.message().body().attestations().clone(); - harness.chain.process_block(verified_block).unwrap(); + harness.chain.process_block(verified_block).await.unwrap(); for att in attestations.iter() { let epoch = att.data.target.epoch; @@ -921,8 +1046,8 @@ fn verify_block_for_gossip_doppelganger_detection() { } } -#[test] -fn add_base_block_to_altair_chain() { +#[tokio::test] +async fn add_base_block_to_altair_chain() { let mut spec = MainnetEthSpec::default_spec(); let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); @@ -940,11 +1065,13 @@ fn add_base_block_to_altair_chain() { harness.advance_slot(); // Build out all the blocks in epoch 0. - harness.extend_chain( - slots_per_epoch as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + slots_per_epoch as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Move into the next empty slot. harness.advance_slot(); @@ -952,7 +1079,7 @@ fn add_base_block_to_altair_chain() { // Produce an Altair block. 
let state = harness.get_current_state(); let slot = harness.get_current_slot(); - let (altair_signed_block, _) = harness.make_block(state.clone(), slot); + let (altair_signed_block, _) = harness.make_block(state.clone(), slot).await; let altair_block = &altair_signed_block .as_altair() .expect("test expects an altair block") @@ -1007,7 +1134,8 @@ fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .verify_block_for_gossip(base_block.clone()) + .verify_block_for_gossip(Arc::new(base_block.clone())) + .await .err() .expect("should error when processing base block"), BlockError::InconsistentFork(InconsistentFork { @@ -1020,7 +1148,8 @@ fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .process_block(base_block.clone()) + .process_block(Arc::new(base_block.clone())) + .await .err() .expect("should error when processing base block"), BlockError::InconsistentFork(InconsistentFork { @@ -1031,7 +1160,10 @@ fn add_base_block_to_altair_chain() { // Ensure that it would be impossible to import via `BeaconChain::process_chain_segment`. assert!(matches!( - harness.chain.process_chain_segment(vec![base_block]), + harness + .chain + .process_chain_segment(vec![Arc::new(base_block)]) + .await, ChainSegmentResult::Failed { imported_blocks: 0, error: BlockError::InconsistentFork(InconsistentFork { @@ -1042,8 +1174,8 @@ fn add_base_block_to_altair_chain() { )); } -#[test] -fn add_altair_block_to_base_chain() { +#[tokio::test] +async fn add_altair_block_to_base_chain() { let mut spec = MainnetEthSpec::default_spec(); // Altair never happens. @@ -1060,11 +1192,13 @@ fn add_altair_block_to_base_chain() { harness.advance_slot(); // Build one block. - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Move into the next empty slot. 
harness.advance_slot(); @@ -1072,7 +1206,7 @@ fn add_altair_block_to_base_chain() { // Produce an altair block. let state = harness.get_current_state(); let slot = harness.get_current_slot(); - let (base_signed_block, _) = harness.make_block(state.clone(), slot); + let (base_signed_block, _) = harness.make_block(state.clone(), slot).await; let base_block = &base_signed_block .as_base() .expect("test expects a base block") @@ -1128,7 +1262,8 @@ fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .verify_block_for_gossip(altair_block.clone()) + .verify_block_for_gossip(Arc::new(altair_block.clone())) + .await .err() .expect("should error when processing altair block"), BlockError::InconsistentFork(InconsistentFork { @@ -1141,7 +1276,8 @@ fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .process_block(altair_block.clone()) + .process_block(Arc::new(altair_block.clone())) + .await .err() .expect("should error when processing altair block"), BlockError::InconsistentFork(InconsistentFork { @@ -1152,7 +1288,10 @@ fn add_altair_block_to_base_chain() { // Ensure that it would be impossible to import via `BeaconChain::process_chain_segment`. assert!(matches!( - harness.chain.process_chain_segment(vec![altair_block]), + harness + .chain + .process_chain_segment(vec![Arc::new(altair_block)]) + .await, ChainSegmentResult::Failed { imported_blocks: 0, error: BlockError::InconsistentFork(InconsistentFork { diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index d67ed35f9c..91d5eb21ca 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -27,11 +27,11 @@ fn verify_execution_payload_chain(chain: &[FullPayload]) { } } -#[test] +#[tokio::test] // TODO(merge): This isn't working cause the non-zero values in `initialize_beacon_state_from_eth1` // are causing failed lookups to the execution node. I need to come back to this. 
#[should_panic] -fn merge_with_terminal_block_hash_override() { +async fn merge_with_terminal_block_hash_override() { let altair_fork_epoch = Epoch::new(0); let bellatrix_fork_epoch = Epoch::new(0); @@ -70,8 +70,7 @@ fn merge_with_terminal_block_hash_override() { assert!( harness .chain - .head() - .unwrap() + .head_snapshot() .beacon_block .as_merge() .is_ok(), @@ -80,9 +79,9 @@ fn merge_with_terminal_block_hash_override() { let mut execution_payloads = vec![]; for i in 0..E::slots_per_epoch() * 3 { - harness.extend_slots(1); + harness.extend_slots(1).await; - let block = harness.chain.head().unwrap().beacon_block; + let block = &harness.chain.head_snapshot().beacon_block; let execution_payload = block.message().body().execution_payload().unwrap().clone(); if i == 0 { @@ -94,8 +93,8 @@ fn merge_with_terminal_block_hash_override() { verify_execution_payload_chain(execution_payloads.as_slice()); } -#[test] -fn base_altair_merge_with_terminal_block_after_fork() { +#[tokio::test] +async fn base_altair_merge_with_terminal_block_after_fork() { let altair_fork_epoch = Epoch::new(4); let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch()); let bellatrix_fork_epoch = Epoch::new(8); @@ -118,15 +117,15 @@ fn base_altair_merge_with_terminal_block_after_fork() { * Start with the base fork. */ - assert!(harness.chain.head().unwrap().beacon_block.as_base().is_ok()); + assert!(harness.chain.head_snapshot().beacon_block.as_base().is_ok()); /* * Do the Altair fork. */ - harness.extend_to_slot(altair_fork_slot); + harness.extend_to_slot(altair_fork_slot).await; - let altair_head = harness.chain.head().unwrap().beacon_block; + let altair_head = &harness.chain.head_snapshot().beacon_block; assert!(altair_head.as_altair().is_ok()); assert_eq!(altair_head.slot(), altair_fork_slot); @@ -134,9 +133,9 @@ fn base_altair_merge_with_terminal_block_after_fork() { * Do the merge fork, without a terminal PoW block. 
*/ - harness.extend_to_slot(merge_fork_slot); + harness.extend_to_slot(merge_fork_slot).await; - let merge_head = harness.chain.head().unwrap().beacon_block; + let merge_head = &harness.chain.head_snapshot().beacon_block; assert!(merge_head.as_merge().is_ok()); assert_eq!(merge_head.slot(), merge_fork_slot); assert_eq!( @@ -148,9 +147,9 @@ fn base_altair_merge_with_terminal_block_after_fork() { * Next merge block shouldn't include an exec payload. */ - harness.extend_slots(1); + harness.extend_slots(1).await; - let one_after_merge_head = harness.chain.head().unwrap().beacon_block; + let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; assert_eq!( *one_after_merge_head .message() @@ -175,9 +174,9 @@ fn base_altair_merge_with_terminal_block_after_fork() { */ for _ in 0..4 { - harness.extend_slots(1); + harness.extend_slots(1).await; - let block = harness.chain.head().unwrap().beacon_block; + let block = &harness.chain.head_snapshot().beacon_block; execution_payloads.push(block.message().body().execution_payload().unwrap().clone()); } diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index c9df6aa31d..535fe080a7 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -46,18 +46,20 @@ fn get_harness(store: Arc, validator_count: usize) -> TestHarness { harness } -#[test] -fn voluntary_exit() { +#[tokio::test] +async fn voluntary_exit() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), VALIDATOR_COUNT); let spec = &harness.chain.spec.clone(); - harness.extend_chain( - (E::slots_per_epoch() * (spec.shard_committee_period + 1)) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + (E::slots_per_epoch() * (spec.shard_committee_period + 1)) as usize, + BlockStrategy::OnCanonicalHead, + 
AttestationStrategy::AllValidators, + ) + .await; let validator_index1 = VALIDATOR_COUNT - 1; let validator_index2 = VALIDATOR_COUNT - 2; diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 2a48a4b691..e37ed286bc 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -2,8 +2,8 @@ use beacon_chain::{ test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainError, BlockError, ExecutionPayloadError, HeadInfo, StateSkipConfig, - WhenSlotSkipped, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, + BeaconChainError, BlockError, ExecutionPayloadError, StateSkipConfig, WhenSlotSkipped, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ json_structures::{JsonForkChoiceStateV1, JsonPayloadAttributesV1}, @@ -12,6 +12,7 @@ use execution_layer::{ use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Error as ProtoArrayError, ExecutionStatus}; use slot_clock::SlotClock; +use std::sync::Arc; use std::time::Duration; use task_executor::ShutdownReason; use tree_hash::TreeHash; @@ -84,19 +85,19 @@ impl InvalidPayloadRig { fn execution_status(&self, block_root: Hash256) -> ExecutionStatus { self.harness .chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&block_root) .unwrap() .execution_status } - fn fork_choice(&self) { - self.harness.chain.fork_choice().unwrap(); - } - - fn head_info(&self) -> HeadInfo { - self.harness.chain.head_info().unwrap() + async fn recompute_head(&self) { + self.harness + .chain + .recompute_head_at_current_slot() + .await + .unwrap(); } fn previous_forkchoice_update_params(&self) -> (ForkChoiceState, PayloadAttributes) { @@ -142,22 +143,24 @@ impl InvalidPayloadRig { .block_hash } - fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec { - (0..num_blocks) - .map(|_| 
self.import_block(is_valid.clone())) - .collect() + async fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec { + let mut roots = Vec::with_capacity(num_blocks as usize); + for _ in 0..num_blocks { + roots.push(self.import_block(is_valid.clone()).await); + } + roots } - fn move_to_first_justification(&mut self, is_valid: Payload) { + async fn move_to_first_justification(&mut self, is_valid: Payload) { let slots_till_justification = E::slots_per_epoch() * 3; - self.build_blocks(slots_till_justification, is_valid); + self.build_blocks(slots_till_justification, is_valid).await; - let justified_checkpoint = self.head_info().current_justified_checkpoint; + let justified_checkpoint = self.harness.justified_checkpoint(); assert_eq!(justified_checkpoint.epoch, 2); } /// Import a block while setting the newPayload and forkchoiceUpdated responses to `is_valid`. - fn import_block(&mut self, is_valid: Payload) -> Hash256 { + async fn import_block(&mut self, is_valid: Payload) -> Hash256 { self.import_block_parametric(is_valid, is_valid, |error| { matches!( error, @@ -166,6 +169,7 @@ impl InvalidPayloadRig { ) ) }) + .await } fn block_root_at_slot(&self, slot: Slot) -> Option { @@ -178,13 +182,13 @@ impl InvalidPayloadRig { fn validate_manually(&self, block_root: Hash256) { self.harness .chain - .fork_choice - .write() + .canonical_head + .fork_choice_write_lock() .on_valid_execution_payload(block_root) .unwrap(); } - fn import_block_parametric) -> bool>( + async fn import_block_parametric) -> bool>( &mut self, new_payload_response: Payload, forkchoice_response: Payload, @@ -192,10 +196,10 @@ impl InvalidPayloadRig { ) -> Hash256 { let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); - let head = self.harness.chain.head().unwrap(); - let state = head.beacon_state; + let head = self.harness.chain.head_snapshot(); + let state = head.beacon_state.clone_with_only_committee_caches(); let slot = state.slot() + 1; - let (block, post_state) 
= self.harness.make_block(state, slot); + let (block, post_state) = self.harness.make_block(state, slot).await; let block_root = block.canonical_root(); let set_new_payload = |payload: Payload| match payload { @@ -249,7 +253,11 @@ impl InvalidPayloadRig { } else { mock_execution_layer.server.full_payload_verification(); } - let root = self.harness.process_block(slot, block.clone()).unwrap(); + let root = self + .harness + .process_block(slot, block.clone()) + .await + .unwrap(); if self.enable_attestations { let all_validators: Vec = (0..VALIDATOR_COUNT).collect(); @@ -294,7 +302,7 @@ impl InvalidPayloadRig { set_new_payload(new_payload_response); set_forkchoice_updated(forkchoice_response); - match self.harness.process_block(slot, block) { + match self.harness.process_block(slot, block).await { Err(error) if evaluate_error(&error) => (), Err(other) => { panic!("evaluate_error returned false with {:?}", other) @@ -309,8 +317,12 @@ impl InvalidPayloadRig { } }; - let block_in_forkchoice = - self.harness.chain.fork_choice.read().get_block(&block_root); + let block_in_forkchoice = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .get_block(&block_root); if let Payload::Invalid { .. } = new_payload_response { // A block found to be immediately invalid should not end up in fork choice. assert_eq!(block_in_forkchoice, None); @@ -333,106 +345,111 @@ impl InvalidPayloadRig { block_root } - fn invalidate_manually(&self, block_root: Hash256) { + async fn invalidate_manually(&self, block_root: Hash256) { self.harness .chain .process_invalid_execution_payload(&InvalidationOperation::InvalidateOne { block_root }) + .await .unwrap(); } } /// Simple test of the different import types. 
-#[test] -fn valid_invalid_syncing() { +#[tokio::test] +async fn valid_invalid_syncing() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); + rig.import_block(Payload::Valid).await; rig.import_block(Payload::Invalid { latest_valid_hash: None, - }); - rig.import_block(Payload::Syncing); + }) + .await; + rig.import_block(Payload::Syncing).await; } /// Ensure that an invalid payload can invalidate its parent too (given the right /// `latest_valid_hash`. -#[test] -fn invalid_payload_invalidates_parent() { +#[tokio::test] +async fn invalid_payload_invalidates_parent() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - rig.move_to_first_justification(Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + rig.move_to_first_justification(Payload::Syncing).await; let roots = vec![ - rig.import_block(Payload::Syncing), - rig.import_block(Payload::Syncing), - rig.import_block(Payload::Syncing), + rig.import_block(Payload::Syncing).await, + rig.import_block(Payload::Syncing).await, + rig.import_block(Payload::Syncing).await, ]; let latest_valid_hash = rig.block_hash(roots[0]); rig.import_block(Payload::Invalid { latest_valid_hash: Some(latest_valid_hash), - }); + }) + .await; assert!(rig.execution_status(roots[0]).is_valid_and_post_bellatrix()); assert!(rig.execution_status(roots[1]).is_invalid()); assert!(rig.execution_status(roots[2]).is_invalid()); - assert_eq!(rig.head_info().block_root, roots[0]); + assert_eq!(rig.harness.head_block_root(), roots[0]); } /// Test invalidation of a payload via the fork choice updated message. /// /// The `invalid_payload` argument determines the type of invalid payload: `Invalid`, /// `InvalidBlockHash`, etc, taking the `latest_valid_hash` as an argument. 
-fn immediate_forkchoice_update_invalid_test( +async fn immediate_forkchoice_update_invalid_test( invalid_payload: impl FnOnce(Option) -> Payload, ) { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - rig.move_to_first_justification(Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + rig.move_to_first_justification(Payload::Syncing).await; - let valid_head_root = rig.import_block(Payload::Valid); + let valid_head_root = rig.import_block(Payload::Valid).await; let latest_valid_hash = Some(rig.block_hash(valid_head_root)); // Import a block which returns syncing when supplied via newPayload, and then // invalid when the forkchoice update is sent. rig.import_block_parametric(Payload::Syncing, invalid_payload(latest_valid_hash), |_| { false - }); + }) + .await; // The head should be the latest valid block. - assert_eq!(rig.head_info().block_root, valid_head_root); + assert_eq!(rig.harness.head_block_root(), valid_head_root); } -#[test] -fn immediate_forkchoice_update_payload_invalid() { +#[tokio::test] +async fn immediate_forkchoice_update_payload_invalid() { immediate_forkchoice_update_invalid_test(|latest_valid_hash| Payload::Invalid { latest_valid_hash, }) + .await } -#[test] -fn immediate_forkchoice_update_payload_invalid_block_hash() { - immediate_forkchoice_update_invalid_test(|_| Payload::InvalidBlockHash) +#[tokio::test] +async fn immediate_forkchoice_update_payload_invalid_block_hash() { + immediate_forkchoice_update_invalid_test(|_| Payload::InvalidBlockHash).await } -#[test] -fn immediate_forkchoice_update_payload_invalid_terminal_block() { - immediate_forkchoice_update_invalid_test(|_| Payload::InvalidTerminalBlock) +#[tokio::test] +async fn immediate_forkchoice_update_payload_invalid_terminal_block() { + immediate_forkchoice_update_invalid_test(|_| Payload::InvalidTerminalBlock).await } /// 
Ensure the client tries to exit when the justified checkpoint is invalidated. -#[test] -fn justified_checkpoint_becomes_invalid() { +#[tokio::test] +async fn justified_checkpoint_becomes_invalid() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - rig.move_to_first_justification(Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + rig.move_to_first_justification(Payload::Syncing).await; - let justified_checkpoint = rig.head_info().current_justified_checkpoint; + let justified_checkpoint = rig.harness.justified_checkpoint(); let parent_root_of_justified = rig .harness .chain @@ -456,7 +473,8 @@ fn justified_checkpoint_becomes_invalid() { // is invalid. BlockError::BeaconChainError(BeaconChainError::JustifiedPayloadInvalid { .. }) ) - }); + }) + .await; // The beacon chain should have triggered a shutdown. assert_eq!( @@ -468,18 +486,18 @@ fn justified_checkpoint_becomes_invalid() { } /// Ensure that a `latest_valid_hash` for a pre-finality block only reverts a single block. -#[test] -fn pre_finalized_latest_valid_hash() { +#[tokio::test] +async fn pre_finalized_latest_valid_hash() { let num_blocks = E::slots_per_epoch() * 4; let finalized_epoch = 2; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); let mut blocks = vec![]; - blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block. - blocks.extend(rig.build_blocks(num_blocks - 1, Payload::Syncing)); + blocks.push(rig.import_block(Payload::Valid).await); // Import a valid transition block. 
+ blocks.extend(rig.build_blocks(num_blocks - 1, Payload::Syncing).await); - assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); + assert_eq!(rig.harness.finalized_checkpoint().epoch, finalized_epoch); let pre_finalized_block_root = rig.block_root_at_slot(Slot::new(1)).unwrap(); let pre_finalized_block_hash = rig.block_hash(pre_finalized_block_root); @@ -490,10 +508,11 @@ fn pre_finalized_latest_valid_hash() { // Import a pre-finalized block. rig.import_block(Payload::Invalid { latest_valid_hash: Some(pre_finalized_block_hash), - }); + }) + .await; // The latest imported block should be the head. - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); // The beacon chain should *not* have triggered a shutdown. assert_eq!(rig.harness.shutdown_reasons(), vec![]); @@ -514,16 +533,16 @@ fn pre_finalized_latest_valid_hash() { /// /// - Invalidate descendants of `latest_valid_root`. /// - Validate `latest_valid_root` and its ancestors. -#[test] -fn latest_valid_hash_will_validate() { +#[tokio::test] +async fn latest_valid_hash_will_validate() { const LATEST_VALID_SLOT: u64 = 3; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); let mut blocks = vec![]; - blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block. - blocks.extend(rig.build_blocks(4, Payload::Syncing)); + blocks.push(rig.import_block(Payload::Valid).await); // Import a valid transition block. 
+ blocks.extend(rig.build_blocks(4, Payload::Syncing).await); let latest_valid_root = rig .block_root_at_slot(Slot::new(LATEST_VALID_SLOT)) @@ -532,9 +551,10 @@ fn latest_valid_hash_will_validate() { rig.import_block(Payload::Invalid { latest_valid_hash: Some(latest_valid_hash), - }); + }) + .await; - assert_eq!(rig.head_info().slot, LATEST_VALID_SLOT); + assert_eq!(rig.harness.head_slot(), LATEST_VALID_SLOT); for slot in 0..=5 { let slot = Slot::new(slot); @@ -558,18 +578,18 @@ fn latest_valid_hash_will_validate() { } /// Check behaviour when the `latest_valid_hash` is a junk value. -#[test] -fn latest_valid_hash_is_junk() { +#[tokio::test] +async fn latest_valid_hash_is_junk() { let num_blocks = E::slots_per_epoch() * 5; let finalized_epoch = 3; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); let mut blocks = vec![]; - blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block. - blocks.extend(rig.build_blocks(num_blocks, Payload::Syncing)); + blocks.push(rig.import_block(Payload::Valid).await); // Import a valid transition block. + blocks.extend(rig.build_blocks(num_blocks, Payload::Syncing).await); - assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); + assert_eq!(rig.harness.finalized_checkpoint().epoch, finalized_epoch); // No service should have triggered a shutdown, yet. assert!(rig.harness.shutdown_reasons().is_empty()); @@ -577,10 +597,11 @@ fn latest_valid_hash_is_junk() { let junk_hash = ExecutionBlockHash::repeat_byte(42); rig.import_block(Payload::Invalid { latest_valid_hash: Some(junk_hash), - }); + }) + .await; // The latest imported block should be the head. - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); // The beacon chain should *not* have triggered a shutdown. 
assert_eq!(rig.harness.shutdown_reasons(), vec![]); @@ -598,19 +619,19 @@ fn latest_valid_hash_is_junk() { } /// Check that descendants of invalid blocks are also invalidated. -#[test] -fn invalidates_all_descendants() { +#[tokio::test] +async fn invalidates_all_descendants() { let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; let finalized_epoch = 2; let finalized_slot = E::slots_per_epoch() * 2; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - let blocks = rig.build_blocks(num_blocks, Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + let blocks = rig.build_blocks(num_blocks, Payload::Syncing).await; - assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.finalized_checkpoint().epoch, finalized_epoch); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); // Apply a block which conflicts with the canonical chain. let fork_slot = Slot::new(4 * E::slots_per_epoch() + 3); @@ -621,9 +642,14 @@ fn invalidates_all_descendants() { .state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots) .unwrap(); assert_eq!(fork_parent_state.slot(), fork_parent_slot); - let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot); - let fork_block_root = rig.harness.chain.process_block(fork_block).unwrap(); - rig.fork_choice(); + let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; + let fork_block_root = rig + .harness + .chain + .process_block(Arc::new(fork_block)) + .await + .unwrap(); + rig.recompute_head().await; // The latest valid hash will be set to the grandparent of the fork block. This means that the // parent of the fork block will become invalid. 
@@ -638,14 +664,15 @@ fn invalidates_all_descendants() { let latest_valid_hash = rig.block_hash(latest_valid_root); // The new block should not become the head, the old head should remain. - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); rig.import_block(Payload::Invalid { latest_valid_hash: Some(latest_valid_hash), - }); + }) + .await; // The block before the fork should become the head. - assert_eq!(rig.head_info().block_root, latest_valid_root); + assert_eq!(rig.harness.head_block_root(), latest_valid_root); // The fork block should be invalidated, even though it's not an ancestor of the block that // triggered the INVALID response from the EL. @@ -677,19 +704,19 @@ fn invalidates_all_descendants() { } /// Check that the head will switch after the canonical branch is invalidated. -#[test] -fn switches_heads() { +#[tokio::test] +async fn switches_heads() { let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; let finalized_epoch = 2; let finalized_slot = E::slots_per_epoch() * 2; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - let blocks = rig.build_blocks(num_blocks, Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + let blocks = rig.build_blocks(num_blocks, Payload::Syncing).await; - assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.finalized_checkpoint().epoch, finalized_epoch); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); // Apply a block which conflicts with the canonical chain. 
let fork_slot = Slot::new(4 * E::slots_per_epoch() + 3); @@ -700,23 +727,29 @@ fn switches_heads() { .state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots) .unwrap(); assert_eq!(fork_parent_state.slot(), fork_parent_slot); - let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot); + let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; let fork_parent_root = fork_block.parent_root(); - let fork_block_root = rig.harness.chain.process_block(fork_block).unwrap(); - rig.fork_choice(); + let fork_block_root = rig + .harness + .chain + .process_block(Arc::new(fork_block)) + .await + .unwrap(); + rig.recompute_head().await; let latest_valid_slot = fork_parent_slot; let latest_valid_hash = rig.block_hash(fork_parent_root); // The new block should not become the head, the old head should remain. - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); rig.import_block(Payload::Invalid { latest_valid_hash: Some(latest_valid_hash), - }); + }) + .await; // The fork block should become the head. - assert_eq!(rig.head_info().block_root, fork_block_root); + assert_eq!(rig.harness.head_block_root(), fork_block_root); // The fork block has not yet been validated. assert!(rig.execution_status(fork_block_root).is_optimistic()); @@ -746,17 +779,18 @@ fn switches_heads() { } } -#[test] -fn invalid_during_processing() { +#[tokio::test] +async fn invalid_during_processing() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); let roots = &[ - rig.import_block(Payload::Valid), + rig.import_block(Payload::Valid).await, rig.import_block(Payload::Invalid { latest_valid_hash: None, - }), - rig.import_block(Payload::Valid), + }) + .await, + rig.import_block(Payload::Valid).await, ]; // 0 should be present in the chain. @@ -772,20 +806,20 @@ fn invalid_during_processing() { None ); // 2 should be the head. 
- let head = rig.harness.chain.head_info().unwrap(); - assert_eq!(head.block_root, roots[2]); + let head_block_root = rig.harness.head_block_root(); + assert_eq!(head_block_root, roots[2]); } -#[test] -fn invalid_after_optimistic_sync() { +#[tokio::test] +async fn invalid_after_optimistic_sync() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. let mut roots = vec![ - rig.import_block(Payload::Syncing), - rig.import_block(Payload::Syncing), - rig.import_block(Payload::Syncing), + rig.import_block(Payload::Syncing).await, + rig.import_block(Payload::Syncing).await, + rig.import_block(Payload::Syncing).await, ]; for root in &roots { @@ -793,29 +827,32 @@ fn invalid_after_optimistic_sync() { } // 2 should be the head. - let head = rig.harness.chain.head_info().unwrap(); - assert_eq!(head.block_root, roots[2]); + let head = rig.harness.head_block_root(); + assert_eq!(head, roots[2]); - roots.push(rig.import_block(Payload::Invalid { - latest_valid_hash: Some(rig.block_hash(roots[1])), - })); + roots.push( + rig.import_block(Payload::Invalid { + latest_valid_hash: Some(rig.block_hash(roots[1])), + }) + .await, + ); // Running fork choice is necessary since a block has been invalidated. - rig.fork_choice(); + rig.recompute_head().await; // 1 should be the head, since 2 was invalidated. - let head = rig.harness.chain.head_info().unwrap(); - assert_eq!(head.block_root, roots[1]); + let head = rig.harness.head_block_root(); + assert_eq!(head, roots[1]); } -#[test] -fn manually_validate_child() { +#[tokio::test] +async fn manually_validate_child() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. 
+ rig.import_block(Payload::Valid).await; // Import a valid transition block. - let parent = rig.import_block(Payload::Syncing); - let child = rig.import_block(Payload::Syncing); + let parent = rig.import_block(Payload::Syncing).await; + let child = rig.import_block(Payload::Syncing).await; assert!(rig.execution_status(parent).is_optimistic()); assert!(rig.execution_status(child).is_optimistic()); @@ -826,14 +863,14 @@ fn manually_validate_child() { assert!(rig.execution_status(child).is_valid_and_post_bellatrix()); } -#[test] -fn manually_validate_parent() { +#[tokio::test] +async fn manually_validate_parent() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. - let parent = rig.import_block(Payload::Syncing); - let child = rig.import_block(Payload::Syncing); + let parent = rig.import_block(Payload::Syncing).await; + let child = rig.import_block(Payload::Syncing).await; assert!(rig.execution_status(parent).is_optimistic()); assert!(rig.execution_status(child).is_optimistic()); @@ -844,14 +881,14 @@ fn manually_validate_parent() { assert!(rig.execution_status(child).is_optimistic()); } -#[test] -fn payload_preparation() { +#[tokio::test] +async fn payload_preparation() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); + rig.import_block(Payload::Valid).await; let el = rig.execution_layer(); - let head = rig.harness.chain.head().unwrap(); + let head = rig.harness.chain.head_snapshot(); let current_slot = rig.harness.chain.slot().unwrap(); assert_eq!(head.beacon_state.slot(), 1); assert_eq!(current_slot, 1); @@ -865,18 +902,19 @@ fn payload_preparation() { let fee_recipient = Address::repeat_byte(99); // Provide preparation data to the EL for `proposer`. 
- el.update_proposer_preparation_blocking( + el.update_proposer_preparation( Epoch::new(1), &[ProposerPreparationData { validator_index: proposer as u64, fee_recipient, }], ) - .unwrap(); + .await; rig.harness .chain - .prepare_beacon_proposer_blocking() + .prepare_beacon_proposer(rig.harness.chain.slot().unwrap()) + .await .unwrap(); let payload_attributes = PayloadAttributes { @@ -896,15 +934,15 @@ fn payload_preparation() { assert_eq!(rig.previous_payload_attributes(), payload_attributes); } -#[test] -fn invalid_parent() { +#[tokio::test] +async fn invalid_parent() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. // Import a syncing block atop the transition block (we'll call this the "parent block" since we // build another block on it later). - let parent_root = rig.import_block(Payload::Syncing); + let parent_root = rig.import_block(Payload::Syncing).await; let parent_block = rig.harness.get_block(parent_root.into()).unwrap(); let parent_state = rig .harness @@ -914,34 +952,34 @@ fn invalid_parent() { // Produce another block atop the parent, but don't import yet. let slot = parent_block.slot() + 1; rig.harness.set_current_slot(slot); - let (block, state) = rig.harness.make_block(parent_state, slot); + let (block, state) = rig.harness.make_block(parent_state, slot).await; + let block = Arc::new(block); let block_root = block.canonical_root(); assert_eq!(block.parent_root(), parent_root); // Invalidate the parent block. - rig.invalidate_manually(parent_root); + rig.invalidate_manually(parent_root).await; assert!(rig.execution_status(parent_root).is_invalid()); // Ensure the block built atop an invalid payload is invalid for gossip. 
assert!(matches!( - rig.harness.chain.verify_block_for_gossip(block.clone()), + rig.harness.chain.clone().verify_block_for_gossip(block.clone()).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); // Ensure the block built atop an invalid payload is invalid for import. assert!(matches!( - rig.harness.chain.process_block(block.clone()), + rig.harness.chain.process_block(block.clone()).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); // Ensure the block built atop an invalid payload cannot be imported to fork choice. - let (block, _block_signature) = block.deconstruct(); assert!(matches!( - rig.harness.chain.fork_choice.write().on_block( + rig.harness.chain.canonical_head.fork_choice_write_lock().on_block( slot, - &block, + block.message(), block_root, Duration::from_secs(0), &state, @@ -960,21 +998,21 @@ fn invalid_parent() { } /// Tests to ensure that we will still send a proposer preparation -#[test] -fn payload_preparation_before_transition_block() { +#[tokio::test] +async fn payload_preparation_before_transition_block() { let rig = InvalidPayloadRig::new(); let el = rig.execution_layer(); - let head = rig.harness.chain.head().unwrap(); - let head_info = rig.head_info(); - assert!( - !head_info.is_merge_transition_complete, - "the head block is pre-transition" - ); + let head = rig.harness.chain.head_snapshot(); assert_eq!( - head_info.execution_payload_block_hash, - Some(ExecutionBlockHash::zero()), - "the head block is post-bellatrix" + head.beacon_block + .message() + .body() + .execution_payload() + .unwrap() + .block_hash(), + ExecutionBlockHash::zero(), + "the head block is post-bellatrix but pre-transition" ); let current_slot = rig.harness.chain.slot().unwrap(); @@ -986,24 +1024,32 @@ fn payload_preparation_before_transition_block() { let fee_recipient = Address::repeat_byte(99); // Provide preparation data to the EL for 
`proposer`. - el.update_proposer_preparation_blocking( + el.update_proposer_preparation( Epoch::new(0), &[ProposerPreparationData { validator_index: proposer as u64, fee_recipient, }], ) - .unwrap(); + .await; rig.move_to_terminal_block(); rig.harness .chain - .prepare_beacon_proposer_blocking() + .prepare_beacon_proposer(current_slot) + .await .unwrap(); + let forkchoice_update_params = rig + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .get_forkchoice_update_parameters(); rig.harness .chain - .update_execution_engine_forkchoice_blocking(current_slot) + .update_execution_engine_forkchoice(current_slot, forkchoice_update_params) + .await .unwrap(); let (fork_choice_state, payload_attributes) = rig.previous_forkchoice_update_params(); @@ -1012,15 +1058,15 @@ fn payload_preparation_before_transition_block() { assert_eq!(fork_choice_state.head_block_hash, latest_block_hash); } -#[test] -fn attesting_to_optimistic_head() { +#[tokio::test] +async fn attesting_to_optimistic_head() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. 
- let root = rig.import_block(Payload::Syncing); + let root = rig.import_block(Payload::Syncing).await; - let head = rig.harness.chain.head().unwrap(); + let head = rig.harness.chain.head_snapshot(); let slot = head.beacon_block.slot(); assert_eq!( head.beacon_block_root, root, diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 771295c415..560e865a8f 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -72,18 +72,20 @@ fn get_harness( harness } -#[test] -fn full_participation_no_skips() { +#[tokio::test] +async fn full_participation_no_skips() { let num_blocks_produced = E::slots_per_epoch() * 5; let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; check_finalization(&harness, num_blocks_produced); check_split_slot(&harness, store); @@ -91,8 +93,8 @@ fn full_participation_no_skips() { check_iterators(&harness); } -#[test] -fn randomised_skips() { +#[tokio::test] +async fn randomised_skips() { let num_slots = E::slots_per_epoch() * 5; let mut num_blocks_produced = 0; let db_path = tempdir().unwrap(); @@ -104,14 +106,16 @@ fn randomised_skips() { for slot in 1..=num_slots { if rng.gen_bool(0.8) { - harness.extend_chain( - 1, - BlockStrategy::ForkCanonicalChainAt { - previous_slot: Slot::new(head_slot), - first_slot: Slot::new(slot), - }, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::ForkCanonicalChainAt { + previous_slot: Slot::new(head_slot), + first_slot: Slot::new(slot), + }, + AttestationStrategy::AllValidators, + ) + .await; harness.advance_slot(); 
num_blocks_produced += 1; head_slot = slot; @@ -120,7 +124,7 @@ fn randomised_skips() { } } - let state = &harness.chain.head().expect("should get head").beacon_state; + let state = &harness.chain.head_snapshot().beacon_state; assert_eq!( state.slot(), @@ -133,8 +137,8 @@ fn randomised_skips() { check_iterators(&harness); } -#[test] -fn long_skip() { +#[tokio::test] +async fn long_skip() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); @@ -148,11 +152,13 @@ fn long_skip() { // Having this set lower ensures that we start justifying and finalizing quickly after a skip. let final_blocks = 2 * E::slots_per_epoch() + E::slots_per_epoch() / 2; - harness.extend_chain( - initial_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + initial_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; check_finalization(&harness, initial_blocks); @@ -162,14 +168,16 @@ fn long_skip() { } // 3. Produce more blocks, establish a new finalized epoch - harness.extend_chain( - final_blocks as usize, - BlockStrategy::ForkCanonicalChainAt { - previous_slot: Slot::new(initial_blocks), - first_slot: Slot::new(initial_blocks + skip_slots as u64 + 1), - }, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + final_blocks as usize, + BlockStrategy::ForkCanonicalChainAt { + previous_slot: Slot::new(initial_blocks), + first_slot: Slot::new(initial_blocks + skip_slots as u64 + 1), + }, + AttestationStrategy::AllValidators, + ) + .await; check_finalization(&harness, initial_blocks + skip_slots + final_blocks); check_split_slot(&harness, store); @@ -183,8 +191,8 @@ fn long_skip() { /// 1. The chunked vector scheme doesn't attempt to store an incorrect genesis value /// 2. 
We correctly load the genesis value for all required slots /// NOTE: this test takes about a minute to run -#[test] -fn randao_genesis_storage() { +#[tokio::test] +async fn randao_genesis_storage() { let validator_count = 8; let db_path = tempdir().unwrap(); let store = get_store(&db_path); @@ -195,24 +203,24 @@ fn randao_genesis_storage() { // Check we have a non-trivial genesis value let genesis_value = *harness .chain - .head() - .expect("should get head") + .head_snapshot() .beacon_state .get_randao_mix(Epoch::new(0)) .expect("randao mix ok"); assert!(!genesis_value.is_zero()); - harness.extend_chain( - num_slots as usize - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_slots as usize - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Check that genesis value is still present assert!(harness .chain - .head() - .expect("should get head") + .head_snapshot() .beacon_state .randao_mixes() .iter() @@ -221,15 +229,16 @@ fn randao_genesis_storage() { // Then upon adding one more block, it isn't harness.advance_slot(); - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; assert!(harness .chain - .head() - .expect("should get head") + .head_snapshot() .beacon_state .randao_mixes() .iter() @@ -243,8 +252,8 @@ fn randao_genesis_storage() { } // Check that closing and reopening a freezer DB restores the split slot to its correct value. 
-#[test] -fn split_slot_restore() { +#[tokio::test] +async fn split_slot_restore() { let db_path = tempdir().unwrap(); let split_slot = { @@ -253,11 +262,13 @@ fn split_slot_restore() { let num_blocks = 4 * E::slots_per_epoch(); - harness.extend_chain( - num_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; store.get_split_slot() }; @@ -272,8 +283,8 @@ fn split_slot_restore() { // Check attestation processing and `load_epoch_boundary_state` in the presence of a split DB. // This is a bit of a monster test in that it tests lots of different things, but until they're // tested elsewhere, this is as good a place as any. -#[test] -fn epoch_boundary_state_attestation_processing() { +#[tokio::test] +async fn epoch_boundary_state_attestation_processing() { let num_blocks_produced = E::slots_per_epoch() * 5; let db_path = tempdir().unwrap(); let store = get_store(&db_path); @@ -285,13 +296,15 @@ fn epoch_boundary_state_attestation_processing() { let mut late_attestations = vec![]; for _ in 0..num_blocks_produced { - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(timely_validators.clone()), - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(timely_validators.clone()), + ) + .await; - let head = harness.chain.head().expect("head ok"); + let head = harness.chain.head_snapshot(); late_attestations.extend(harness.get_unaggregated_attestations( &AttestationStrategy::SomeValidators(late_validators.clone()), &head.beacon_state, @@ -328,12 +341,7 @@ fn epoch_boundary_state_attestation_processing() { assert_eq!(epoch_boundary_state, ebs_of_ebs); // If the attestation is pre-finalization it should be rejected. 
- let finalized_epoch = harness - .chain - .head_info() - .expect("should get head") - .finalized_checkpoint - .epoch; + let finalized_epoch = harness.finalized_checkpoint().epoch; let res = harness .chain @@ -364,8 +372,8 @@ fn epoch_boundary_state_attestation_processing() { } // Test that the `end_slot` for forwards block and state root iterators works correctly. -#[test] -fn forwards_iter_block_and_state_roots_until() { +#[tokio::test] +async fn forwards_iter_block_and_state_roots_until() { let num_blocks_produced = E::slots_per_epoch() * 17; let db_path = tempdir().unwrap(); let store = get_store(&db_path); @@ -373,13 +381,14 @@ fn forwards_iter_block_and_state_roots_until() { let all_validators = &harness.get_all_validators(); let (mut head_state, mut head_state_root) = harness.get_current_state_and_root(); - let head_block_root = harness.chain.head_info().unwrap().block_root; + let head_block_root = harness.head_block_root(); let mut block_roots = vec![head_block_root]; let mut state_roots = vec![head_state_root]; for slot in (1..=num_blocks_produced).map(Slot::from) { let (block_root, mut state) = harness .add_attested_block_at_slot(slot, head_state, head_state_root, all_validators) + .await .unwrap(); head_state_root = state.update_tree_hash_cache().unwrap(); head_state = state; @@ -429,19 +438,21 @@ fn forwards_iter_block_and_state_roots_until() { test_range(Slot::new(0), head_state.slot()); } -#[test] -fn block_replay_with_inaccurate_state_roots() { +#[tokio::test] +async fn block_replay_with_inaccurate_state_roots() { let num_blocks_produced = E::slots_per_epoch() * 3 + 31; let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let chain = &harness.chain; - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + 
AttestationStrategy::AllValidators, + ) + .await; // Slot must not be 0 mod 32 or else no blocks will be replayed. let (mut head_state, head_root) = harness.get_current_state_and_root(); @@ -471,8 +482,8 @@ fn block_replay_with_inaccurate_state_roots() { ); } -#[test] -fn block_replayer_hooks() { +#[tokio::test] +async fn block_replayer_hooks() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); @@ -487,12 +498,9 @@ fn block_replayer_hooks() { let (state, state_root) = harness.get_current_state_and_root(); let all_validators = harness.get_all_validators(); - let (_, _, end_block_root, mut end_state) = harness.add_attested_blocks_at_slots( - state.clone(), - state_root, - &block_slots, - &all_validators, - ); + let (_, _, end_block_root, mut end_state) = harness + .add_attested_blocks_at_slots(state.clone(), state_root, &block_slots, &all_validators) + .await; let blocks = store .load_blocks_to_replay(Slot::new(0), max_slot, end_block_root.into()) @@ -548,8 +556,8 @@ fn block_replayer_hooks() { assert_eq!(end_state, replay_state); } -#[test] -fn delete_blocks_and_states() { +#[tokio::test] +async fn delete_blocks_and_states() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let validators_keypairs = @@ -567,7 +575,9 @@ fn delete_blocks_and_states() { let initial_slots: Vec = (1..=unforked_blocks).map(Into::into).collect(); let (state, state_root) = harness.get_current_state_and_root(); let all_validators = harness.get_all_validators(); - harness.add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators); + harness + .add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators) + .await; // Create a fork post-finalization. 
let two_thirds = (LOW_VALIDATOR_COUNT / 3) * 2; @@ -587,20 +597,21 @@ fn delete_blocks_and_states() { let fork1_state = harness.get_current_state(); let fork2_state = fork1_state.clone(); - let results = harness.add_blocks_on_multiple_chains(vec![ - (fork1_state, fork1_slots, honest_validators), - (fork2_state, fork2_slots, faulty_validators), - ]); + let results = harness + .add_blocks_on_multiple_chains(vec![ + (fork1_state, fork1_slots, honest_validators), + (fork2_state, fork2_slots, faulty_validators), + ]) + .await; let honest_head = results[0].2; let faulty_head = results[1].2; assert_ne!(honest_head, faulty_head, "forks should be distinct"); - let head_info = harness.chain.head_info().expect("should get head"); - assert_eq!(head_info.slot, unforked_blocks + fork_blocks); + assert_eq!(harness.head_slot(), unforked_blocks + fork_blocks); assert_eq!( - head_info.block_root, + harness.head_block_root(), honest_head.into(), "the honest chain should be the canonical chain", ); @@ -671,7 +682,7 @@ fn delete_blocks_and_states() { // Check that we never produce invalid blocks when there is deep forking that changes the shuffling. 
// See https://github.com/sigp/lighthouse/issues/845 -fn multi_epoch_fork_valid_blocks_test( +async fn multi_epoch_fork_valid_blocks_test( initial_blocks: usize, num_fork1_blocks_: usize, num_fork2_blocks_: usize, @@ -696,7 +707,9 @@ fn multi_epoch_fork_valid_blocks_test( let initial_slots: Vec = (1..=initial_blocks).map(Into::into).collect(); let (state, state_root) = harness.get_current_state_and_root(); let all_validators = harness.get_all_validators(); - harness.add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators); + harness + .add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators) + .await; } assert!(num_fork1_validators <= LOW_VALIDATOR_COUNT); @@ -714,10 +727,12 @@ fn multi_epoch_fork_valid_blocks_test( .map(Into::into) .collect(); - let results = harness.add_blocks_on_multiple_chains(vec![ - (fork1_state, fork1_slots, fork1_validators), - (fork2_state, fork2_slots, fork2_validators), - ]); + let results = harness + .add_blocks_on_multiple_chains(vec![ + (fork1_state, fork1_slots, fork1_validators), + (fork2_state, fork2_slots, fork2_validators), + ]) + .await; let head1 = results[0].2; let head2 = results[1].2; @@ -726,43 +741,47 @@ fn multi_epoch_fork_valid_blocks_test( } // This is the minimal test of block production with different shufflings. 
-#[test] -fn block_production_different_shuffling_early() { +#[tokio::test] +async fn block_production_different_shuffling_early() { let slots_per_epoch = E::slots_per_epoch() as usize; multi_epoch_fork_valid_blocks_test( slots_per_epoch - 2, slots_per_epoch + 3, slots_per_epoch + 3, LOW_VALIDATOR_COUNT / 2, - ); + ) + .await; } -#[test] -fn block_production_different_shuffling_long() { +#[tokio::test] +async fn block_production_different_shuffling_long() { let slots_per_epoch = E::slots_per_epoch() as usize; multi_epoch_fork_valid_blocks_test( 2 * slots_per_epoch - 2, 3 * slots_per_epoch, 3 * slots_per_epoch, LOW_VALIDATOR_COUNT / 2, - ); + ) + .await; } // Check that the op pool safely includes multiple attestations per block when necessary. // This checks the correctness of the shuffling compatibility memoization. -#[test] -fn multiple_attestations_per_block() { +#[tokio::test] +async fn multiple_attestations_per_block() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store, HIGH_VALIDATOR_COUNT); - harness.extend_chain( - E::slots_per_epoch() as usize * 3, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + E::slots_per_epoch() as usize * 3, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); let committees_per_slot = head .beacon_state .get_committee_count_at_slot(head.beacon_state.slot()) @@ -774,6 +793,8 @@ fn multiple_attestations_per_block() { assert_eq!( snapshot .beacon_block + .as_ref() + .clone() .deconstruct() .0 .body() @@ -784,18 +805,20 @@ fn multiple_attestations_per_block() { } } -#[test] -fn shuffling_compatible_linear_chain() { +#[tokio::test] +async fn shuffling_compatible_linear_chain() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); // Skip 
the block at the end of the first epoch. - let head_block_root = harness.extend_chain( - 4 * E::slots_per_epoch() as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + let head_block_root = harness + .extend_chain( + 4 * E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; check_shuffling_compatible( &harness, @@ -808,25 +831,29 @@ fn shuffling_compatible_linear_chain() { ); } -#[test] -fn shuffling_compatible_missing_pivot_block() { +#[tokio::test] +async fn shuffling_compatible_missing_pivot_block() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); // Skip the block at the end of the first epoch. - harness.extend_chain( - E::slots_per_epoch() as usize - 2, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + E::slots_per_epoch() as usize - 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness.advance_slot(); harness.advance_slot(); - let head_block_root = harness.extend_chain( - 2 * E::slots_per_epoch() as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + let head_block_root = harness + .extend_chain( + 2 * E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; check_shuffling_compatible( &harness, @@ -839,15 +866,16 @@ fn shuffling_compatible_missing_pivot_block() { ); } -#[test] -fn shuffling_compatible_simple_fork() { +#[tokio::test] +async fn shuffling_compatible_simple_fork() { let slots_per_epoch = E::slots_per_epoch() as usize; let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test( 2 * slots_per_epoch, 3 * slots_per_epoch, 3 * slots_per_epoch, LOW_VALIDATOR_COUNT / 2, - ); + ) + .await; let head1_state = get_state_for_block(&harness, head1); let head2_state = 
get_state_for_block(&harness, head2); @@ -860,15 +888,16 @@ fn shuffling_compatible_simple_fork() { drop(db_path); } -#[test] -fn shuffling_compatible_short_fork() { +#[tokio::test] +async fn shuffling_compatible_short_fork() { let slots_per_epoch = E::slots_per_epoch() as usize; let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test( 2 * slots_per_epoch - 2, slots_per_epoch + 2, slots_per_epoch + 2, LOW_VALIDATOR_COUNT / 2, - ); + ) + .await; let head1_state = get_state_for_block(&harness, head1); let head2_state = get_state_for_block(&harness, head2); @@ -973,8 +1002,8 @@ fn check_shuffling_compatible( } // Ensure blocks from abandoned forks are pruned from the Hot DB -#[test] -fn prunes_abandoned_fork_between_two_finalized_checkpoints() { +#[tokio::test] +async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -997,7 +1026,8 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { state_root, &canonical_chain_slots, &honest_validators, - ); + ) + .await; state = new_state; let canonical_chain_slot: u64 = rig.get_current_slot().into(); @@ -1005,12 +1035,14 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { .map(Slot::new) .collect(); let (current_state, current_state_root) = rig.get_current_state_and_root(); - let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots( - current_state, - current_state_root, - &stray_slots, - &adversarial_validators, - ); + let (stray_blocks, stray_states, stray_head, _) = rig + .add_attested_blocks_at_slots( + current_state, + current_state_root, + &stray_slots, + &adversarial_validators, + ) + .await; // Precondition: Ensure all stray_blocks blocks are still known for &block_hash in stray_blocks.values() { @@ -1040,12 +1072,9 @@ fn 
prunes_abandoned_fork_between_two_finalized_checkpoints() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (canonical_chain_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots( - state, - state_root, - &finalization_slots, - &honest_validators, - ); + let (canonical_chain_blocks_post_finalization, _, _, _) = rig + .add_attested_blocks_at_slots(state, state_root, &finalization_slots, &honest_validators) + .await; // Postcondition: New blocks got finalized assert_eq!( @@ -1083,8 +1112,8 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { assert!(!rig.chain.knows_head(&stray_head)); } -#[test] -fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { +#[tokio::test] +async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1103,12 +1132,14 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { // Fill up 0th epoch let canonical_chain_slots_zeroth_epoch: Vec = (1..rig.epoch_start_slot(1)).map(Slot::new).collect(); - let (_, _, _, mut state) = rig.add_attested_blocks_at_slots( - state, - state_root, - &canonical_chain_slots_zeroth_epoch, - &honest_validators, - ); + let (_, _, _, mut state) = rig + .add_attested_blocks_at_slots( + state, + state_root, + &canonical_chain_slots_zeroth_epoch, + &honest_validators, + ) + .await; // Fill up 1st epoch let canonical_chain_slots_first_epoch: Vec = (rig.epoch_start_slot(1) @@ -1122,7 +1153,8 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { state_root, &canonical_chain_slots_first_epoch, &honest_validators, - ); + ) + .await; let canonical_chain_slot: u64 = rig.get_current_slot().into(); let stray_chain_slots_first_epoch: Vec = (rig.epoch_start_slot(1) + 2 @@ -1130,12 +1162,14 @@ fn 
pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots( - state.clone(), - state_root, - &stray_chain_slots_first_epoch, - &adversarial_validators, - ); + let (stray_blocks, stray_states, stray_head, _) = rig + .add_attested_blocks_at_slots( + state.clone(), + state_root, + &stray_chain_slots_first_epoch, + &adversarial_validators, + ) + .await; // Preconditions for &block_hash in stray_blocks.values() { @@ -1169,12 +1203,9 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (canonical_chain_blocks, _, _, _) = rig.add_attested_blocks_at_slots( - state, - state_root, - &finalization_slots, - &honest_validators, - ); + let (canonical_chain_blocks, _, _, _) = rig + .add_attested_blocks_at_slots(state, state_root, &finalization_slots, &honest_validators) + .await; // Postconditions assert_eq!( @@ -1213,8 +1244,8 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { assert!(get_blocks(&chain_dump).contains(&shared_head)); } -#[test] -fn pruning_does_not_touch_blocks_prior_to_finalization() { +#[tokio::test] +async fn pruning_does_not_touch_blocks_prior_to_finalization() { const HONEST_VALIDATOR_COUNT: usize = 16; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1232,12 +1263,9 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { // Fill up 0th epoch with canonical chain blocks let zeroth_epoch_slots: Vec = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect(); - let (canonical_chain_blocks, _, _, new_state) = rig.add_attested_blocks_at_slots( - state, - state_root, - &zeroth_epoch_slots, - &honest_validators, - ); + let (canonical_chain_blocks, _, _, 
new_state) = rig + .add_attested_blocks_at_slots(state, state_root, &zeroth_epoch_slots, &honest_validators) + .await; state = new_state; let canonical_chain_slot: u64 = rig.get_current_slot().into(); @@ -1246,12 +1274,14 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots( - state.clone(), - state_root, - &first_epoch_slots, - &adversarial_validators, - ); + let (stray_blocks, stray_states, stray_head, _) = rig + .add_attested_blocks_at_slots( + state.clone(), + state_root, + &first_epoch_slots, + &adversarial_validators, + ) + .await; // Preconditions for &block_hash in stray_blocks.values() { @@ -1279,8 +1309,9 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (_, _, _, _) = - rig.add_attested_blocks_at_slots(state, state_root, &slots, &honest_validators); + let (_, _, _, _) = rig + .add_attested_blocks_at_slots(state, state_root, &slots, &honest_validators) + .await; // Postconditions assert_eq!( @@ -1308,8 +1339,8 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { assert!(rig.chain.knows_head(&stray_head)); } -#[test] -fn prunes_fork_growing_past_youngest_finalized_checkpoint() { +#[tokio::test] +async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1326,12 +1357,9 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { // Fill up 0th epoch with canonical chain blocks let zeroth_epoch_slots: Vec = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect(); - let (canonical_blocks_zeroth_epoch, _, _, mut state) = rig.add_attested_blocks_at_slots( - state, - state_root, - 
&zeroth_epoch_slots, - &honest_validators, - ); + let (canonical_blocks_zeroth_epoch, _, _, mut state) = rig + .add_attested_blocks_at_slots(state, state_root, &zeroth_epoch_slots, &honest_validators) + .await; // Fill up 1st epoch. Contains a fork. let slots_first_epoch: Vec = (rig.epoch_start_slot(1) + 1..rig.epoch_start_slot(2)) @@ -1344,9 +1372,11 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { state_root, &slots_first_epoch, &adversarial_validators, - ); - let (canonical_blocks_first_epoch, _, _, mut canonical_state) = - rig.add_attested_blocks_at_slots(state, state_root, &slots_first_epoch, &honest_validators); + ) + .await; + let (canonical_blocks_first_epoch, _, _, mut canonical_state) = rig + .add_attested_blocks_at_slots(state, state_root, &slots_first_epoch, &honest_validators) + .await; // Fill up 2nd epoch. Extends both the canonical chain and the fork. let stray_slots_second_epoch: Vec = (rig.epoch_start_slot(2) @@ -1360,7 +1390,8 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { stray_state_root, &stray_slots_second_epoch, &adversarial_validators, - ); + ) + .await; // Precondition: Ensure all stray_blocks blocks are still known let stray_blocks: HashMap = stray_blocks_first_epoch @@ -1400,12 +1431,14 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (canonical_blocks, _, _, _) = rig.add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &canonical_slots, - &honest_validators, - ); + let (canonical_blocks, _, _, _) = rig + .add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &canonical_slots, + &honest_validators, + ) + .await; // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch @@ -1451,8 +1484,8 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { } // This is to check if state outside of normal block 
processing are pruned correctly. -#[test] -fn prunes_skipped_slots_states() { +#[tokio::test] +async fn prunes_skipped_slots_states() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1475,7 +1508,8 @@ fn prunes_skipped_slots_states() { state_root, &canonical_slots_zeroth_epoch, &honest_validators, - ); + ) + .await; let skipped_slot: Slot = (rig.epoch_start_slot(1) + 1).into(); @@ -1483,12 +1517,14 @@ fn prunes_skipped_slots_states() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (stray_blocks, stray_states, _, stray_state) = rig.add_attested_blocks_at_slots( - canonical_state.clone(), - canonical_state_root, - &stray_slots, - &adversarial_validators, - ); + let (stray_blocks, stray_states, _, stray_state) = rig + .add_attested_blocks_at_slots( + canonical_state.clone(), + canonical_state_root, + &stray_slots, + &adversarial_validators, + ) + .await; // Preconditions for &block_hash in stray_blocks.values() { @@ -1526,12 +1562,14 @@ fn prunes_skipped_slots_states() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (canonical_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &canonical_slots, - &honest_validators, - ); + let (canonical_blocks_post_finalization, _, _, _) = rig + .add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &canonical_slots, + &honest_validators, + ) + .await; // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch @@ -1575,8 +1613,8 @@ fn prunes_skipped_slots_states() { } // This is to check if state outside of normal block processing are pruned correctly. 
-#[test] -fn finalizes_non_epoch_start_slot() { +#[tokio::test] +async fn finalizes_non_epoch_start_slot() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1599,7 +1637,8 @@ fn finalizes_non_epoch_start_slot() { state_root, &canonical_slots_zeroth_epoch, &honest_validators, - ); + ) + .await; let skipped_slot: Slot = rig.epoch_start_slot(1).into(); @@ -1607,12 +1646,14 @@ fn finalizes_non_epoch_start_slot() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (stray_blocks, stray_states, _, stray_state) = rig.add_attested_blocks_at_slots( - canonical_state.clone(), - canonical_state_root, - &stray_slots, - &adversarial_validators, - ); + let (stray_blocks, stray_states, _, stray_state) = rig + .add_attested_blocks_at_slots( + canonical_state.clone(), + canonical_state_root, + &stray_slots, + &adversarial_validators, + ) + .await; // Preconditions for &block_hash in stray_blocks.values() { @@ -1650,12 +1691,14 @@ fn finalizes_non_epoch_start_slot() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (canonical_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &canonical_slots, - &honest_validators, - ); + let (canonical_blocks_post_finalization, _, _, _) = rig + .add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &canonical_slots, + &honest_validators, + ) + .await; // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch @@ -1759,14 +1802,14 @@ fn check_no_blocks_exist<'a>( } } -#[test] -fn prune_single_block_fork() { +#[tokio::test] +async fn prune_single_block_fork() { let slots_per_epoch = E::slots_per_epoch(); - pruning_test(3 * slots_per_epoch, 1, slots_per_epoch, 0, 1); + pruning_test(3 * 
slots_per_epoch, 1, slots_per_epoch, 0, 1).await; } -#[test] -fn prune_single_block_long_skip() { +#[tokio::test] +async fn prune_single_block_long_skip() { let slots_per_epoch = E::slots_per_epoch(); pruning_test( 2 * slots_per_epoch, @@ -1774,11 +1817,12 @@ fn prune_single_block_long_skip() { 2 * slots_per_epoch, 2 * slots_per_epoch as u64, 1, - ); + ) + .await; } -#[test] -fn prune_shared_skip_states_mid_epoch() { +#[tokio::test] +async fn prune_shared_skip_states_mid_epoch() { let slots_per_epoch = E::slots_per_epoch(); pruning_test( slots_per_epoch + slots_per_epoch / 2, @@ -1786,39 +1830,43 @@ fn prune_shared_skip_states_mid_epoch() { slots_per_epoch, 2, slots_per_epoch - 1, - ); + ) + .await; } -#[test] -fn prune_shared_skip_states_epoch_boundaries() { +#[tokio::test] +async fn prune_shared_skip_states_epoch_boundaries() { let slots_per_epoch = E::slots_per_epoch(); - pruning_test(slots_per_epoch - 1, 1, slots_per_epoch, 2, slots_per_epoch); - pruning_test(slots_per_epoch - 1, 2, slots_per_epoch, 1, slots_per_epoch); + pruning_test(slots_per_epoch - 1, 1, slots_per_epoch, 2, slots_per_epoch).await; + pruning_test(slots_per_epoch - 1, 2, slots_per_epoch, 1, slots_per_epoch).await; pruning_test( 2 * slots_per_epoch + slots_per_epoch / 2, slots_per_epoch as u64 / 2, slots_per_epoch, slots_per_epoch as u64 / 2 + 1, slots_per_epoch, - ); + ) + .await; pruning_test( 2 * slots_per_epoch + slots_per_epoch / 2, slots_per_epoch as u64 / 2, slots_per_epoch, slots_per_epoch as u64 / 2 + 1, slots_per_epoch, - ); + ) + .await; pruning_test( 2 * slots_per_epoch - 1, slots_per_epoch as u64, 1, 0, 2 * slots_per_epoch, - ); + ) + .await; } /// Generic harness for pruning tests. -fn pruning_test( +async fn pruning_test( // Number of blocks to start the chain with before forking. num_initial_blocks: u64, // Number of skip slots on the main chain after the initial blocks. 
@@ -1850,30 +1898,34 @@ fn pruning_test( let start_slot = Slot::new(1); let divergence_slot = start_slot + num_initial_blocks; let (state, state_root) = harness.get_current_state_and_root(); - let (_, _, _, divergence_state) = harness.add_attested_blocks_at_slots( - state, - state_root, - &slots(start_slot, num_initial_blocks)[..], - &honest_validators, - ); + let (_, _, _, divergence_state) = harness + .add_attested_blocks_at_slots( + state, + state_root, + &slots(start_slot, num_initial_blocks)[..], + &honest_validators, + ) + .await; - let mut chains = harness.add_blocks_on_multiple_chains(vec![ - // Canonical chain - ( - divergence_state.clone(), - slots( - divergence_slot + num_canonical_skips, - num_canonical_middle_blocks, + let mut chains = harness + .add_blocks_on_multiple_chains(vec![ + // Canonical chain + ( + divergence_state.clone(), + slots( + divergence_slot + num_canonical_skips, + num_canonical_middle_blocks, + ), + honest_validators.clone(), ), - honest_validators.clone(), - ), - // Fork chain - ( - divergence_state.clone(), - slots(divergence_slot + num_fork_skips, num_fork_blocks), - faulty_validators, - ), - ]); + // Fork chain + ( + divergence_state.clone(), + slots(divergence_slot + num_fork_skips, num_fork_blocks), + faulty_validators, + ), + ]) + .await; let (_, _, _, mut canonical_state) = chains.remove(0); let (stray_blocks, stray_states, _, stray_head_state) = chains.remove(0); @@ -1899,20 +1951,19 @@ fn pruning_test( let num_finalization_blocks = 4 * E::slots_per_epoch(); let canonical_slot = divergence_slot + num_canonical_skips + num_canonical_middle_blocks; let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - harness.add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &slots(canonical_slot, num_finalization_blocks), - &honest_validators, - ); + harness + .add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &slots(canonical_slot, num_finalization_blocks), + 
&honest_validators, + ) + .await; // Check that finalization has advanced past the divergence slot. assert!( harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint + .finalized_checkpoint() .epoch .start_slot(E::slots_per_epoch()) > divergence_slot @@ -1940,43 +1991,48 @@ fn garbage_collect_temp_states_from_failed_block() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - let slots_per_epoch = E::slots_per_epoch(); - let genesis_state = harness.get_current_state(); - let block_slot = Slot::new(2 * slots_per_epoch); - let (signed_block, state) = harness.make_block(genesis_state, block_slot); + // Use a `block_on_dangerous` rather than an async test to stop spawned processes from holding + // a reference to the store. + harness.chain.task_executor.clone().block_on_dangerous( + async move { + let slots_per_epoch = E::slots_per_epoch(); - let (mut block, _) = signed_block.deconstruct(); + let genesis_state = harness.get_current_state(); + let block_slot = Slot::new(2 * slots_per_epoch); + let (signed_block, state) = harness.make_block(genesis_state, block_slot).await; - // Mutate the block to make it invalid, and re-sign it. - *block.state_root_mut() = Hash256::repeat_byte(0xff); - let proposer_index = block.proposer_index() as usize; - let block = block.sign( - &harness.validator_keypairs[proposer_index].sk, - &state.fork(), - state.genesis_validators_root(), - &harness.spec, + let (mut block, _) = signed_block.deconstruct(); + + // Mutate the block to make it invalid, and re-sign it. + *block.state_root_mut() = Hash256::repeat_byte(0xff); + let proposer_index = block.proposer_index() as usize; + let block = block.sign( + &harness.validator_keypairs[proposer_index].sk, + &state.fork(), + state.genesis_validators_root(), + &harness.spec, + ); + + // The block should be rejected, but should store a bunch of temporary states. 
+ harness.set_current_slot(block_slot); + harness.process_block_result(block).await.unwrap_err(); + + assert_eq!( + store.iter_temporary_state_roots().count(), + block_slot.as_usize() - 1 + ); + }, + "test", ); - // The block should be rejected, but should store a bunch of temporary states. - harness.set_current_slot(block_slot); - harness.process_block_result(block).unwrap_err(); - - assert_eq!( - store.iter_temporary_state_roots().count(), - block_slot.as_usize() - 1 - ); - - drop(harness); - drop(store); - // On startup, the store should garbage collect all the temporary states. let store = get_store(&db_path); assert_eq!(store.iter_temporary_state_roots().count(), 0); } -#[test] -fn weak_subjectivity_sync() { +#[tokio::test] +async fn weak_subjectivity_sync() { // Build an initial chain on one harness, representing a synced node with full history. let num_initial_blocks = E::slots_per_epoch() * 11; let num_final_blocks = E::slots_per_epoch() * 2; @@ -1985,17 +2041,19 @@ fn weak_subjectivity_sync() { let full_store = get_store(&temp1); let harness = get_harness(full_store.clone(), LOW_VALIDATOR_COUNT); - harness.extend_chain( - num_initial_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_initial_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; let genesis_state = full_store .get_state(&harness.chain.genesis_state_root, Some(Slot::new(0))) .unwrap() .unwrap(); - let wss_checkpoint = harness.chain.head_info().unwrap().finalized_checkpoint; + let wss_checkpoint = harness.finalized_checkpoint(); let wss_block = harness .chain .store @@ -2010,11 +2068,13 @@ fn weak_subjectivity_sync() { // Add more blocks that advance finalization further. 
harness.advance_slot(); - harness.extend_chain( - num_final_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_final_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); let log = test_logger(); @@ -2028,6 +2088,7 @@ fn weak_subjectivity_sync() { BeaconChainBuilder::new(MinimalEthSpec) .store(store.clone()) .custom_spec(test_spec::()) + .task_executor(harness.chain.task_executor.clone()) .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) .unwrap() .logger(log.clone()) @@ -2058,12 +2119,15 @@ fn weak_subjectivity_sync() { let full_block = harness .chain .store - .make_full_block(&snapshot.beacon_block_root, block.clone()) + .make_full_block(&snapshot.beacon_block_root, block.as_ref().clone()) .unwrap(); beacon_chain.slot_clock.set_slot(block.slot().as_u64()); - beacon_chain.process_block(full_block).unwrap(); - beacon_chain.fork_choice().unwrap(); + beacon_chain + .process_block(Arc::new(full_block)) + .await + .unwrap(); + beacon_chain.recompute_head_at_current_slot().await.unwrap(); // Check that the new block's state can be loaded correctly. 
let state_root = block.state_root(); @@ -2157,8 +2221,8 @@ fn weak_subjectivity_sync() { assert_eq!(store.get_anchor_slot(), None); } -#[test] -fn finalizes_after_resuming_from_db() { +#[tokio::test] +async fn finalizes_after_resuming_from_db() { let validator_count = 16; let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 8; let first_half = num_blocks_produced / 2; @@ -2175,17 +2239,18 @@ fn finalizes_after_resuming_from_db() { harness.advance_slot(); - harness.extend_chain( - first_half as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + first_half as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; assert!( harness .chain - .head() - .expect("should read head") + .head_snapshot() .beacon_state .finalized_checkpoint() .epoch @@ -2227,17 +2292,15 @@ fn finalizes_after_resuming_from_db() { .slot_clock .set_slot(latest_slot.as_u64() + 1); - resumed_harness.extend_chain( - (num_blocks_produced - first_half) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + resumed_harness + .extend_chain( + (num_blocks_produced - first_half) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let state = &resumed_harness - .chain - .head() - .expect("should read head") - .beacon_state; + let state = &resumed_harness.chain.head_snapshot().beacon_state; assert_eq!( state.slot(), num_blocks_produced, @@ -2260,8 +2323,8 @@ fn finalizes_after_resuming_from_db() { ); } -#[test] -fn revert_minority_fork_on_resume() { +#[tokio::test] +async fn revert_minority_fork_on_resume() { let validator_count = 16; let slots_per_epoch = MinimalEthSpec::slots_per_epoch(); @@ -2317,17 +2380,17 @@ fn revert_minority_fork_on_resume() { harness1.process_attestations(attestations.clone()); harness2.process_attestations(attestations); - let (block, new_state) = harness1.make_block(state, slot); + let (block, 
new_state) = harness1.make_block(state, slot).await; - harness1.process_block(slot, block.clone()).unwrap(); - harness2.process_block(slot, block.clone()).unwrap(); + harness1.process_block(slot, block.clone()).await.unwrap(); + harness2.process_block(slot, block.clone()).await.unwrap(); state = new_state; block_root = block.canonical_root(); } - assert_eq!(harness1.chain.head_info().unwrap().slot, fork_slot - 1); - assert_eq!(harness2.chain.head_info().unwrap().slot, fork_slot - 1); + assert_eq!(harness1.head_slot(), fork_slot - 1); + assert_eq!(harness2.head_slot(), fork_slot - 1); // Fork the two chains. let mut state1 = state.clone(); @@ -2352,13 +2415,13 @@ fn revert_minority_fork_on_resume() { harness2.process_attestations(attestations); // Minority chain block (no attesters). - let (block1, new_state1) = harness1.make_block(state1, slot); - harness1.process_block(slot, block1).unwrap(); + let (block1, new_state1) = harness1.make_block(state1, slot).await; + harness1.process_block(slot, block1).await.unwrap(); state1 = new_state1; // Majority chain block (all attesters). - let (block2, new_state2) = harness2.make_block(state2, slot); - harness2.process_block(slot, block2.clone()).unwrap(); + let (block2, new_state2) = harness2.make_block(state2, slot).await; + harness2.process_block(slot, block2.clone()).await.unwrap(); state2 = new_state2; block_root = block2.canonical_root(); @@ -2367,8 +2430,8 @@ fn revert_minority_fork_on_resume() { } let end_slot = fork_slot + post_fork_blocks - 1; - assert_eq!(harness1.chain.head_info().unwrap().slot, end_slot); - assert_eq!(harness2.chain.head_info().unwrap().slot, end_slot); + assert_eq!(harness1.head_slot(), end_slot); + assert_eq!(harness2.head_slot(), end_slot); // Resume from disk with the hard-fork activated: this should revert the post-fork blocks. 
// We have to do some hackery with the `slot_clock` so that the correct slot is set when @@ -2396,24 +2459,35 @@ fn revert_minority_fork_on_resume() { .build(); // Head should now be just before the fork. - resumed_harness.chain.fork_choice().unwrap(); - let head = resumed_harness.chain.head_info().unwrap(); - assert_eq!(head.slot, fork_slot - 1); + resumed_harness + .chain + .recompute_head_at_current_slot() + .await + .unwrap(); + assert_eq!(resumed_harness.head_slot(), fork_slot - 1); // Head track should know the canonical head and the rogue head. assert_eq!(resumed_harness.chain.heads().len(), 2); - assert!(resumed_harness.chain.knows_head(&head.block_root.into())); + assert!(resumed_harness + .chain + .knows_head(&resumed_harness.head_block_root().into())); // Apply blocks from the majority chain and trigger finalization. let initial_split_slot = resumed_harness.chain.store.get_split_slot(); for block in &majority_blocks { - resumed_harness.process_block_result(block.clone()).unwrap(); + resumed_harness + .process_block_result(block.clone()) + .await + .unwrap(); // The canonical head should be the block from the majority chain. 
- resumed_harness.chain.fork_choice().unwrap(); - let head_info = resumed_harness.chain.head_info().unwrap(); - assert_eq!(head_info.slot, block.slot()); - assert_eq!(head_info.block_root, block.canonical_root()); + resumed_harness + .chain + .recompute_head_at_current_slot() + .await + .unwrap(); + assert_eq!(resumed_harness.head_slot(), block.slot()); + assert_eq!(resumed_harness.head_block_root(), block.canonical_root()); } let advanced_split_slot = resumed_harness.chain.store.get_split_slot(); @@ -2432,10 +2506,22 @@ fn revert_minority_fork_on_resume() { fn assert_chains_pretty_much_the_same(a: &BeaconChain, b: &BeaconChain) { assert_eq!(a.spec, b.spec, "spec should be equal"); assert_eq!(a.op_pool, b.op_pool, "op_pool should be equal"); + let a_head = a.head_snapshot(); + let b_head = b.head_snapshot(); assert_eq!( - a.head().unwrap(), - b.head().unwrap(), - "head() should be equal" + a_head.beacon_block_root, b_head.beacon_block_root, + "head block roots should be equal" + ); + assert_eq!( + a_head.beacon_block, b_head.beacon_block, + "head blocks should be equal" + ); + // Clone with committee caches only to prevent other caches from messing with the equality + // check. + assert_eq!( + a_head.beacon_state.clone_with_only_committee_caches(), + b_head.beacon_state.clone_with_only_committee_caches(), + "head states should be equal" ); assert_eq!(a.heads(), b.heads(), "heads() should be equal"); assert_eq!( @@ -2446,15 +2532,21 @@ fn assert_chains_pretty_much_the_same(a: &BeaconChain, b let slot = a.slot().unwrap(); let spec = T::EthSpec::default_spec(); assert!( - a.fork_choice.write().get_head(slot, &spec).unwrap() - == b.fork_choice.write().get_head(slot, &spec).unwrap(), + a.canonical_head + .fork_choice_write_lock() + .get_head(slot, &spec) + .unwrap() + == b.canonical_head + .fork_choice_write_lock() + .get_head(slot, &spec) + .unwrap(), "fork_choice heads should be equal" ); } /// Check that the head state's slot matches `expected_slot`. 
fn check_slot(harness: &TestHarness, expected_slot: u64) { - let state = &harness.chain.head().expect("should get head").beacon_state; + let state = &harness.chain.head_snapshot().beacon_state; assert_eq!( state.slot(), @@ -2465,7 +2557,7 @@ fn check_slot(harness: &TestHarness, expected_slot: u64) { /// Check that the chain has finalized under best-case assumptions, and check the head slot. fn check_finalization(harness: &TestHarness, expected_slot: u64) { - let state = &harness.chain.head().expect("should get head").beacon_state; + let state = &harness.chain.head_snapshot().beacon_state; check_slot(harness, expected_slot); @@ -2487,8 +2579,7 @@ fn check_split_slot(harness: &TestHarness, store: Arc, L assert_eq!( harness .chain - .head() - .expect("should get head") + .head_snapshot() .beacon_state .finalized_checkpoint() .epoch @@ -2575,10 +2666,7 @@ fn check_iterators(harness: &TestHarness) { max_slot = Some(slot); } // Assert that we reached the head. - assert_eq!( - max_slot, - Some(harness.chain.head_info().expect("should get head").slot) - ); + assert_eq!(max_slot, Some(harness.head_slot())); // Assert that the block root iterator reaches the head. 
assert_eq!( harness @@ -2588,7 +2676,7 @@ fn check_iterators(harness: &TestHarness) { .last() .map(Result::unwrap) .map(|(_, slot)| slot), - Some(harness.chain.head_info().expect("should get head").slot) + Some(harness.head_slot()) ); } diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 626c132d69..1e51b0ffb9 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -46,15 +46,8 @@ fn get_valid_sync_committee_message( slot: Slot, relative_sync_committee: RelativeSyncCommittee, ) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) { - let head_state = harness - .chain - .head_beacon_state() - .expect("should get head state"); - let head_block_root = harness - .chain - .head() - .expect("should get head state") - .beacon_block_root; + let head_state = harness.chain.head_beacon_state_cloned(); + let head_block_root = harness.chain.head_snapshot().beacon_block_root; let (signature, _) = harness .make_sync_committee_messages(&head_state, head_block_root, slot, relative_sync_committee) .get(0) @@ -77,16 +70,9 @@ fn get_valid_sync_contribution( harness: &BeaconChainHarness>, relative_sync_committee: RelativeSyncCommittee, ) -> (SignedContributionAndProof, usize, SecretKey) { - let head_state = harness - .chain - .head_beacon_state() - .expect("should get head state"); + let head_state = harness.chain.head_beacon_state_cloned(); - let head_block_root = harness - .chain - .head() - .expect("should get head state") - .beacon_block_root; + let head_block_root = harness.chain.head_snapshot().beacon_block_root; let sync_contributions = harness.make_sync_contributions( &head_state, head_block_root, @@ -116,7 +102,7 @@ fn get_non_aggregator( harness: &BeaconChainHarness>, slot: Slot, ) -> (usize, SecretKey) { - let state = &harness.chain.head().expect("should get head").beacon_state; + let state 
= &harness.chain.head_snapshot().beacon_state; let sync_subcommittee_size = E::sync_committee_size() .safe_div(SYNC_COMMITTEE_SUBNET_COUNT as usize) .expect("should determine sync subcommittee size"); @@ -162,17 +148,19 @@ fn get_non_aggregator( } /// Tests verification of `SignedContributionAndProof` from the gossip network. -#[test] -fn aggregated_gossip_verification() { +#[tokio::test] +async fn aggregated_gossip_verification() { let harness = get_harness(VALIDATOR_COUNT); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - &[Slot::new(1), Slot::new(2)], - (0..VALIDATOR_COUNT).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + &[Slot::new(1), Slot::new(2)], + (0..VALIDATOR_COUNT).collect::>().as_slice(), + ) + .await; let current_slot = harness.chain.slot().expect("should get slot"); @@ -406,7 +394,7 @@ fn aggregated_gossip_verification() { valid_aggregate.message.contribution.clone(), None, &non_aggregator_sk, - &harness.chain.head_info().expect("should get head info").fork, + &harness.chain.canonical_head.cached_head().head_fork(), harness.chain.genesis_validators_root, &harness.chain.spec, ) @@ -474,6 +462,7 @@ fn aggregated_gossip_verification() { harness .add_attested_block_at_slot(target_slot, state, Hash256::zero(), &[]) + .await .expect("should add block"); // **Incorrectly** create a sync contribution using the current sync committee @@ -488,17 +477,19 @@ fn aggregated_gossip_verification() { } /// Tests the verification conditions for sync committee messages on the gossip network. 
-#[test] -fn unaggregated_gossip_verification() { +#[tokio::test] +async fn unaggregated_gossip_verification() { let harness = get_harness(VALIDATOR_COUNT); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - &[Slot::new(1), Slot::new(2)], - (0..VALIDATOR_COUNT).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + &[Slot::new(1), Slot::new(2)], + (0..VALIDATOR_COUNT).collect::>().as_slice(), + ) + .await; let current_slot = harness.chain.slot().expect("should get slot"); @@ -648,6 +639,7 @@ fn unaggregated_gossip_verification() { harness .add_attested_block_at_slot(target_slot, state, Hash256::zero(), &[]) + .await .expect("should add block"); // **Incorrectly** create a sync message using the current sync committee diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 7b17937a21..f98580db3f 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -6,14 +6,16 @@ use beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, OP_POOL_DB_KEY, }, - StateSkipConfig, WhenSlotSkipped, + BeaconChain, StateSkipConfig, WhenSlotSkipped, }; use lazy_static::lazy_static; use operation_pool::PersistedOperationPool; use state_processing::{ per_slot_processing, per_slot_processing::Error as SlotProcessingError, EpochProcessingError, }; -use types::{BeaconStateError, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot}; +use types::{ + BeaconState, BeaconStateError, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot, +}; // Should ideally be divisible by 3. 
pub const VALIDATOR_COUNT: usize = 24; @@ -40,7 +42,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness = harness .chain @@ -122,7 +126,7 @@ fn iterators() { ) }); - let head = &harness.chain.head().expect("should get head"); + let head = harness.chain.head_snapshot(); assert_eq!( *block_roots.last().expect("should have some block roots"), @@ -137,20 +141,44 @@ fn iterators() { ); } -#[test] -fn find_reorgs() { +fn find_reorg_slot( + chain: &BeaconChain>, + new_state: &BeaconState, + new_block_root: Hash256, +) -> Slot { + let (old_state, old_block_root) = { + let head = chain.canonical_head.cached_head(); + let old_state = head.snapshot.beacon_state.clone(); + let old_block_root = head.head_block_root(); + (old_state, old_block_root) + }; + beacon_chain::canonical_head::find_reorg_slot( + &old_state, + old_block_root, + new_state, + new_block_root, + &chain.spec, + ) + .unwrap() +} + +#[tokio::test] +async fn find_reorgs() { let num_blocks_produced = MinimalEthSpec::slots_per_historical_root() + 1; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - // No need to produce attestations for this test. - AttestationStrategy::SomeValidators(vec![]), - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + // No need to produce attestations for this test. + AttestationStrategy::SomeValidators(vec![]), + ) + .await; - let head_state = harness.chain.head_beacon_state().unwrap(); + let head = harness.chain.head_snapshot(); + let head_state = &head.beacon_state; let head_slot = head_state.slot(); let genesis_state = harness .chain @@ -160,10 +188,11 @@ fn find_reorgs() { // because genesis is more than `SLOTS_PER_HISTORICAL_ROOT` away, this should return with the // finalized slot. 
assert_eq!( - harness - .chain - .find_reorg_slot(&genesis_state, harness.chain.genesis_block_root) - .unwrap(), + find_reorg_slot( + &harness.chain, + &genesis_state, + harness.chain.genesis_block_root + ), head_state .finalized_checkpoint() .epoch @@ -172,13 +201,11 @@ fn find_reorgs() { // test head assert_eq!( - harness - .chain - .find_reorg_slot( - &head_state, - harness.chain.head_beacon_block().unwrap().canonical_root() - ) - .unwrap(), + find_reorg_slot( + &harness.chain, + &head_state, + harness.chain.head_beacon_block().canonical_root() + ), head_slot ); @@ -194,16 +221,13 @@ fn find_reorgs() { .unwrap() .unwrap(); assert_eq!( - harness - .chain - .find_reorg_slot(&prev_state, prev_block_root) - .unwrap(), + find_reorg_slot(&harness.chain, &prev_state, prev_block_root), prev_slot ); } -#[test] -fn chooses_fork() { +#[tokio::test] +async fn chooses_fork() { let harness = get_harness(VALIDATOR_COUNT); let two_thirds = (VALIDATOR_COUNT / 3) * 2; @@ -217,22 +241,27 @@ fn chooses_fork() { let faulty_fork_blocks = delay + 2; // Build an initial chain where all validators agree. 
- harness.extend_chain( - initial_blocks, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + initial_blocks, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block( - &honest_validators, - &faulty_validators, - honest_fork_blocks, - faulty_fork_blocks, - ); + let (honest_head, faulty_head) = harness + .generate_two_forks_by_skipping_a_block( + &honest_validators, + &faulty_validators, + honest_fork_blocks, + faulty_fork_blocks, + ) + .await; assert_ne!(honest_head, faulty_head, "forks should be distinct"); - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -241,29 +270,28 @@ fn chooses_fork() { ); assert_eq!( - harness - .chain - .head() - .expect("should get head") - .beacon_block_root, + harness.chain.head_snapshot().beacon_block_root, honest_head, "the honest chain should be the canonical chain" ); } -#[test] -fn finalizes_with_full_participation() { +#[tokio::test] +async fn finalizes_with_full_participation() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -287,8 +315,8 @@ fn finalizes_with_full_participation() { ); } -#[test] -fn finalizes_with_two_thirds_participation() { +#[tokio::test] +async fn finalizes_with_two_thirds_participation() { let 
num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); @@ -296,13 +324,16 @@ fn finalizes_with_two_thirds_participation() { let two_thirds = (VALIDATOR_COUNT / 3) * 2; let attesters = (0..two_thirds).collect(); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(attesters), - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(attesters), + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -331,8 +362,8 @@ fn finalizes_with_two_thirds_participation() { ); } -#[test] -fn does_not_finalize_with_less_than_two_thirds_participation() { +#[tokio::test] +async fn does_not_finalize_with_less_than_two_thirds_participation() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); @@ -341,13 +372,16 @@ fn does_not_finalize_with_less_than_two_thirds_participation() { let less_than_two_thirds = two_thirds - 1; let attesters = (0..less_than_two_thirds).collect(); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(attesters), - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(attesters), + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -371,19 +405,22 @@ fn does_not_finalize_with_less_than_two_thirds_participation() { ); } -#[test] -fn does_not_finalize_without_attestation() { +#[tokio::test] +async fn does_not_finalize_without_attestation() { let num_blocks_produced = 
MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(vec![]), - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -407,18 +444,20 @@ fn does_not_finalize_without_attestation() { ); } -#[test] -fn roundtrip_operation_pool() { +#[tokio::test] +async fn roundtrip_operation_pool() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); // Add some attestations - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; assert!(harness.chain.op_pool.num_attestations() > 0); // TODO: could add some other operations @@ -439,20 +478,23 @@ fn roundtrip_operation_pool() { assert_eq!(harness.chain.op_pool, restored_op_pool); } -#[test] -fn unaggregated_attestations_added_to_fork_choice_some_none() { +#[tokio::test] +async fn unaggregated_attestations_added_to_fork_choice_some_none() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() / 2; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; - let mut fork_choice = 
harness.chain.fork_choice.write(); + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + let mut fork_choice = harness.chain.canonical_head.fork_choice_write_lock(); // Move forward a slot so all queued attestations can be processed. harness.advance_slot(); @@ -493,8 +535,8 @@ fn unaggregated_attestations_added_to_fork_choice_some_none() { } } -#[test] -fn attestations_with_increasing_slots() { +#[tokio::test] +async fn attestations_with_increasing_slots() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); @@ -502,14 +544,16 @@ fn attestations_with_increasing_slots() { let mut attestations = vec![]; for _ in 0..num_blocks_produced { - harness.extend_chain( - 2, - BlockStrategy::OnCanonicalHead, - // Don't produce & include any attestations (we'll collect them later). - AttestationStrategy::SomeValidators(vec![]), - ); + harness + .extend_chain( + 2, + BlockStrategy::OnCanonicalHead, + // Don't produce & include any attestations (we'll collect them later). 
+ AttestationStrategy::SomeValidators(vec![]), + ) + .await; - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); let head_state_root = head.beacon_state_root(); attestations.extend(harness.get_unaggregated_attestations( @@ -548,20 +592,23 @@ fn attestations_with_increasing_slots() { } } -#[test] -fn unaggregated_attestations_added_to_fork_choice_all_updated() { +#[tokio::test] +async fn unaggregated_attestations_added_to_fork_choice_all_updated() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; - let mut fork_choice = harness.chain.fork_choice.write(); + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + let mut fork_choice = harness.chain.canonical_head.fork_choice_write_lock(); // Move forward a slot so all queued attestations can be processed. harness.advance_slot(); @@ -605,7 +652,7 @@ fn unaggregated_attestations_added_to_fork_choice_all_updated() { } } -fn run_skip_slot_test(skip_slots: u64) { +async fn run_skip_slot_test(skip_slots: u64) { let num_validators = 8; let harness_a = get_harness(num_validators); let harness_b = get_harness(num_validators); @@ -615,83 +662,60 @@ fn run_skip_slot_test(skip_slots: u64) { harness_b.advance_slot(); } - harness_a.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - // No attestation required for test. - AttestationStrategy::SomeValidators(vec![]), - ); + harness_a + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + // No attestation required for test. 
+ AttestationStrategy::SomeValidators(vec![]), + ) + .await; assert_eq!( - harness_a - .chain - .head() - .expect("should get head") - .beacon_block - .slot(), + harness_a.chain.head_snapshot().beacon_block.slot(), Slot::new(skip_slots + 1) ); assert_eq!( - harness_b - .chain - .head() - .expect("should get head") - .beacon_block - .slot(), + harness_b.chain.head_snapshot().beacon_block.slot(), Slot::new(0) ); assert_eq!( harness_b .chain - .process_block( - harness_a - .chain - .head() - .expect("should get head") - .beacon_block - .clone(), - ) + .process_block(harness_a.chain.head_snapshot().beacon_block.clone()) + .await .unwrap(), - harness_a - .chain - .head() - .expect("should get head") - .beacon_block_root + harness_a.chain.head_snapshot().beacon_block_root ); harness_b .chain - .fork_choice() + .recompute_head_at_current_slot() + .await .expect("should run fork choice"); assert_eq!( - harness_b - .chain - .head() - .expect("should get head") - .beacon_block - .slot(), + harness_b.chain.head_snapshot().beacon_block.slot(), Slot::new(skip_slots + 1) ); } -#[test] -fn produces_and_processes_with_genesis_skip_slots() { +#[tokio::test] +async fn produces_and_processes_with_genesis_skip_slots() { for i in 0..MinimalEthSpec::slots_per_epoch() * 4 { - run_skip_slot_test(i) + run_skip_slot_test(i).await } } -#[test] -fn block_roots_skip_slot_behaviour() { +#[tokio::test] +async fn block_roots_skip_slot_behaviour() { let harness = get_harness(VALIDATOR_COUNT); // Test should be longer than the block roots to ensure a DB lookup is triggered. 
let chain_length = harness .chain - .head() - .unwrap() + .head_snapshot() .beacon_state .block_roots() .len() as u64 @@ -708,11 +732,13 @@ fn block_roots_skip_slot_behaviour() { let slot = harness.chain.slot().unwrap().as_u64(); if !skipped_slots.contains(&slot) { - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; } } @@ -820,7 +846,7 @@ fn block_roots_skip_slot_behaviour() { let future_slot = harness.chain.slot().unwrap() + 1; assert_eq!( - harness.chain.head().unwrap().beacon_block.slot(), + harness.chain.head_snapshot().beacon_block.slot(), future_slot - 2, "test precondition" ); diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 95ba1b5657..b7f06183f1 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -684,26 +684,20 @@ where if let Some(execution_layer) = beacon_chain.execution_layer.as_ref() { // Only send a head update *after* genesis. if let Ok(current_slot) = beacon_chain.slot() { - let head = beacon_chain - .head_info() - .map_err(|e| format!("Unable to read beacon chain head: {:?}", e))?; - - // Issue the head to the execution engine on startup. This ensures it can start - // syncing. - if head - .execution_payload_block_hash - .map_or(false, |h| h != ExecutionBlockHash::zero()) + let params = beacon_chain + .canonical_head + .cached_head() + .forkchoice_update_parameters(); + if params + .head_hash + .map_or(false, |hash| hash != ExecutionBlockHash::zero()) { - // Spawn a new task using the "async" fork choice update method, rather than - // using the "blocking" method. - // - // Using the blocking method may cause a panic if this code is run inside an - // async context. + // Spawn a new task to update the EE without waiting for it to complete. 
let inner_chain = beacon_chain.clone(); runtime_context.executor.spawn( async move { let result = inner_chain - .update_execution_engine_forkchoice_async(current_slot) + .update_execution_engine_forkchoice(current_slot, params) .await; // No need to exit early if setting the head fails. It will be set again if/when the @@ -811,8 +805,16 @@ where self.db_path = Some(hot_path.into()); self.freezer_db_path = Some(cold_path.into()); + let inner_spec = spec.clone(); let schema_upgrade = |db, from, to| { - migrate_schema::>(db, datadir, from, to, log) + migrate_schema::>( + db, + datadir, + from, + to, + log, + &inner_spec, + ) }; let store = HotColdDB::open( diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 22c3bfcb3a..9476819a4b 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,5 +1,5 @@ use crate::metrics; -use beacon_chain::{BeaconChain, BeaconChainTypes, HeadSafetyStatus}; +use beacon_chain::{BeaconChain, BeaconChainTypes, ExecutionStatus}; use lighthouse_network::{types::SyncState, NetworkGlobals}; use parking_lot::Mutex; use slog::{crit, debug, error, info, warn, Logger}; @@ -100,15 +100,10 @@ pub fn spawn_notifier( current_sync_state = sync_state; } - let head_info = match beacon_chain.head_info() { - Ok(head_info) => head_info, - Err(e) => { - error!(log, "Failed to get beacon chain head info"; "error" => format!("{:?}", e)); - break; - } - }; - - let head_slot = head_info.slot; + let cached_head = beacon_chain.canonical_head.cached_head(); + let head_slot = cached_head.head_slot(); + let head_root = cached_head.head_block_root(); + let finalized_checkpoint = cached_head.finalized_checkpoint(); metrics::set_gauge(&metrics::NOTIFIER_HEAD_SLOT, head_slot.as_u64() as i64); @@ -125,9 +120,6 @@ pub fn spawn_notifier( }; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - let finalized_epoch = head_info.finalized_checkpoint.epoch; - let finalized_root = 
head_info.finalized_checkpoint.root; - let head_root = head_info.block_root; // The default is for regular sync but this gets modified if backfill sync is in // progress. @@ -177,8 +169,8 @@ pub fn spawn_notifier( log, "Slot timer"; "peers" => peer_count_pretty(connected_peer_count), - "finalized_root" => format!("{}", finalized_root), - "finalized_epoch" => finalized_epoch, + "finalized_root" => format!("{}", finalized_checkpoint.root), + "finalized_epoch" => finalized_checkpoint.epoch, "head_block" => format!("{}", head_root), "head_slot" => head_slot, "current_slot" => current_slot, @@ -264,35 +256,29 @@ pub fn spawn_notifier( head_root.to_string() }; - let block_hash = match beacon_chain.head_safety_status() { - Ok(HeadSafetyStatus::Safe(hash_opt)) => hash_opt - .map(|hash| format!("{} (verified)", hash)) - .unwrap_or_else(|| "n/a".to_string()), - Ok(HeadSafetyStatus::Unsafe(block_hash)) => { + let block_hash = match beacon_chain.canonical_head.head_execution_status() { + Ok(ExecutionStatus::Irrelevant(_)) => "n/a".to_string(), + Ok(ExecutionStatus::Valid(hash)) => format!("{} (verified)", hash), + Ok(ExecutionStatus::Optimistic(hash)) => { warn!( log, - "Head execution payload is unverified"; - "execution_block_hash" => ?block_hash, + "Head is optimistic"; + "info" => "chain not fully verified, \ + block and attestation production disabled until execution engine syncs", + "execution_block_hash" => ?hash, ); - format!("{} (unverified)", block_hash) + format!("{} (unverified)", hash) } - Ok(HeadSafetyStatus::Invalid(block_hash)) => { + Ok(ExecutionStatus::Invalid(hash)) => { crit!( log, "Head execution payload is invalid"; "msg" => "this scenario may be unrecoverable", - "execution_block_hash" => ?block_hash, + "execution_block_hash" => ?hash, ); - format!("{} (invalid)", block_hash) - } - Err(e) => { - error!( - log, - "Failed to read head safety status"; - "error" => ?e - ); - "n/a".to_string() + format!("{} (invalid)", hash) } + Err(_) => 
"unknown".to_string(), }; info!( @@ -300,8 +286,8 @@ pub fn spawn_notifier( "Synced"; "peers" => peer_count_pretty(connected_peer_count), "exec_hash" => block_hash, - "finalized_root" => format!("{}", finalized_root), - "finalized_epoch" => finalized_epoch, + "finalized_root" => format!("{}", finalized_checkpoint.root), + "finalized_epoch" => finalized_checkpoint.epoch, "epoch" => current_epoch, "block" => block_info, "slot" => current_slot, @@ -312,8 +298,8 @@ pub fn spawn_notifier( log, "Searching for peers"; "peers" => peer_count_pretty(connected_peer_count), - "finalized_root" => format!("{}", finalized_root), - "finalized_epoch" => finalized_epoch, + "finalized_root" => format!("{}", finalized_checkpoint.root), + "finalized_epoch" => finalized_checkpoint.epoch, "head_slot" => head_slot, "current_slot" => current_slot, ); @@ -332,57 +318,52 @@ pub fn spawn_notifier( fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger) { let current_slot_opt = beacon_chain.slot().ok(); - if let Ok(head_info) = beacon_chain.head_info() { - // Perform some logging about the eth1 chain - if let Some(eth1_chain) = beacon_chain.eth1_chain.as_ref() { - // No need to do logging if using the dummy backend. 
- if eth1_chain.is_dummy_backend() { - return; - } - - if let Some(status) = - eth1_chain.sync_status(head_info.genesis_time, current_slot_opt, &beacon_chain.spec) - { - debug!( - log, - "Eth1 cache sync status"; - "eth1_head_block" => status.head_block_number, - "latest_cached_block_number" => status.latest_cached_block_number, - "latest_cached_timestamp" => status.latest_cached_block_timestamp, - "voting_target_timestamp" => status.voting_target_timestamp, - "ready" => status.lighthouse_is_cached_and_ready - ); - - if !status.lighthouse_is_cached_and_ready { - let voting_target_timestamp = status.voting_target_timestamp; - - let distance = status - .latest_cached_block_timestamp - .map(|latest| { - voting_target_timestamp.saturating_sub(latest) - / beacon_chain.spec.seconds_per_eth1_block - }) - .map(|distance| distance.to_string()) - .unwrap_or_else(|| "initializing deposits".to_string()); - - warn!( - log, - "Syncing eth1 block cache"; - "est_blocks_remaining" => distance, - ); - } - } else { - error!( - log, - "Unable to determine eth1 sync status"; - ); - } + // Perform some logging about the eth1 chain + if let Some(eth1_chain) = beacon_chain.eth1_chain.as_ref() { + // No need to do logging if using the dummy backend. 
+ if eth1_chain.is_dummy_backend() { + return; + } + + if let Some(status) = eth1_chain.sync_status( + beacon_chain.genesis_time, + current_slot_opt, + &beacon_chain.spec, + ) { + debug!( + log, + "Eth1 cache sync status"; + "eth1_head_block" => status.head_block_number, + "latest_cached_block_number" => status.latest_cached_block_number, + "latest_cached_timestamp" => status.latest_cached_block_timestamp, + "voting_target_timestamp" => status.voting_target_timestamp, + "ready" => status.lighthouse_is_cached_and_ready + ); + + if !status.lighthouse_is_cached_and_ready { + let voting_target_timestamp = status.voting_target_timestamp; + + let distance = status + .latest_cached_block_timestamp + .map(|latest| { + voting_target_timestamp.saturating_sub(latest) + / beacon_chain.spec.seconds_per_eth1_block + }) + .map(|distance| distance.to_string()) + .unwrap_or_else(|| "initializing deposits".to_string()); + + warn!( + log, + "Syncing eth1 block cache"; + "est_blocks_remaining" => distance, + ); + } + } else { + error!( + log, + "Unable to determine eth1 sync status"; + ); } - } else { - error!( - log, - "Unable to get head info"; - ); } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 156382c481..61f1c569d4 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -274,31 +274,6 @@ impl ExecutionLayer { self.inner.execution_engine_forkchoice_lock.lock().await } - /// Convenience function to allow calling async functions in a non-async context. - pub fn block_on<'a, F, U, V>(&'a self, generate_future: F) -> Result - where - F: Fn(&'a Self) -> U, - U: Future>, - { - let runtime = self.executor().handle().ok_or(Error::ShuttingDown)?; - // TODO(merge): respect the shutdown signal. - runtime.block_on(generate_future(self)) - } - - /// Convenience function to allow calling async functions in a non-async context. 
- /// - /// The function is "generic" since it does not enforce a particular return type on - /// `generate_future`. - pub fn block_on_generic<'a, F, U, V>(&'a self, generate_future: F) -> Result - where - F: Fn(&'a Self) -> U, - U: Future, - { - let runtime = self.executor().handle().ok_or(Error::ShuttingDown)?; - // TODO(merge): respect the shutdown signal. - Ok(runtime.block_on(generate_future(self))) - } - /// Convenience function to allow spawning a task without waiting for the result. pub fn spawn(&self, generate_future: F, name: &'static str) where @@ -431,18 +406,6 @@ impl ExecutionLayer { self.engines().is_synced().await } - /// Updates the proposer preparation data provided by validators - pub fn update_proposer_preparation_blocking( - &self, - update_epoch: Epoch, - preparation_data: &[ProposerPreparationData], - ) -> Result<(), Error> { - self.block_on_generic(|_| async move { - self.update_proposer_preparation(update_epoch, preparation_data) - .await - }) - } - /// Updates the proposer preparation data provided by validators pub async fn update_proposer_preparation( &self, diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 9207067e33..35a35bcb74 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -58,12 +58,10 @@ fn cached_attestation_duties( request_indices: &[u64], chain: &BeaconChain, ) -> Result { - let head = chain - .head_info() - .map_err(warp_utils::reject::beacon_chain_error)?; + let head_block_root = chain.canonical_head.cached_head().head_block_root(); - let (duties, dependent_root) = chain - .validator_attestation_duties(request_indices, request_epoch, head.block_root) + let (duties, dependent_root, _execution_status) = chain + .validator_attestation_duties(request_indices, request_epoch, head_block_root) .map_err(warp_utils::reject::beacon_chain_error)?; convert_to_api_response(duties, request_indices, dependent_root, chain) 
diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 727215bfca..73f50985bd 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,6 +1,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::BlockId as CoreBlockId; use std::str::FromStr; +use std::sync::Arc; use types::{BlindedPayload, Hash256, SignedBeaconBlock, Slot}; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given @@ -23,19 +24,18 @@ impl BlockId { chain: &BeaconChain, ) -> Result { match &self.0 { - CoreBlockId::Head => chain - .head_info() - .map(|head| head.block_root) - .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Head => Ok(chain.canonical_head.cached_head().head_block_root()), CoreBlockId::Genesis => Ok(chain.genesis_block_root), - CoreBlockId::Finalized => chain - .head_info() - .map(|head| head.finalized_checkpoint.root) - .map_err(warp_utils::reject::beacon_chain_error), - CoreBlockId::Justified => chain - .head_info() - .map(|head| head.current_justified_checkpoint.root) - .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Finalized => Ok(chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .root), + CoreBlockId::Justified => Ok(chain + .canonical_head + .cached_head() + .justified_checkpoint() + .root), CoreBlockId::Slot(slot) => chain .block_root_at_slot(*slot, WhenSlotSkipped::None) .map_err(warp_utils::reject::beacon_chain_error) @@ -57,10 +57,7 @@ impl BlockId { chain: &BeaconChain, ) -> Result>, warp::Rejection> { match &self.0 { - CoreBlockId::Head => chain - .head_beacon_block() - .map(Into::into) - .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Head => Ok(chain.head_beacon_block().clone_as_blinded()), CoreBlockId::Slot(slot) => { let root = self.root(chain)?; chain @@ -103,11 +100,9 @@ impl BlockId { pub async fn full_block( &self, chain: &BeaconChain, - ) -> 
Result, warp::Rejection> { + ) -> Result>, warp::Rejection> { match &self.0 { - CoreBlockId::Head => chain - .head_beacon_block() - .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Head => Ok(chain.head_beacon_block()), CoreBlockId::Slot(slot) => { let root = self.root(chain)?; chain @@ -122,7 +117,7 @@ impl BlockId { slot ))); } - Ok(block) + Ok(Arc::new(block)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -136,8 +131,8 @@ impl BlockId { .get_block(&root) .await .map_err(warp_utils::reject::beacon_chain_error) - .and_then(|root_opt| { - root_opt.ok_or_else(|| { + .and_then(|block_opt| { + block_opt.map(Arc::new).ok_or_else(|| { warp_utils::reject::custom_not_found(format!( "beacon block with root {}", root diff --git a/beacon_node/http_api/src/database.rs b/beacon_node/http_api/src/database.rs index 014db8a602..645c19c40e 100644 --- a/beacon_node/http_api/src/database.rs +++ b/beacon_node/http_api/src/database.rs @@ -22,7 +22,7 @@ pub fn info( pub fn historical_blocks( chain: Arc>, - blocks: Vec>, + blocks: Vec>>, ) -> Result { chain .import_historical_block_batch(blocks) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 06dc968764..ff4d46efcb 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -23,7 +23,7 @@ use beacon_chain::{ observed_operations::ObservationOutcome, validator_monitor::{get_block_delay_ms, timestamp_now}, AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, - HeadSafetyStatus, ProduceBlockVerification, WhenSlotSkipped, + ProduceBlockVerification, WhenSlotSkipped, }; use block_id::BlockId; use eth2::types::{self as api_types, EndpointVersion, ValidatorId}; @@ -369,9 +369,7 @@ pub fn serve( chain: Arc>| async move { match *network_globals.sync_state.read() { SyncState::SyncingFinalized { .. 
} => { - let head_slot = chain - .best_slot() - .map_err(warp_utils::reject::beacon_chain_error)?; + let head_slot = chain.canonical_head.cached_head().head_slot(); let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { @@ -404,35 +402,6 @@ pub fn serve( ) .untuple_one(); - // Create a `warp` filter that rejects requests unless the head has been verified by the - // execution layer. - let only_with_safe_head = warp::any() - .and(chain_filter.clone()) - .and_then(move |chain: Arc>| async move { - let status = chain.head_safety_status().map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to read head safety status: {:?}", - e - )) - })?; - match status { - HeadSafetyStatus::Safe(_) => Ok(()), - HeadSafetyStatus::Unsafe(hash) => { - Err(warp_utils::reject::custom_server_error(format!( - "optimistic head hash {:?} has not been verified by the execution layer", - hash - ))) - } - HeadSafetyStatus::Invalid(hash) => { - Err(warp_utils::reject::custom_server_error(format!( - "the head block has an invalid payload {:?}, this may be unrecoverable", - hash - ))) - } - } - }) - .untuple_one(); - // Create a `warp` filter that provides access to the logger. 
let inner_ctx = ctx.clone(); let log_filter = warp::any().map(move || inner_ctx.log.clone()); @@ -451,15 +420,12 @@ pub fn serve( .and(chain_filter.clone()) .and_then(|chain: Arc>| { blocking_json_task(move || { - chain - .head_info() - .map_err(warp_utils::reject::beacon_chain_error) - .map(|head| api_types::GenesisData { - genesis_time: head.genesis_time, - genesis_validators_root: head.genesis_validators_root, - genesis_fork_version: chain.spec.genesis_fork_version, - }) - .map(api_types::GenericResponse::from) + let genesis_data = api_types::GenesisData { + genesis_time: chain.genesis_time, + genesis_validators_root: chain.genesis_validators_root, + genesis_fork_version: chain.spec.genesis_fork_version, + }; + Ok(api_types::GenericResponse::from(genesis_data)) }) }); @@ -835,10 +801,10 @@ pub fn serve( blocking_json_task(move || { let (root, block) = match (query.slot, query.parent_root) { // No query parameters, return the canonical head block. - (None, None) => chain - .head_beacon_block() - .map_err(warp_utils::reject::beacon_chain_error) - .map(|block| (block.canonical_root(), block.into()))?, + (None, None) => { + let block = chain.head_beacon_block(); + (block.canonical_root(), block.clone_as_blinded()) + } // Only the parent root parameter, do a forwards-iterator lookup. (None, Some(parent_root)) => { let parent = BlockId::from_root(parent_root).blinded_block(&chain)?; @@ -945,93 +911,85 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .and_then( - |block: SignedBeaconBlock, + |block: Arc>, chain: Arc>, network_tx: UnboundedSender>, - log: Logger| { - blocking_json_task(move || { - let seen_timestamp = timestamp_now(); + log: Logger| async move { + let seen_timestamp = timestamp_now(); - // Send the block, regardless of whether or not it is valid. The API - // specification is very clear that this is the desired behaviour. 
- publish_pubsub_message( - &network_tx, - PubsubMessage::BeaconBlock(Box::new(block.clone())), - )?; + // Send the block, regardless of whether or not it is valid. The API + // specification is very clear that this is the desired behaviour. + publish_pubsub_message(&network_tx, PubsubMessage::BeaconBlock(block.clone()))?; - // Determine the delay after the start of the slot, register it with metrics. - let delay = - get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); - metrics::observe_duration( - &metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, - delay, - ); + // Determine the delay after the start of the slot, register it with metrics. + let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); + metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); - match chain.process_block(block.clone()) { - Ok(root) => { - info!( + match chain.process_block(block.clone()).await { + Ok(root) => { + info!( + log, + "Valid block from HTTP API"; + "block_delay" => ?delay, + "root" => format!("{}", root), + "proposer_index" => block.message().proposer_index(), + "slot" => block.slot(), + ); + + // Notify the validator monitor. + chain.validator_monitor.read().register_api_block( + seen_timestamp, + block.message(), + root, + &chain.slot_clock, + ); + + // Update the head since it's likely this block will become the new + // head. + chain + .recompute_head_at_current_slot() + .await + .map_err(warp_utils::reject::beacon_chain_error)?; + + // Perform some logging to inform users if their blocks are being produced + // late. 
+ // + // Check to see the thresholds are non-zero to avoid logging errors with small + // slot times (e.g., during testing) + let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); + let error_threshold = crit_threshold / 2; + if delay >= crit_threshold { + crit!( log, - "Valid block from HTTP API"; - "block_delay" => ?delay, - "root" => format!("{}", root), - "proposer_index" => block.message().proposer_index(), + "Block was broadcast too late"; + "msg" => "system may be overloaded, block likely to be orphaned", + "delay_ms" => delay.as_millis(), "slot" => block.slot(), - ); - - // Notify the validator monitor. - chain.validator_monitor.read().register_api_block( - seen_timestamp, - block.message(), - root, - &chain.slot_clock, - ); - - // Update the head since it's likely this block will become the new - // head. - chain - .fork_choice() - .map_err(warp_utils::reject::beacon_chain_error)?; - - // Perform some logging to inform users if their blocks are being produced - // late. 
- // - // Check to see the thresholds are non-zero to avoid logging errors with small - // slot times (e.g., during testing) - let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); - let error_threshold = crit_threshold / 2; - if delay >= crit_threshold { - crit!( - log, - "Block was broadcast too late"; - "msg" => "system may be overloaded, block likely to be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) - } else if delay >= error_threshold { - error!( - log, - "Block broadcast was delayed"; - "msg" => "system may be overloaded, block may be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) - } - - Ok(()) - } - Err(e) => { - let msg = format!("{:?}", e); + "root" => ?root, + ) + } else if delay >= error_threshold { error!( log, - "Invalid block provided to HTTP API"; - "reason" => &msg - ); - Err(warp_utils::reject::broadcast_without_import(msg)) + "Block broadcast was delayed"; + "msg" => "system may be overloaded, block may be orphaned", + "delay_ms" => delay.as_millis(), + "slot" => block.slot(), + "root" => ?root, + ) } + + Ok(warp::reply::json(&())) } - }) + Err(e) => { + let msg = format!("{:?}", e); + error!( + log, + "Invalid block provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::broadcast_without_import(msg)) + } + } }, ); @@ -1049,99 +1007,90 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .and_then( - |block: SignedBeaconBlock>, + |block: Arc>>, chain: Arc>, network_tx: UnboundedSender>, - _log: Logger| { - blocking_json_task(move || { - if let Some(el) = chain.execution_layer.as_ref() { - //FIXME(sean): we may not always receive the payload in this response because it - // should be the relay's job to propogate the block. However, since this block is - // already signed and sent this might be ok (so long as the relay validates - // the block before revealing the payload). 
+ _log: Logger| async move { + if let Some(el) = chain.execution_layer.as_ref() { + //FIXME(sean): we may not always receive the payload in this response because it + // should be the relay's job to propogate the block. However, since this block is + // already signed and sent this might be ok (so long as the relay validates + // the block before revealing the payload). - //FIXME(sean) additionally, this endpoint should serve blocks prior to Bellatrix, and should - // be able to support the normal block proposal flow, because at some point full block endpoints - // will be deprecated from the beacon API. This will entail creating full blocks in - // `validator/blinded_blocks`, caching their payloads, and transforming them into blinded - // blocks. We will access the payload of those blocks here. This flow should happen if the - // execution layer has no payload builders or if we have not yet finalized post-merge transition. - let payload = el - .block_on(|el| el.propose_blinded_beacon_block(&block)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "proposal failed: {:?}", - e - )) - })?; - let new_block = SignedBeaconBlock::Merge(SignedBeaconBlockMerge { - message: BeaconBlockMerge { - slot: block.message().slot(), - proposer_index: block.message().proposer_index(), - parent_root: block.message().parent_root(), - state_root: block.message().state_root(), - body: BeaconBlockBodyMerge { - randao_reveal: block.message().body().randao_reveal().clone(), - eth1_data: block.message().body().eth1_data().clone(), - graffiti: *block.message().body().graffiti(), - proposer_slashings: block - .message() - .body() - .proposer_slashings() - .clone(), - attester_slashings: block - .message() - .body() - .attester_slashings() - .clone(), - attestations: block.message().body().attestations().clone(), - deposits: block.message().body().deposits().clone(), - voluntary_exits: block - .message() - .body() - .voluntary_exits() - .clone(), - sync_aggregate: block - 
.message() - .body() - .sync_aggregate() - .unwrap() - .clone(), - execution_payload: payload.into(), - }, + //FIXME(sean) additionally, this endpoint should serve blocks prior to Bellatrix, and should + // be able to support the normal block proposal flow, because at some point full block endpoints + // will be deprecated from the beacon API. This will entail creating full blocks in + // `validator/blinded_blocks`, caching their payloads, and transforming them into blinded + // blocks. We will access the payload of those blocks here. This flow should happen if the + // execution layer has no payload builders or if we have not yet finalized post-merge transition. + let payload = el.propose_blinded_beacon_block(&block).await.map_err(|e| { + warp_utils::reject::custom_server_error(format!("proposal failed: {:?}", e)) + })?; + let new_block = SignedBeaconBlock::Merge(SignedBeaconBlockMerge { + message: BeaconBlockMerge { + slot: block.message().slot(), + proposer_index: block.message().proposer_index(), + parent_root: block.message().parent_root(), + state_root: block.message().state_root(), + body: BeaconBlockBodyMerge { + randao_reveal: block.message().body().randao_reveal().clone(), + eth1_data: block.message().body().eth1_data().clone(), + graffiti: *block.message().body().graffiti(), + proposer_slashings: block + .message() + .body() + .proposer_slashings() + .clone(), + attester_slashings: block + .message() + .body() + .attester_slashings() + .clone(), + attestations: block.message().body().attestations().clone(), + deposits: block.message().body().deposits().clone(), + voluntary_exits: block.message().body().voluntary_exits().clone(), + sync_aggregate: block + .message() + .body() + .sync_aggregate() + .unwrap() + .clone(), + execution_payload: payload.into(), }, - signature: block.signature().clone(), - }); + }, + signature: block.signature().clone(), + }); + let new_block = Arc::new(new_block); - // Send the block, regardless of whether or not it is valid. 
The API - // specification is very clear that this is the desired behaviour. - publish_pubsub_message( - &network_tx, - PubsubMessage::BeaconBlock(Box::new(new_block.clone())), - )?; + // Send the block, regardless of whether or not it is valid. The API + // specification is very clear that this is the desired behaviour. + publish_pubsub_message( + &network_tx, + PubsubMessage::BeaconBlock(new_block.clone()), + )?; - match chain.process_block(new_block) { - Ok(_) => { - // Update the head since it's likely this block will become the new - // head. - chain - .fork_choice() - .map_err(warp_utils::reject::beacon_chain_error)?; + match chain.process_block(new_block).await { + Ok(_) => { + // Update the head since it's likely this block will become the new + // head. + chain + .recompute_head_at_current_slot() + .await + .map_err(warp_utils::reject::beacon_chain_error)?; - Ok(()) - } - Err(e) => { - let msg = format!("{:?}", e); - - Err(warp_utils::reject::broadcast_without_import(msg)) - } + Ok(warp::reply::json(&())) + } + Err(e) => { + let msg = format!("{:?}", e); + + Err(warp_utils::reject::broadcast_without_import(msg)) } - } else { - Err(warp_utils::reject::custom_server_error( - "no execution layer found".to_string(), - )) } - }) + } else { + Err(warp_utils::reject::custom_server_error( + "no execution layer found".to_string(), + )) + } }, ); @@ -1401,9 +1350,7 @@ pub fn serve( )), )?; - chain - .import_attester_slashing(slashing) - .map_err(warp_utils::reject::beacon_chain_error)?; + chain.import_attester_slashing(slashing); } Ok(()) @@ -1744,10 +1691,7 @@ pub fn serve( .and_then( |network_globals: Arc>, chain: Arc>| { blocking_json_task(move || { - let head_slot = chain - .head_info() - .map(|info| info.slot) - .map_err(warp_utils::reject::beacon_chain_error)?; + let head_slot = chain.canonical_head.cached_head().head_slot(); let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { warp_utils::reject::custom_server_error("Unable to read slot 
clock".into()) })?; @@ -1993,48 +1937,49 @@ pub fn serve( |endpoint_version: EndpointVersion, slot: Slot, query: api_types::ValidatorBlocksQuery, - chain: Arc>| { - blocking_json_task(move || { - let randao_reveal = query.randao_reveal.as_ref().map_or_else( - || { - if query.verify_randao { - Err(warp_utils::reject::custom_bad_request( - "randao_reveal is mandatory unless verify_randao=false".into(), - )) - } else { - Ok(Signature::empty()) - } - }, - |sig_bytes| { - sig_bytes.try_into().map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "randao reveal is not a valid BLS signature: {:?}", - e - )) - }) - }, - )?; + chain: Arc>| async move { + let randao_reveal = query.randao_reveal.as_ref().map_or_else( + || { + if query.verify_randao { + Err(warp_utils::reject::custom_bad_request( + "randao_reveal is mandatory unless verify_randao=false".into(), + )) + } else { + Ok(Signature::empty()) + } + }, + |sig_bytes| { + sig_bytes.try_into().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not a valid BLS signature: {:?}", + e + )) + }) + }, + )?; - let randao_verification = if query.verify_randao { - ProduceBlockVerification::VerifyRandao - } else { - ProduceBlockVerification::NoVerification - }; + let randao_verification = if query.verify_randao { + ProduceBlockVerification::VerifyRandao + } else { + ProduceBlockVerification::NoVerification + }; - let (block, _) = chain - .produce_block_with_verification::>( - randao_reveal, - slot, - query.graffiti.map(Into::into), - randao_verification, - ) - .map_err(warp_utils::reject::block_production_error)?; - let fork_name = block - .to_ref() - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - fork_versioned_response(endpoint_version, fork_name, block) - }) + let (block, _) = chain + .produce_block_with_verification::>( + randao_reveal, + slot, + query.graffiti.map(Into::into), + randao_verification, + ) + .await + 
.map_err(warp_utils::reject::block_production_error)?; + let fork_name = block + .to_ref() + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + + fork_versioned_response(endpoint_version, fork_name, block) + .map(|response| warp::reply::json(&response)) }, ); @@ -2055,48 +2000,48 @@ pub fn serve( |endpoint_version: EndpointVersion, slot: Slot, query: api_types::ValidatorBlocksQuery, - chain: Arc>| { - blocking_json_task(move || { - let randao_reveal = query.randao_reveal.as_ref().map_or_else( - || { - if query.verify_randao { - Err(warp_utils::reject::custom_bad_request( - "randao_reveal is mandatory unless verify_randao=false".into(), - )) - } else { - Ok(Signature::empty()) - } - }, - |sig_bytes| { - sig_bytes.try_into().map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "randao reveal is not a valid BLS signature: {:?}", - e - )) - }) - }, - )?; + chain: Arc>| async move { + let randao_reveal = query.randao_reveal.as_ref().map_or_else( + || { + if query.verify_randao { + Err(warp_utils::reject::custom_bad_request( + "randao_reveal is mandatory unless verify_randao=false".into(), + )) + } else { + Ok(Signature::empty()) + } + }, + |sig_bytes| { + sig_bytes.try_into().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not a valid BLS signature: {:?}", + e + )) + }) + }, + )?; - let randao_verification = if query.verify_randao { - ProduceBlockVerification::VerifyRandao - } else { - ProduceBlockVerification::NoVerification - }; + let randao_verification = if query.verify_randao { + ProduceBlockVerification::VerifyRandao + } else { + ProduceBlockVerification::NoVerification + }; - let (block, _) = chain - .produce_block_with_verification::>( - randao_reveal, - slot, - query.graffiti.map(Into::into), - randao_verification, - ) - .map_err(warp_utils::reject::block_production_error)?; - let fork_name = block - .to_ref() - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - 
fork_versioned_response(endpoint_version, fork_name, block) - }) + let (block, _) = chain + .produce_block_with_verification::>( + randao_reveal, + slot, + query.graffiti.map(Into::into), + randao_verification, + ) + .await + .map_err(warp_utils::reject::block_production_error)?; + let fork_name = block + .to_ref() + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + fork_versioned_response(endpoint_version, fork_name, block) + .map(|response| warp::reply::json(&response)) }, ); @@ -2107,7 +2052,6 @@ pub fn serve( .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) - .and(only_with_safe_head.clone()) .and(chain_filter.clone()) .and_then( |query: api_types::ValidatorAttestationDataQuery, chain: Arc>| { @@ -2140,7 +2084,6 @@ pub fn serve( .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) - .and(only_with_safe_head.clone()) .and(chain_filter.clone()) .and_then( |query: api_types::ValidatorAggregateAttestationQuery, chain: Arc>| { @@ -2217,7 +2160,6 @@ pub fn serve( .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) - .and(only_with_safe_head) .and(chain_filter.clone()) .and_then( |sync_committee_data: SyncContributionData, chain: Arc>| { @@ -2413,40 +2355,39 @@ pub fn serve( .and_then( |chain: Arc>, log: Logger, - preparation_data: Vec| { - blocking_json_task(move || { - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::beacon_chain_error)?; - let current_epoch = chain - .epoch() - .map_err(warp_utils::reject::beacon_chain_error)?; + preparation_data: Vec| async move { + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::beacon_chain_error)?; - debug!( - log, - "Received proposer preparation data"; - "count" => preparation_data.len(), - ); + let current_slot = chain + .slot() 
+ .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - execution_layer - .update_proposer_preparation_blocking(current_epoch, &preparation_data) - .map_err(|_e| { - warp_utils::reject::custom_bad_request( - "error processing proposer preparations".to_string(), - ) - })?; + debug!( + log, + "Received proposer preparation data"; + "count" => preparation_data.len(), + ); - chain.prepare_beacon_proposer_blocking().map_err(|e| { + execution_layer + .update_proposer_preparation(current_epoch, &preparation_data) + .await; + + chain + .prepare_beacon_proposer(current_slot) + .await + .map_err(|e| { warp_utils::reject::custom_bad_request(format!( "error updating proposer preparations: {:?}", e )) })?; - Ok(()) - }) + Ok::<_, warp::reject::Rejection>(warp::reply::json(&())) }, ); @@ -2461,69 +2402,66 @@ pub fn serve( .and_then( |chain: Arc>, log: Logger, - register_val_data: Vec| { - blocking_json_task(move || { - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::beacon_chain_error)?; - let current_epoch = chain - .slot_clock - .now_or_genesis() - .ok_or(BeaconChainError::UnableToReadSlot) - .map_err(warp_utils::reject::beacon_chain_error)? 
- .epoch(T::EthSpec::slots_per_epoch()); + register_val_data: Vec| async move { + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_slot = chain + .slot_clock + .now_or_genesis() + .ok_or(BeaconChainError::UnableToReadSlot) + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - debug!( - log, - "Received register validator request"; - "count" => register_val_data.len(), - ); + debug!( + log, + "Received register validator request"; + "count" => register_val_data.len(), + ); - let preparation_data = register_val_data - .iter() - .filter_map(|register_data| { - chain - .validator_index(®ister_data.message.pubkey) - .ok() - .flatten() - .map(|validator_index| ProposerPreparationData { - validator_index: validator_index as u64, - fee_recipient: register_data.message.fee_recipient, - }) - }) - .collect::>(); + let preparation_data = register_val_data + .iter() + .filter_map(|register_data| { + chain + .validator_index(®ister_data.message.pubkey) + .ok() + .flatten() + .map(|validator_index| ProposerPreparationData { + validator_index: validator_index as u64, + fee_recipient: register_data.message.fee_recipient, + }) + }) + .collect::>(); - debug!( - log, - "Resolved validator request pubkeys"; - "count" => preparation_data.len() - ); + debug!( + log, + "Resolved validator request pubkeys"; + "count" => preparation_data.len() + ); - // Update the prepare beacon proposer cache based on this request. - execution_layer - .update_proposer_preparation_blocking(current_epoch, &preparation_data) - .map_err(|_e| { - warp_utils::reject::custom_bad_request( - "error processing proposer preparations".to_string(), - ) - })?; + // Update the prepare beacon proposer cache based on this request. 
+ execution_layer + .update_proposer_preparation(current_epoch, &preparation_data) + .await; - // Call prepare beacon proposer blocking with the latest update in order to make - // sure we have a local payload to fall back to in the event of the blined block - // flow failing. - chain.prepare_beacon_proposer_blocking().map_err(|e| { + // Call prepare beacon proposer blocking with the latest update in order to make + // sure we have a local payload to fall back to in the event of the blined block + // flow failing. + chain + .prepare_beacon_proposer(current_slot) + .await + .map_err(|e| { warp_utils::reject::custom_bad_request(format!( "error updating proposer preparations: {:?}", e )) })?; - //TODO(sean): In the MEV-boost PR, add a call here to send the update request to the builder + //TODO(sean): In the MEV-boost PR, add a call here to send the update request to the builder - Ok(()) - }) + Ok::<_, warp::Rejection>(warp::reply::json(&())) }, ); // POST validator/sync_committee_subscriptions @@ -2689,7 +2627,11 @@ pub fn serve( .and_then(|chain: Arc>| { blocking_task(move || { Ok::<_, warp::Rejection>(warp::reply::json(&api_types::GenericResponseRef::from( - chain.fork_choice.read().proto_array().core_proto_array(), + chain + .canonical_head + .fork_choice_read_lock() + .proto_array() + .core_proto_array(), ))) }) }); @@ -2732,9 +2674,6 @@ pub fn serve( .and(chain_filter.clone()) .and_then(|chain: Arc>| { blocking_json_task(move || { - let head_info = chain - .head_info() - .map_err(warp_utils::reject::beacon_chain_error)?; let current_slot_opt = chain.slot().ok(); chain @@ -2746,7 +2685,7 @@ pub fn serve( ) }) .and_then(|eth1| { - eth1.sync_status(head_info.genesis_time, current_slot_opt, &chain.spec) + eth1.sync_status(chain.genesis_time, current_slot_opt, &chain.spec) .ok_or_else(|| { warp_utils::reject::custom_server_error( "Unable to determine Eth1 sync status".to_string(), @@ -2869,7 +2808,7 @@ pub fn serve( .and(chain_filter.clone()) .and(log_filter.clone()) 
.and_then( - |blocks: Vec>, + |blocks: Vec>>, chain: Arc>, log: Logger| { info!( diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index b040eec779..bddae55549 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -55,7 +55,7 @@ pub fn proposer_duties( .safe_add(1) .map_err(warp_utils::reject::arith_error)? { - let (proposers, dependent_root, _) = + let (proposers, dependent_root, _execution_status, _fork) = compute_proposer_duties_from_head(request_epoch, chain) .map_err(warp_utils::reject::beacon_chain_error)?; convert_to_api_response(chain, request_epoch, dependent_root, proposers) @@ -88,16 +88,23 @@ fn try_proposer_duties_from_cache( request_epoch: Epoch, chain: &BeaconChain, ) -> Result, warp::reject::Rejection> { - let head = chain - .head_info() - .map_err(warp_utils::reject::beacon_chain_error)?; - let head_epoch = head.slot.epoch(T::EthSpec::slots_per_epoch()); + let (head_slot, head_block_root, head_decision_root) = { + let head = chain.canonical_head.cached_head(); + let head_block_root = head.head_block_root(); + let decision_root = head + .snapshot + .beacon_state + .proposer_shuffling_decision_root(head_block_root) + .map_err(warp_utils::reject::beacon_state_error)?; + (head.head_slot(), head_block_root, decision_root) + }; + let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch()); let dependent_root = match head_epoch.cmp(&request_epoch) { // head_epoch == request_epoch - Ordering::Equal => head.proposer_shuffling_decision_root, + Ordering::Equal => head_decision_root, // head_epoch < request_epoch - Ordering::Less => head.block_root, + Ordering::Less => head_block_root, // head_epoch > request_epoch Ordering::Greater => { return Err(warp_utils::reject::custom_server_error(format!( @@ -132,8 +139,9 @@ fn compute_and_cache_proposer_duties( current_epoch: Epoch, chain: &BeaconChain, ) -> Result { - let (indices, dependent_root, fork) = 
compute_proposer_duties_from_head(current_epoch, chain) - .map_err(warp_utils::reject::beacon_chain_error)?; + let (indices, dependent_root, _execution_status, fork) = + compute_proposer_duties_from_head(current_epoch, chain) + .map_err(warp_utils::reject::beacon_chain_error)?; // Prime the proposer shuffling cache with the newly-learned value. chain diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 95c049d997..8604c91899 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -18,27 +18,23 @@ impl StateId { chain: &BeaconChain, ) -> Result { let slot = match &self.0 { - CoreStateId::Head => { - return chain - .head_info() - .map(|head| head.state_root) - .map_err(warp_utils::reject::beacon_chain_error) - } + CoreStateId::Head => return Ok(chain.canonical_head.cached_head().head_state_root()), CoreStateId::Genesis => return Ok(chain.genesis_state_root), - CoreStateId::Finalized => chain.head_info().map(|head| { - head.finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()) - }), - CoreStateId::Justified => chain.head_info().map(|head| { - head.current_justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()) - }), - CoreStateId::Slot(slot) => Ok(*slot), + CoreStateId::Finalized => chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + CoreStateId::Justified => chain + .canonical_head + .cached_head() + .justified_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + CoreStateId::Slot(slot) => *slot, CoreStateId::Root(root) => return Ok(*root), - } - .map_err(warp_utils::reject::beacon_chain_error)?; + }; chain .state_root_at_slot(slot) @@ -62,11 +58,7 @@ impl StateId { chain: &BeaconChain, ) -> Result, warp::Rejection> { let (state_root, slot_opt) = match &self.0 { - CoreStateId::Head => { - return chain - .head_beacon_state() - 
.map_err(warp_utils::reject::beacon_chain_error) - } + CoreStateId::Head => return Ok(chain.head_beacon_state_cloned()), CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)), _ => (self.root(chain)?, None), }; diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 6b4f79fa5d..942a1167c2 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -45,6 +45,7 @@ async fn sync_committee_duties_across_fork() { genesis_state_root, &all_validators, ) + .await .unwrap(); harness.advance_slot(); @@ -61,6 +62,7 @@ async fn sync_committee_duties_across_fork() { let state_root = state.canonical_root(); harness .add_attested_block_at_slot(fork_slot, state, state_root, &all_validators) + .await .unwrap(); assert_eq!( @@ -244,6 +246,7 @@ async fn sync_committee_indices_across_fork() { genesis_state_root, &all_validators, ) + .await .unwrap(); harness.advance_slot(); @@ -277,6 +280,7 @@ async fn sync_committee_indices_across_fork() { let state_root = state.canonical_root(); harness .add_attested_block_at_slot(fork_slot + 1, state, state_root, &all_validators) + .await .unwrap(); let current_period = fork_epoch.sync_committee_period(&spec).unwrap(); diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 8b12aa4a5b..3327093d09 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -47,11 +47,13 @@ pub async fn fork_choice_before_proposal() { // Create some chain depth. 
harness.advance_slot(); - harness.extend_chain( - num_initial as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // We set up the following block graph, where B is a block that is temporarily orphaned by C, // but is then reinstated and built upon by D. @@ -64,8 +66,8 @@ pub async fn fork_choice_before_proposal() { let slot_d = slot_a + 3; let state_a = harness.get_current_state(); - let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b); - let block_root_b = harness.process_block(slot_b, block_b).unwrap(); + let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b).await; + let block_root_b = harness.process_block(slot_b, block_b).await.unwrap(); // Create attestations to B but keep them in reserve until after C has been processed. let attestations_b = harness.make_attestations( @@ -76,8 +78,11 @@ pub async fn fork_choice_before_proposal() { slot_b, ); - let (block_c, state_c) = harness.make_block(state_a, slot_c); - let block_root_c = harness.process_block(slot_c, block_c.clone()).unwrap(); + let (block_c, state_c) = harness.make_block(state_a, slot_c).await; + let block_root_c = harness + .process_block(slot_c, block_c.clone()) + .await + .unwrap(); // Create attestations to C from a small number of validators and process them immediately. let attestations_c = harness.make_attestations( @@ -94,7 +99,7 @@ pub async fn fork_choice_before_proposal() { // Due to proposer boost, the head should be C during slot C. assert_eq!( - harness.chain.head_info().unwrap().block_root, + harness.chain.canonical_head.cached_head().head_block_root(), block_root_c.into() ); @@ -102,7 +107,7 @@ pub async fn fork_choice_before_proposal() { // Manually prod the per-slot task, because the slot timer doesn't run in the background in // these tests. 
harness.advance_slot(); - harness.chain.per_slot_task(); + harness.chain.per_slot_task().await; let proposer_index = state_b .get_beacon_proposer_index(slot_d, &harness.chain.spec) @@ -119,7 +124,7 @@ pub async fn fork_choice_before_proposal() { // Head is now B. assert_eq!( - harness.chain.head_info().unwrap().block_root, + harness.chain.canonical_head.cached_head().head_block_root(), block_root_b.into() ); // D's parent is B. diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 2b0cfd7c41..b57a87dfca 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -11,7 +11,6 @@ use eth2::{ types::*, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; -use execution_layer::test_utils::MockExecutionLayer; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; use lighthouse_network::{Enr, EnrExt, PeerId}; @@ -21,7 +20,6 @@ use slot_clock::SlotClock; use state_processing::per_slot_processing; use std::convert::TryInto; use std::sync::Arc; -use task_executor::test_utils::TestRuntime; use tokio::sync::{mpsc, oneshot}; use tokio::time::Duration; use tree_hash::TreeHash; @@ -52,6 +50,7 @@ const SKIPPED_SLOTS: &[u64] = &[ ]; struct ApiTester { + harness: Arc>>, chain: Arc>>, client: BeaconNodeHttpClient, next_block: SignedBeaconBlock, @@ -62,14 +61,9 @@ struct ApiTester { proposer_slashing: ProposerSlashing, voluntary_exit: SignedVoluntaryExit, _server_shutdown: oneshot::Sender<()>, - validator_keypairs: Vec, network_rx: mpsc::UnboundedReceiver>, local_enr: Enr, external_peer_id: PeerId, - // This is never directly accessed, but adding it creates a payload cache, which we use in tests here. 
- #[allow(dead_code)] - mock_el: Option>, - _runtime: TestRuntime, } impl ApiTester { @@ -81,12 +75,14 @@ impl ApiTester { } pub async fn new_from_spec(spec: ChainSpec) -> Self { - let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec.clone()) - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let harness = Arc::new( + BeaconChainHarness::builder(MainnetEthSpec) + .spec(spec.clone()) + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(), + ); harness.advance_slot(); @@ -94,17 +90,19 @@ impl ApiTester { let slot = harness.chain.slot().unwrap().as_u64(); if !SKIPPED_SLOTS.contains(&slot) { - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; } harness.advance_slot(); } - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!( harness.chain.slot().unwrap(), @@ -112,12 +110,14 @@ impl ApiTester { "precondition: current slot is one after head" ); - let (next_block, _next_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (next_block, _next_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; // `make_block` adds random graffiti, so this will produce an alternate block - let (reorg_block, _reorg_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (reorg_block, _reorg_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; let head_state_root = head.beacon_state_root(); let attestations = harness @@ -168,15 +168,19 @@ impl ApiTester { let chain = harness.chain.clone(); assert_eq!( - chain.head_info().unwrap().finalized_checkpoint.epoch, + chain + .canonical_head + 
.cached_head() + .finalized_checkpoint() + .epoch, 2, "precondition: finality" ); assert_eq!( chain - .head_info() - .unwrap() - .current_justified_checkpoint + .canonical_head + .cached_head() + .justified_checkpoint() .epoch, 3, "precondition: justification" @@ -206,6 +210,7 @@ impl ApiTester { ); Self { + harness, chain, client, next_block, @@ -216,32 +221,33 @@ impl ApiTester { proposer_slashing, voluntary_exit, _server_shutdown: shutdown_tx, - validator_keypairs: harness.validator_keypairs, network_rx, local_enr, external_peer_id, - mock_el: harness.mock_execution_layer, - _runtime: harness.runtime, } } pub async fn new_from_genesis() -> Self { - let harness = BeaconChainHarness::builder(MainnetEthSpec) - .default_spec() - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .build(); + let harness = Arc::new( + BeaconChainHarness::builder(MainnetEthSpec) + .default_spec() + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .build(), + ); harness.advance_slot(); - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); - let (next_block, _next_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (next_block, _next_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; // `make_block` adds random graffiti, so this will produce an alternate block - let (reorg_block, _reorg_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (reorg_block, _reorg_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; let head_state_root = head.beacon_state_root(); let attestations = harness @@ -286,6 +292,7 @@ impl ApiTester { ); Self { + harness, chain, client, next_block, @@ -296,15 +303,16 @@ impl ApiTester { proposer_slashing, voluntary_exit, _server_shutdown: shutdown_tx, - validator_keypairs: harness.validator_keypairs, network_rx, 
local_enr, external_peer_id, - mock_el: None, - _runtime: harness.runtime, } } + fn validator_keypairs(&self) -> &[Keypair] { + &self.harness.validator_keypairs + } + fn skip_slots(self, count: u64) -> Self { for _ in 0..count { self.chain @@ -329,7 +337,9 @@ impl ApiTester { StateId::Slot(Slot::from(SKIPPED_SLOTS[3])), StateId::Root(Hash256::zero()), ]; - ids.push(StateId::Root(self.chain.head_info().unwrap().state_root)); + ids.push(StateId::Root( + self.chain.canonical_head.cached_head().head_state_root(), + )); ids } @@ -347,13 +357,20 @@ impl ApiTester { BlockId::Slot(Slot::from(SKIPPED_SLOTS[3])), BlockId::Root(Hash256::zero()), ]; - ids.push(BlockId::Root(self.chain.head_info().unwrap().block_root)); + ids.push(BlockId::Root( + self.chain.canonical_head.cached_head().head_block_root(), + )); ids } fn get_state(&self, state_id: StateId) -> Option> { match state_id { - StateId::Head => Some(self.chain.head().unwrap().beacon_state), + StateId::Head => Some( + self.chain + .head_snapshot() + .beacon_state + .clone_with_only_committee_caches(), + ), StateId::Genesis => self .chain .get_state(&self.chain.genesis_state_root, None) @@ -361,9 +378,9 @@ impl ApiTester { StateId::Finalized => { let finalized_slot = self .chain - .head_info() - .unwrap() - .finalized_checkpoint + .canonical_head + .cached_head() + .finalized_checkpoint() .epoch .start_slot(E::slots_per_epoch()); @@ -378,9 +395,9 @@ impl ApiTester { StateId::Justified => { let justified_slot = self .chain - .head_info() - .unwrap() - .current_justified_checkpoint + .canonical_head + .cached_head() + .justified_checkpoint() .epoch .start_slot(E::slots_per_epoch()); @@ -404,7 +421,7 @@ impl ApiTester { pub async fn test_beacon_genesis(self) -> Self { let result = self.client.get_beacon_genesis().await.unwrap().data; - let state = self.chain.head().unwrap().beacon_state; + let state = &self.chain.head_snapshot().beacon_state; let expected = GenesisData { genesis_time: state.genesis_time(), 
genesis_validators_root: state.genesis_validators_root(), @@ -426,14 +443,14 @@ impl ApiTester { .map(|res| res.data.root); let expected = match state_id { - StateId::Head => Some(self.chain.head_info().unwrap().state_root), + StateId::Head => Some(self.chain.canonical_head.cached_head().head_state_root()), StateId::Genesis => Some(self.chain.genesis_state_root), StateId::Finalized => { let finalized_slot = self .chain - .head_info() - .unwrap() - .finalized_checkpoint + .canonical_head + .cached_head() + .finalized_checkpoint() .epoch .start_slot(E::slots_per_epoch()); @@ -442,9 +459,9 @@ impl ApiTester { StateId::Justified => { let justified_slot = self .chain - .head_info() - .unwrap() - .current_justified_checkpoint + .canonical_head + .cached_head() + .justified_checkpoint() .epoch .start_slot(E::slots_per_epoch()); @@ -754,14 +771,20 @@ impl ApiTester { fn get_block_root(&self, block_id: BlockId) -> Option { match block_id { - BlockId::Head => Some(self.chain.head_info().unwrap().block_root), + BlockId::Head => Some(self.chain.canonical_head.cached_head().head_block_root()), BlockId::Genesis => Some(self.chain.genesis_block_root), - BlockId::Finalized => Some(self.chain.head_info().unwrap().finalized_checkpoint.root), + BlockId::Finalized => Some( + self.chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .root, + ), BlockId::Justified => Some( self.chain - .head_info() - .unwrap() - .current_justified_checkpoint + .canonical_head + .cached_head() + .justified_checkpoint() .root, ), BlockId::Slot(slot) => self @@ -1322,7 +1345,7 @@ impl ApiTester { pub async fn test_get_node_syncing(self) -> Self { let result = self.client.get_node_syncing().await.unwrap().data; - let head_slot = self.chain.head_info().unwrap().slot; + let head_slot = self.chain.canonical_head.cached_head().head_slot(); let sync_distance = self.chain.slot().unwrap() - head_slot; let expected = SyncingData { @@ -1536,7 +1559,7 @@ impl ApiTester { } fn validator_count(&self) -> 
usize { - self.chain.head().unwrap().beacon_state.validators().len() + self.chain.head_snapshot().beacon_state.validators().len() } fn interesting_validator_indices(&self) -> Vec> { @@ -1621,7 +1644,7 @@ impl ApiTester { WhenSlotSkipped::Prev, ) .unwrap() - .unwrap_or(self.chain.head_beacon_block_root().unwrap()); + .unwrap_or(self.chain.head_beacon_block_root()); assert_eq!(results.dependent_root, dependent_root); @@ -1696,7 +1719,7 @@ impl ApiTester { WhenSlotSkipped::Prev, ) .unwrap() - .unwrap_or(self.chain.head_beacon_block_root().unwrap()); + .unwrap_or(self.chain.head_beacon_block_root()); // Presently, the beacon chain harness never runs the code that primes the proposer // cache. If this changes in the future then we'll need some smarter logic here, but @@ -1824,7 +1847,7 @@ impl ApiTester { WhenSlotSkipped::Prev, ) .unwrap() - .unwrap_or(self.chain.head_beacon_block_root().unwrap()); + .unwrap_or(self.chain.head_beacon_block_root()); self.client .get_validator_duties_proposer(current_epoch) @@ -1878,7 +1901,7 @@ impl ApiTester { } pub async fn test_block_production(self) -> Self { - let fork = self.chain.head_info().unwrap().fork; + let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; for _ in 0..E::slots_per_epoch() * 3 { @@ -1898,7 +1921,7 @@ impl ApiTester { let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); let sk = self - .validator_keypairs + .validator_keypairs() .iter() .find(|kp| kp.pk == proposer_pubkey) .map(|kp| kp.sk.clone()) @@ -1926,7 +1949,7 @@ impl ApiTester { self.client.post_beacon_blocks(&signed_block).await.unwrap(); - assert_eq!(self.chain.head_beacon_block().unwrap(), signed_block); + assert_eq!(self.chain.head_beacon_block().as_ref(), &signed_block); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } @@ -1957,7 +1980,7 @@ impl ApiTester { } pub async fn test_block_production_verify_randao_invalid(self) -> Self { - let fork = 
self.chain.head_info().unwrap().fork; + let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; for _ in 0..E::slots_per_epoch() { @@ -1977,7 +2000,7 @@ impl ApiTester { let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); let sk = self - .validator_keypairs + .validator_keypairs() .iter() .find(|kp| kp.pk == proposer_pubkey) .map(|kp| kp.sk.clone()) @@ -2040,7 +2063,7 @@ impl ApiTester { } pub async fn test_get_validator_attestation_data(self) -> Self { - let mut state = self.chain.head_beacon_state().unwrap(); + let mut state = self.chain.head_beacon_state_cloned(); let slot = state.slot(); state .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) @@ -2070,7 +2093,6 @@ impl ApiTester { let attestation = self .chain .head_beacon_block() - .unwrap() .message() .body() .attestations()[0] @@ -2098,7 +2120,7 @@ impl ApiTester { let slot = self.chain.slot().unwrap(); let epoch = self.chain.epoch().unwrap(); - let mut head = self.chain.head().unwrap(); + let mut head = self.chain.head_snapshot().as_ref().clone(); while head.beacon_state.current_epoch() < epoch { per_slot_processing(&mut head.beacon_state, None, &self.chain.spec).unwrap(); } @@ -2114,7 +2136,7 @@ impl ApiTester { .client .post_validator_duties_attester( epoch, - (0..self.validator_keypairs.len() as u64) + (0..self.validator_keypairs().len() as u64) .collect::>() .as_slice(), ) @@ -2123,7 +2145,7 @@ impl ApiTester { .data; let (i, kp, duty, proof) = self - .validator_keypairs + .validator_keypairs() .iter() .enumerate() .find_map(|(i, kp)| { @@ -2238,9 +2260,9 @@ impl ApiTester { let mut registrations = vec![]; let mut fee_recipients = vec![]; - let fork = self.chain.head().unwrap().beacon_state.fork(); + let fork = self.chain.head_snapshot().beacon_state.fork(); - for (val_index, keypair) in self.validator_keypairs.iter().enumerate() { + for (val_index, keypair) in 
self.validator_keypairs().iter().enumerate() { let pubkey = keypair.pk.compress(); let fee_recipient = Address::from_low_u64_be(val_index as u64); @@ -2273,8 +2295,7 @@ impl ApiTester { for (val_index, (_, fee_recipient)) in self .chain - .head() - .unwrap() + .head_snapshot() .beacon_state .validators() .into_iter() @@ -2416,7 +2437,7 @@ impl ApiTester { pub async fn test_post_lighthouse_liveness(self) -> Self { let epoch = self.chain.epoch().unwrap(); - let head_state = self.chain.head_beacon_state().unwrap(); + let head_state = self.chain.head_beacon_state_cloned(); let indices = (0..head_state.validators().len()) .map(|i| i as u64) .collect::>(); @@ -2533,7 +2554,7 @@ impl ApiTester { let block_root = self.next_block.canonical_root(); // current_duty_dependent_root = block root because this is the first slot of the epoch - let current_duty_dependent_root = self.chain.head_beacon_block_root().unwrap(); + let current_duty_dependent_root = self.chain.head_beacon_block_root(); let current_slot = self.chain.slot().unwrap(); let next_slot = self.next_block.slot(); let finalization_distance = E::slots_per_epoch() * 2; @@ -2556,17 +2577,21 @@ impl ApiTester { epoch_transition: true, }); + let finalized_block_root = self + .chain + .block_root_at_slot(next_slot - finalization_distance, WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + let finalized_block = self + .chain + .get_blinded_block(&finalized_block_root) + .unwrap() + .unwrap(); + let finalized_state_root = finalized_block.state_root(); + let expected_finalized = EventKind::FinalizedCheckpoint(SseFinalizedCheckpoint { - block: self - .chain - .block_root_at_slot(next_slot - finalization_distance, WhenSlotSkipped::Prev) - .unwrap() - .unwrap(), - state: self - .chain - .state_root_at_slot(next_slot - finalization_distance) - .unwrap() - .unwrap(), + block: finalized_block_root, + state: finalized_state_root, epoch: Epoch::new(3), }); @@ -2578,7 +2603,7 @@ impl ApiTester { let block_events = poll_events(&mut 
events_future, 3, Duration::from_millis(10000)).await; assert_eq!( block_events.as_slice(), - &[expected_block, expected_finalized, expected_head] + &[expected_block, expected_head, expected_finalized] ); // Test a reorg event diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index bf1918662a..9c9e094db6 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -1358,9 +1358,9 @@ pub enum Response { /// A Status message. Status(StatusMessage), /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch. - BlocksByRange(Option>>), + BlocksByRange(Option>>), /// A response to a get BLOCKS_BY_ROOT request. - BlocksByRoot(Option>>), + BlocksByRoot(Option>>), } impl std::convert::From> for RPCCodedResponse { diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index f6c3e61b0b..a46a05a8ce 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -532,10 +532,10 @@ fn handle_v1_response( Protocol::Goodbye => Err(RPCError::InvalidData( "Goodbye RPC message has no valid response".to_string(), )), - Protocol::BlocksByRange => Ok(Some(RPCResponse::BlocksByRange(Box::new( + Protocol::BlocksByRange => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), Protocol::Ping => Ok(Some(RPCResponse::Pong(Ping { @@ -572,31 +572,31 @@ fn handle_v2_response( })?; match protocol { Protocol::BlocksByRange => match fork_name { - ForkName::Altair => 
Ok(Some(RPCResponse::BlocksByRange(Box::new( + ForkName::Altair => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes( decoded_buffer, )?), )))), - ForkName::Base => Ok(Some(RPCResponse::BlocksByRange(Box::new( + ForkName::Base => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - ForkName::Merge => Ok(Some(RPCResponse::BlocksByRange(Box::new( + ForkName::Merge => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes( decoded_buffer, )?), )))), }, Protocol::BlocksByRoot => match fork_name { - ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes( decoded_buffer, )?), )))), - ForkName::Base => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + ForkName::Base => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - ForkName::Merge => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + ForkName::Merge => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes( decoded_buffer, )?), @@ -898,10 +898,10 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRange, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new( + Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -911,7 +911,7 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRange, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block()))), + 
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, ) .unwrap_err(), @@ -924,11 +924,11 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRoot, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, ), Ok(Some(RPCResponse::BlocksByRoot( - Box::new(empty_base_block()) + Arc::new(empty_base_block()) ))) ); @@ -937,7 +937,7 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRoot, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, ) .unwrap_err(), @@ -1013,10 +1013,10 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new( + Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -1028,10 +1028,10 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Altair, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new( + Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -1040,10 +1040,10 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new(altair_block())))) 
+ Ok(Some(RPCResponse::BlocksByRange(Arc::new(altair_block())))) ); let merge_block_small = merge_block_small(&fork_context(ForkName::Merge)); @@ -1053,12 +1053,12 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new( + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new( merge_block_small.clone() ))), ForkName::Merge, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new( + Ok(Some(RPCResponse::BlocksByRange(Arc::new( merge_block_small.clone() )))) ); @@ -1085,11 +1085,11 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, ), Ok(Some(RPCResponse::BlocksByRoot( - Box::new(empty_base_block()) + Arc::new(empty_base_block()) ))), ); @@ -1100,11 +1100,11 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, ), Ok(Some(RPCResponse::BlocksByRoot( - Box::new(empty_base_block()) + Arc::new(empty_base_block()) ))) ); @@ -1112,22 +1112,22 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, ), - Ok(Some(RPCResponse::BlocksByRoot(Box::new(altair_block())))) + Ok(Some(RPCResponse::BlocksByRoot(Arc::new(altair_block())))) ); assert_eq!( encode_then_decode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new( + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new( 
merge_block_small.clone() ))), ForkName::Merge, ), - Ok(Some(RPCResponse::BlocksByRoot(Box::new(merge_block_small)))) + Ok(Some(RPCResponse::BlocksByRoot(Arc::new(merge_block_small)))) ); let mut encoded = @@ -1179,7 +1179,7 @@ mod tests { let mut encoded_bytes = encode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, ) .unwrap(); @@ -1200,7 +1200,7 @@ mod tests { let mut encoded_bytes = encode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, ) .unwrap(); @@ -1222,7 +1222,7 @@ mod tests { let mut encoded_bytes = encode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Altair, ) .unwrap(); @@ -1247,7 +1247,7 @@ mod tests { let mut encoded_bytes = encode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, ) .unwrap(); @@ -1292,7 +1292,7 @@ mod tests { let mut encoded_bytes = encode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, ) .unwrap(); @@ -1316,7 +1316,7 @@ mod tests { let mut encoded_bytes = encode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + 
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, ) .unwrap(); diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 46de772d8d..26d755a6e0 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -9,6 +9,7 @@ use ssz_types::{ VariableList, }; use std::ops::Deref; +use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; @@ -237,10 +238,10 @@ pub enum RPCResponse { /// A response to a get BLOCKS_BY_RANGE request. A None response signifies the end of the /// batch. - BlocksByRange(Box>), + BlocksByRange(Arc>), /// A response to a get BLOCKS_BY_ROOT request. - BlocksByRoot(Box>), + BlocksByRoot(Arc>), /// A PONG response to a PING request. Pong(Ping), diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index af2656a275..a01072f8e4 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -7,6 +7,7 @@ use snap::raw::{decompress_len, Decoder, Encoder}; use ssz::{Decode, Encode}; use std::boxed::Box; use std::io::{Error, ErrorKind}; +use std::sync::Arc; use types::{ Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, @@ -17,7 +18,7 @@ use types::{ #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. - BeaconBlock(Box>), + BeaconBlock(Arc>), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. AggregateAndProofAttestation(Box>), /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id. 
@@ -173,7 +174,7 @@ impl PubsubMessage { )) } }; - Ok(PubsubMessage::BeaconBlock(Box::new(beacon_block))) + Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block))) } GossipKind::VoluntaryExit => { let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data) diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 973485fc4a..90052859bc 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -174,15 +174,15 @@ fn test_blocks_by_range_chunked_rpc() { // BlocksByRange Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_base = Response::BlocksByRange(Some(Box::new(signed_full_block))); + let rpc_response_base = Response::BlocksByRange(Some(Arc::new(signed_full_block))); let full_block = BeaconBlock::Altair(BeaconBlockAltair::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_altair = Response::BlocksByRange(Some(Box::new(signed_full_block))); + let rpc_response_altair = Response::BlocksByRange(Some(Arc::new(signed_full_block))); let full_block = merge_block_small(&common::fork_context(ForkName::Merge)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_merge_small = Response::BlocksByRange(Some(Box::new(signed_full_block))); + let rpc_response_merge_small = Response::BlocksByRange(Some(Arc::new(signed_full_block))); // keep count of the number of messages received let mut messages_received = 0; @@ -311,7 +311,7 @@ fn test_blocks_by_range_over_limit() { // BlocksByRange Response let full_block = merge_block_large(&common::fork_context(ForkName::Merge)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_merge_large = 
Response::BlocksByRange(Some(Box::new(signed_full_block))); + let rpc_response_merge_large = Response::BlocksByRange(Some(Arc::new(signed_full_block))); let request_id = messages_to_send as usize; // build the sender future @@ -409,7 +409,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty()); - let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed))); + let rpc_response = Response::BlocksByRange(Some(Arc::new(empty_signed))); // keep count of the number of messages received let mut messages_received: u64 = 0; @@ -540,7 +540,7 @@ fn test_blocks_by_range_single_empty_rpc() { let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty()); - let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed))); + let rpc_response = Response::BlocksByRange(Some(Arc::new(empty_signed))); let messages_to_send = 1; @@ -660,15 +660,15 @@ fn test_blocks_by_root_chunked_rpc() { // BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_base = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response_base = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); let full_block = BeaconBlock::Altair(BeaconBlockAltair::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_altair = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response_altair = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); let full_block = merge_block_small(&common::fork_context(ForkName::Merge)); let signed_full_block = SignedBeaconBlock::from_block(full_block, 
Signature::empty()); - let rpc_response_merge_small = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response_merge_small = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); // keep count of the number of messages received let mut messages_received = 0; @@ -803,7 +803,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { // BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); // keep count of the number of messages received let mut messages_received = 0; diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 6d7375cca7..6f75e1fb23 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -52,6 +52,7 @@ use lighthouse_network::{ use logging::TimeLatch; use slog::{crit, debug, error, trace, warn, Logger}; use std::collections::VecDeque; +use std::future::Future; use std::pin::Pin; use std::sync::{Arc, Weak}; use std::task::Context; @@ -386,7 +387,7 @@ impl WorkEvent { message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: Box>, + block: Arc>, seen_timestamp: Duration, ) -> Self { Self { @@ -490,7 +491,7 @@ impl WorkEvent { /// Create a new `Work` event for some block, where the result from computation (if any) is /// sent to the other side of `result_tx`. pub fn rpc_beacon_block( - block: Box>, + block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, ) -> Self { @@ -507,7 +508,7 @@ impl WorkEvent { /// Create a new work event to import `blocks` as a beacon chain segment. 
pub fn chain_segment( process_id: ChainSegmentProcessId, - blocks: Vec>, + blocks: Vec>>, ) -> Self { Self { drop_during_sync: false, @@ -654,7 +655,7 @@ pub enum Work { message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: Box>, + block: Arc>, seen_timestamp: Duration, }, DelayedImportBlock { @@ -691,13 +692,13 @@ pub enum Work { seen_timestamp: Duration, }, RpcBlock { - block: Box>, + block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, }, ChainSegment { process_id: ChainSegmentProcessId, - blocks: Vec>, + blocks: Vec>>, }, Status { peer_id: PeerId, @@ -1307,15 +1308,6 @@ impl BeaconProcessor { let idle_tx = toolbox.idle_tx; let work_reprocessing_tx = toolbox.work_reprocessing_tx; - // Wrap the `idle_tx` in a struct that will fire the idle message whenever it is dropped. - // - // This helps ensure that the worker is always freed in the case of an early exit or panic. - // As such, this instantiation should happen as early in the function as possible. - let send_idle_on_drop = SendOnDrop { - tx: idle_tx, - log: self.log.clone(), - }; - let work_id = work.str_id(); let worker_timer = metrics::start_timer_vec(&metrics::BEACON_PROCESSOR_WORKER_TIME, &[work_id]); @@ -1325,6 +1317,16 @@ impl BeaconProcessor { &[work.str_id()], ); + // Wrap the `idle_tx` in a struct that will fire the idle message whenever it is dropped. + // + // This helps ensure that the worker is always freed in the case of an early exit or panic. + // As such, this instantiation should happen as early in the function as possible. 
+ let send_idle_on_drop = SendOnDrop { + tx: idle_tx, + _worker_timer: worker_timer, + log: self.log.clone(), + }; + let worker_id = self.current_workers; self.current_workers = self.current_workers.saturating_add(1); @@ -1338,7 +1340,6 @@ impl BeaconProcessor { return; }; - let log = self.log.clone(); let executor = self.executor.clone(); let worker = Worker { @@ -1357,252 +1358,308 @@ impl BeaconProcessor { "worker" => worker_id, ); - let sub_executor = executor.clone(); - executor.spawn_blocking( - move || { - let _worker_timer = worker_timer; + let task_spawner = TaskSpawner { + executor: executor.clone(), + send_idle_on_drop, + }; - match work { - /* - * Individual unaggregated attestation verification. - */ - Work::GossipAttestation { - message_id, - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - } => worker.process_gossip_attestation( - message_id, - peer_id, - attestation, - subnet_id, - should_import, - Some(work_reprocessing_tx), - seen_timestamp, - ), - /* - * Batched unaggregated attestation verification. - */ - Work::GossipAttestationBatch { packages } => worker - .process_gossip_attestation_batch(packages, Some(work_reprocessing_tx)), - /* - * Individual aggregated attestation verification. - */ - Work::GossipAggregate { - message_id, - peer_id, - aggregate, - seen_timestamp, - } => worker.process_gossip_aggregate( - message_id, - peer_id, - aggregate, - Some(work_reprocessing_tx), - seen_timestamp, - ), - /* - * Batched aggregated attestation verification. - */ - Work::GossipAggregateBatch { packages } => { - worker.process_gossip_aggregate_batch(packages, Some(work_reprocessing_tx)) - } - /* - * Verification for beacon blocks received on gossip. - */ - Work::GossipBlock { + let sub_executor = executor; + match work { + /* + * Individual unaggregated attestation verification. 
+ */ + Work::GossipAttestation { + message_id, + peer_id, + attestation, + subnet_id, + should_import, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_attestation( + message_id, + peer_id, + attestation, + subnet_id, + should_import, + Some(work_reprocessing_tx), + seen_timestamp, + ) + }), + /* + * Batched unaggregated attestation verification. + */ + Work::GossipAttestationBatch { packages } => task_spawner.spawn_blocking(|| { + worker.process_gossip_attestation_batch(packages, Some(work_reprocessing_tx)) + }), + /* + * Individual aggregated attestation verification. + */ + Work::GossipAggregate { + message_id, + peer_id, + aggregate, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_aggregate( + message_id, + peer_id, + aggregate, + Some(work_reprocessing_tx), + seen_timestamp, + ) + }), + /* + * Batched aggregated attestation verification. + */ + Work::GossipAggregateBatch { packages } => task_spawner.spawn_blocking(|| { + worker.process_gossip_aggregate_batch(packages, Some(work_reprocessing_tx)) + }), + /* + * Verification for beacon blocks received on gossip. + */ + Work::GossipBlock { + message_id, + peer_id, + peer_client, + block, + seen_timestamp, + } => task_spawner.spawn_async(async move { + worker + .process_gossip_block( message_id, peer_id, peer_client, block, - seen_timestamp, - } => worker.process_gossip_block( - message_id, - peer_id, - peer_client, - *block, - work_reprocessing_tx.clone(), + work_reprocessing_tx, duplicate_cache, seen_timestamp, - ), - /* - * Import for blocks that we received earlier than their intended slot. - */ - Work::DelayedImportBlock { - peer_id, - block, - seen_timestamp, - } => worker.process_gossip_verified_block( - peer_id, - *block, - work_reprocessing_tx, - seen_timestamp, - ), - /* - * Voluntary exits received on gossip. 
- */ - Work::GossipVoluntaryExit { - message_id, - peer_id, - voluntary_exit, - } => worker.process_gossip_voluntary_exit(message_id, peer_id, *voluntary_exit), - /* - * Proposer slashings received on gossip. - */ - Work::GossipProposerSlashing { - message_id, - peer_id, - proposer_slashing, - } => worker.process_gossip_proposer_slashing( - message_id, - peer_id, - *proposer_slashing, - ), - /* - * Attester slashings received on gossip. - */ - Work::GossipAttesterSlashing { - message_id, - peer_id, - attester_slashing, - } => worker.process_gossip_attester_slashing( - message_id, - peer_id, - *attester_slashing, - ), - /* - * Sync committee message verification. - */ - Work::GossipSyncSignature { - message_id, - peer_id, - sync_signature, - subnet_id, - seen_timestamp, - } => worker.process_gossip_sync_committee_signature( - message_id, - peer_id, - *sync_signature, - subnet_id, - seen_timestamp, - ), - /* - * Syn contribution verification. - */ - Work::GossipSyncContribution { - message_id, - peer_id, - sync_contribution, - seen_timestamp, - } => worker.process_sync_committee_contribution( - message_id, - peer_id, - *sync_contribution, - seen_timestamp, - ), - /* - * Verification for beacon blocks received during syncing via RPC. - */ - Work::RpcBlock { - block, - seen_timestamp, - process_type, - } => { - worker.process_rpc_block( - *block, - seen_timestamp, - process_type, - work_reprocessing_tx.clone(), - duplicate_cache, - ); - } - /* - * Verification for a chain segment (multiple blocks). - */ - Work::ChainSegment { process_id, blocks } => { - worker.process_chain_segment(process_id, blocks) - } - /* - * Processing of Status Messages. - */ - Work::Status { peer_id, message } => worker.process_status(peer_id, message), - /* - * Processing of range syncing requests from other peers. 
- */ - Work::BlocksByRangeRequest { - peer_id, - request_id, - request, - } => { - return worker.handle_blocks_by_range_request( - sub_executor, - send_idle_on_drop, - peer_id, - request_id, - request, - ) - } - /* - * Processing of blocks by roots requests from other peers. - */ - Work::BlocksByRootsRequest { - peer_id, - request_id, - request, - } => { - return worker.handle_blocks_by_root_request( - sub_executor, - send_idle_on_drop, - peer_id, - request_id, - request, - ) - } - Work::UnknownBlockAttestation { - message_id, - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - } => worker.process_gossip_attestation( - message_id, - peer_id, - attestation, - subnet_id, - should_import, - None, // Do not allow this attestation to be re-processed beyond this point. - seen_timestamp, - ), - Work::UnknownBlockAggregate { - message_id, - peer_id, - aggregate, - seen_timestamp, - } => worker.process_gossip_aggregate( - message_id, - peer_id, - aggregate, - None, - seen_timestamp, - ), - }; + ) + .await + }), + /* + * Import for blocks that we received earlier than their intended slot. + */ + Work::DelayedImportBlock { + peer_id, + block, + seen_timestamp, + } => task_spawner.spawn_async(worker.process_gossip_verified_block( + peer_id, + *block, + work_reprocessing_tx, + seen_timestamp, + )), + /* + * Voluntary exits received on gossip. + */ + Work::GossipVoluntaryExit { + message_id, + peer_id, + voluntary_exit, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_voluntary_exit(message_id, peer_id, *voluntary_exit) + }), + /* + * Proposer slashings received on gossip. + */ + Work::GossipProposerSlashing { + message_id, + peer_id, + proposer_slashing, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_proposer_slashing(message_id, peer_id, *proposer_slashing) + }), + /* + * Attester slashings received on gossip. 
+ */ + Work::GossipAttesterSlashing { + message_id, + peer_id, + attester_slashing, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_attester_slashing(message_id, peer_id, *attester_slashing) + }), + /* + * Sync committee message verification. + */ + Work::GossipSyncSignature { + message_id, + peer_id, + sync_signature, + subnet_id, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_sync_committee_signature( + message_id, + peer_id, + *sync_signature, + subnet_id, + seen_timestamp, + ) + }), + /* + * Syn contribution verification. + */ + Work::GossipSyncContribution { + message_id, + peer_id, + sync_contribution, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_sync_committee_contribution( + message_id, + peer_id, + *sync_contribution, + seen_timestamp, + ) + }), + /* + * Verification for beacon blocks received during syncing via RPC. + */ + Work::RpcBlock { + block, + seen_timestamp, + process_type, + } => task_spawner.spawn_async(worker.process_rpc_block( + block, + seen_timestamp, + process_type, + work_reprocessing_tx, + duplicate_cache, + )), + /* + * Verification for a chain segment (multiple blocks). + */ + Work::ChainSegment { process_id, blocks } => task_spawner + .spawn_async(async move { worker.process_chain_segment(process_id, blocks).await }), + /* + * Processing of Status Messages. + */ + Work::Status { peer_id, message } => { + task_spawner.spawn_blocking(move || worker.process_status(peer_id, message)) + } + /* + * Processing of range syncing requests from other peers. + */ + Work::BlocksByRangeRequest { + peer_id, + request_id, + request, + } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { + worker.handle_blocks_by_range_request( + sub_executor, + send_idle_on_drop, + peer_id, + request_id, + request, + ) + }), + /* + * Processing of blocks by roots requests from other peers. 
+ */ + Work::BlocksByRootsRequest { + peer_id, + request_id, + request, + } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { + worker.handle_blocks_by_root_request( + sub_executor, + send_idle_on_drop, + peer_id, + request_id, + request, + ) + }), + Work::UnknownBlockAttestation { + message_id, + peer_id, + attestation, + subnet_id, + should_import, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_attestation( + message_id, + peer_id, + attestation, + subnet_id, + should_import, + None, // Do not allow this attestation to be re-processed beyond this point. + seen_timestamp, + ) + }), + Work::UnknownBlockAggregate { + message_id, + peer_id, + aggregate, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_aggregate( + message_id, + peer_id, + aggregate, + None, + seen_timestamp, + ) + }), + }; + } +} - trace!( - log, - "Beacon processor worker done"; - "work" => work_id, - "worker" => worker_id, - ); +/// Spawns tasks that are either: +/// +/// - Blocking (i.e. intensive methods that shouldn't run on the core `tokio` executor) +/// - Async (i.e. `async` methods) +/// +/// Takes a `SendOnDrop` and ensures it is dropped after the task completes. This frees the beacon +/// processor worker so a new task can be started. +struct TaskSpawner { + executor: TaskExecutor, + send_idle_on_drop: SendOnDrop, +} - // This explicit `drop` is used to remind the programmer that this variable must - // not be dropped until the worker is complete. Dropping it early will cause the - // worker to be marked as "free" and cause an over-spawning of workers. - drop(send_idle_on_drop); +impl TaskSpawner { + /// Spawn an async task, dropping the `SendOnDrop` after the task has completed. 
+ fn spawn_async(self, task: impl Future + Send + 'static) { + self.executor.spawn( + async { + task.await; + drop(self.send_idle_on_drop) }, WORKER_TASK_NAME, - ); + ) + } + + /// Spawn a blocking task, dropping the `SendOnDrop` after the task has completed. + fn spawn_blocking(self, task: F) + where + F: FnOnce() + Send + 'static, + { + self.executor.spawn_blocking( + || { + task(); + drop(self.send_idle_on_drop) + }, + WORKER_TASK_NAME, + ) + } + + /// Spawn a blocking task, passing the `SendOnDrop` into the task. + /// + /// ## Notes + /// + /// Users must ensure the `SendOnDrop` is dropped at the appropriate time! + pub fn spawn_blocking_with_manual_send_idle(self, task: F) + where + F: FnOnce(SendOnDrop) + Send + 'static, + { + self.executor.spawn_blocking( + || { + task(self.send_idle_on_drop); + }, + WORKER_TASK_NAME, + ) } } @@ -1618,6 +1675,8 @@ impl BeaconProcessor { /// https://doc.rust-lang.org/std/ops/trait.Drop.html#panics pub struct SendOnDrop { tx: mpsc::Sender<()>, + // The field is unused, but it's here to ensure the timer is dropped once the task has finished. 
+ _worker_timer: Option, log: Logger, } diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index 1c9d323576..a39ca2ec33 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -8,7 +8,6 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::{BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; -use environment::{null_logger, Environment, EnvironmentBuilder}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, rpc::methods::{MetaData, MetaDataV2}, @@ -20,7 +19,6 @@ use std::cmp; use std::iter::Iterator; use std::sync::Arc; use std::time::Duration; -use tokio::runtime::Handle; use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, EthSpec, MainnetEthSpec, ProposerSlashing, SignedBeaconBlock, @@ -45,7 +43,7 @@ const STANDARD_TIMEOUT: Duration = Duration::from_secs(10); /// Provides utilities for testing the `BeaconProcessor`. struct TestRig { chain: Arc>, - next_block: SignedBeaconBlock, + next_block: Arc>, attestations: Vec<(Attestation, SubnetId)>, next_block_attestations: Vec<(Attestation, SubnetId)>, next_block_aggregate_attestations: Vec>, @@ -56,7 +54,7 @@ struct TestRig { work_journal_rx: mpsc::Receiver<&'static str>, _network_rx: mpsc::UnboundedReceiver>, _sync_rx: mpsc::UnboundedReceiver>, - environment: Option>, + _harness: BeaconChainHarness, } /// This custom drop implementation ensures that we shut down the tokio runtime gracefully. Without @@ -65,12 +63,11 @@ impl Drop for TestRig { fn drop(&mut self) { // Causes the beacon processor to shutdown. 
self.beacon_processor_tx = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN).0; - self.environment.take().unwrap().shutdown_on_idle(); } } impl TestRig { - pub fn new(chain_length: u64) -> Self { + pub async fn new(chain_length: u64) -> Self { // This allows for testing voluntary exits without building out a massive chain. let mut spec = E::default_spec(); spec.shard_committee_period = 2; @@ -84,16 +81,18 @@ impl TestRig { harness.advance_slot(); for _ in 0..chain_length { - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness.advance_slot(); } - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!( harness.chain.slot().unwrap(), @@ -101,8 +100,9 @@ impl TestRig { "precondition: current slot is one after head" ); - let (next_block, next_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (next_block, next_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; let head_state_root = head.beacon_state_root(); let attestations = harness @@ -155,11 +155,11 @@ impl TestRig { let proposer_slashing = harness.make_proposer_slashing(2); let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap()); - let chain = harness.chain; + let chain = harness.chain.clone(); let (network_tx, _network_rx) = mpsc::unbounded_channel(); - let log = null_logger().unwrap(); + let log = harness.logger().clone(); let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); @@ -181,15 +181,7 @@ impl TestRig { &log, )); - let mut environment = EnvironmentBuilder::mainnet() - .null_logger() - .unwrap() - .multi_threaded_tokio_runtime() - .unwrap() - .build() - .unwrap(); - - let executor = 
environment.core_context().executor; + let executor = harness.runtime.task_executor.clone(); let (work_journal_tx, work_journal_rx) = mpsc::channel(16_364); @@ -208,7 +200,7 @@ impl TestRig { Self { chain, - next_block, + next_block: Arc::new(next_block), attestations, next_block_attestations, next_block_aggregate_attestations, @@ -219,12 +211,16 @@ impl TestRig { work_journal_rx, _network_rx, _sync_rx, - environment: Some(environment), + _harness: harness, } } + pub async fn recompute_head(&self) { + self.chain.recompute_head_at_current_slot().await.unwrap() + } + pub fn head_root(&self) -> Hash256 { - self.chain.head().unwrap().beacon_block_root + self.chain.head_snapshot().beacon_block_root } pub fn enqueue_gossip_block(&self) { @@ -233,7 +229,7 @@ impl TestRig { junk_message_id(), junk_peer_id(), Client::default(), - Box::new(self.next_block.clone()), + self.next_block.clone(), Duration::from_secs(0), )) .unwrap(); @@ -241,7 +237,7 @@ impl TestRig { pub fn enqueue_rpc_block(&self) { let event = WorkEvent::rpc_beacon_block( - Box::new(self.next_block.clone()), + self.next_block.clone(), std::time::Duration::default(), BlockProcessType::ParentLookup { chain_hash: Hash256::random(), @@ -324,28 +320,16 @@ impl TestRig { .unwrap(); } - fn handle(&mut self) -> Handle { - self.environment - .as_mut() - .unwrap() - .core_context() - .executor - .handle() - .unwrap() - } - /// Assert that the `BeaconProcessor` doesn't produce any events in the given `duration`. - pub fn assert_no_events_for(&mut self, duration: Duration) { - self.handle().block_on(async { - tokio::select! { - _ = tokio::time::sleep(duration) => (), - event = self.work_journal_rx.recv() => panic!( - "received {:?} within {:?} when expecting no events", - event, - duration - ), - } - }) + pub async fn assert_no_events_for(&mut self, duration: Duration) { + tokio::select! 
{ + _ = tokio::time::sleep(duration) => (), + event = self.work_journal_rx.recv() => panic!( + "received {:?} within {:?} when expecting no events", + event, + duration + ), + } } /// Checks that the `BeaconProcessor` event journal contains the `expected` events in the given @@ -354,57 +338,54 @@ impl TestRig { /// /// Given the described logic, `expected` must not contain `WORKER_FREED` or `NOTHING_TO_DO` /// events. - pub fn assert_event_journal_contains_ordered(&mut self, expected: &[&str]) { + pub async fn assert_event_journal_contains_ordered(&mut self, expected: &[&str]) { assert!(expected .iter() .all(|ev| ev != &WORKER_FREED && ev != &NOTHING_TO_DO)); - let (events, worker_freed_remaining) = self.handle().block_on(async { - let mut events = Vec::with_capacity(expected.len()); - let mut worker_freed_remaining = expected.len(); + let mut events = Vec::with_capacity(expected.len()); + let mut worker_freed_remaining = expected.len(); - let drain_future = async { - loop { - match self.work_journal_rx.recv().await { - Some(event) if event == WORKER_FREED => { - worker_freed_remaining -= 1; - if worker_freed_remaining == 0 { - // Break when all expected events are finished. - break; - } + let drain_future = async { + loop { + match self.work_journal_rx.recv().await { + Some(event) if event == WORKER_FREED => { + worker_freed_remaining -= 1; + if worker_freed_remaining == 0 { + // Break when all expected events are finished. + break; } - Some(event) if event == NOTHING_TO_DO => { - // Ignore these. - } - Some(event) => { - events.push(event); - } - None => break, } + Some(event) if event == NOTHING_TO_DO => { + // Ignore these. + } + Some(event) => { + events.push(event); + } + None => break, } - }; - - // Drain the expected number of events from the channel, or time out and give up. - tokio::select! { - _ = tokio::time::sleep(STANDARD_TIMEOUT) => panic!( - "Timeout ({:?}) expired waiting for events. 
Expected {:?} but got {:?} waiting for {} `WORKER_FREED` events.", - STANDARD_TIMEOUT, - expected, - events, - worker_freed_remaining, - ), - _ = drain_future => {}, } + }; - (events, worker_freed_remaining) - }); + // Drain the expected number of events from the channel, or time out and give up. + tokio::select! { + _ = tokio::time::sleep(STANDARD_TIMEOUT) => panic!( + "Timeout ({:?}) expired waiting for events. Expected {:?} but got {:?} waiting for {} `WORKER_FREED` events.", + STANDARD_TIMEOUT, + expected, + events, + worker_freed_remaining, + ), + _ = drain_future => {}, + } assert_eq!(events, expected); assert_eq!(worker_freed_remaining, 0); } - pub fn assert_event_journal(&mut self, expected: &[&str]) { - self.assert_event_journal_with_timeout(expected, STANDARD_TIMEOUT); + pub async fn assert_event_journal(&mut self, expected: &[&str]) { + self.assert_event_journal_with_timeout(expected, STANDARD_TIMEOUT) + .await } /// Assert that the `BeaconProcessor` event journal is as `expected`. @@ -413,34 +394,34 @@ impl TestRig { /// /// We won't attempt to listen for any more than `expected.len()` events. As such, it makes sense /// to use the `NOTHING_TO_DO` event to ensure that execution has completed. - pub fn assert_event_journal_with_timeout(&mut self, expected: &[&str], timeout: Duration) { - let events = self.handle().block_on(async { - let mut events = Vec::with_capacity(expected.len()); + pub async fn assert_event_journal_with_timeout( + &mut self, + expected: &[&str], + timeout: Duration, + ) { + let mut events = Vec::with_capacity(expected.len()); - let drain_future = async { - while let Some(event) = self.work_journal_rx.recv().await { - events.push(event); + let drain_future = async { + while let Some(event) = self.work_journal_rx.recv().await { + events.push(event); - // Break as soon as we collect the desired number of events. - if events.len() >= expected.len() { - break; - } + // Break as soon as we collect the desired number of events. 
+ if events.len() >= expected.len() { + break; } - }; - - // Drain the expected number of events from the channel, or time out and give up. - tokio::select! { - _ = tokio::time::sleep(timeout) => panic!( - "Timeout ({:?}) expired waiting for events. Expected {:?} but got {:?}", - timeout, - expected, - events - ), - _ = drain_future => {}, } + }; - events - }); + // Drain the expected number of events from the channel, or time out and give up. + tokio::select! { + _ = tokio::time::sleep(timeout) => panic!( + "Timeout ({:?}) expired waiting for events. Expected {:?} but got {:?}", + timeout, + expected, + events + ), + _ = drain_future => {}, + } assert_eq!(events, expected); } @@ -455,9 +436,9 @@ fn junk_message_id() -> MessageId { } /// Blocks that arrive early should be queued for later processing. -#[test] -fn import_gossip_block_acceptably_early() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn import_gossip_block_acceptably_early() { + let mut rig = TestRig::new(SMALL_CHAIN).await; let slot_start = rig .chain @@ -477,7 +458,8 @@ fn import_gossip_block_acceptably_early() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; // Note: this section of the code is a bit race-y. We're assuming that we can set the slot clock // and check the head in the time between the block arrived early and when its due for @@ -492,7 +474,8 @@ fn import_gossip_block_acceptably_early() { "block not yet imported" ); - rig.assert_event_journal(&[DELAYED_IMPORT_BLOCK, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[DELAYED_IMPORT_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.head_root(), @@ -502,9 +485,9 @@ fn import_gossip_block_acceptably_early() { } /// Blocks that are *too* early shouldn't get into the delay queue. 
-#[test] -fn import_gossip_block_unacceptably_early() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn import_gossip_block_unacceptably_early() { + let mut rig = TestRig::new(SMALL_CHAIN).await; let slot_start = rig .chain @@ -524,11 +507,12 @@ fn import_gossip_block_unacceptably_early() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; // Waiting for 5 seconds is a bit arbitrary, however it *should* be long enough to ensure the // block isn't imported. - rig.assert_no_events_for(Duration::from_secs(5)); + rig.assert_no_events_for(Duration::from_secs(5)).await; assert!( rig.head_root() != rig.next_block.canonical_root(), @@ -537,9 +521,9 @@ fn import_gossip_block_unacceptably_early() { } /// Blocks that arrive on-time should be processed normally. -#[test] -fn import_gossip_block_at_current_slot() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn import_gossip_block_at_current_slot() { + let mut rig = TestRig::new(SMALL_CHAIN).await; assert_eq!( rig.chain.slot().unwrap(), @@ -549,7 +533,8 @@ fn import_gossip_block_at_current_slot() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.head_root(), @@ -559,15 +544,16 @@ fn import_gossip_block_at_current_slot() { } /// Ensure a valid attestation can be imported. 
-#[test] -fn import_gossip_attestation() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn import_gossip_attestation() { + let mut rig = TestRig::new(SMALL_CHAIN).await; let initial_attns = rig.chain.naive_aggregation_pool.read().num_items(); rig.enqueue_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -583,8 +569,8 @@ enum BlockImportMethod { /// Ensure that attestations that reference an unknown block get properly re-queued and /// re-processed upon importing the block. -fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { - let mut rig = TestRig::new(SMALL_CHAIN); +async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { + let mut rig = TestRig::new(SMALL_CHAIN).await; // Send the attestation but not the block, and check that it was not imported. @@ -592,7 +578,8 @@ fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { rig.enqueue_next_block_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -613,11 +600,12 @@ fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { } }; - rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_ATTESTATION]); + rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_ATTESTATION]) + .await; // Run fork choice, since it isn't run when processing an RPC block. At runtime it is the // responsibility of the sync manager to do this. 
- rig.chain.fork_choice().unwrap(); + rig.recompute_head().await; assert_eq!( rig.head_root(), @@ -632,20 +620,20 @@ fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { ); } -#[test] -fn attestation_to_unknown_block_processed_after_gossip_block() { - attestation_to_unknown_block_processed(BlockImportMethod::Gossip) +#[tokio::test] +async fn attestation_to_unknown_block_processed_after_gossip_block() { + attestation_to_unknown_block_processed(BlockImportMethod::Gossip).await } -#[test] -fn attestation_to_unknown_block_processed_after_rpc_block() { - attestation_to_unknown_block_processed(BlockImportMethod::Rpc) +#[tokio::test] +async fn attestation_to_unknown_block_processed_after_rpc_block() { + attestation_to_unknown_block_processed(BlockImportMethod::Rpc).await } /// Ensure that attestations that reference an unknown block get properly re-queued and /// re-processed upon importing the block. -fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { - let mut rig = TestRig::new(SMALL_CHAIN); +async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { + let mut rig = TestRig::new(SMALL_CHAIN).await; // Empty the op pool. rig.chain @@ -659,7 +647,8 @@ fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { rig.enqueue_next_block_aggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.op_pool.num_attestations(), @@ -680,11 +669,12 @@ fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { } }; - rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_AGGREGATE]); + rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_AGGREGATE]) + .await; // Run fork choice, since it isn't run when processing an RPC block. 
At runtime it is the // responsibility of the sync manager to do this. - rig.chain.fork_choice().unwrap(); + rig.recompute_head().await; assert_eq!( rig.head_root(), @@ -699,21 +689,21 @@ fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { ); } -#[test] -fn aggregate_attestation_to_unknown_block_processed_after_gossip_block() { - aggregate_attestation_to_unknown_block(BlockImportMethod::Gossip) +#[tokio::test] +async fn aggregate_attestation_to_unknown_block_processed_after_gossip_block() { + aggregate_attestation_to_unknown_block(BlockImportMethod::Gossip).await } -#[test] -fn aggregate_attestation_to_unknown_block_processed_after_rpc_block() { - aggregate_attestation_to_unknown_block(BlockImportMethod::Rpc) +#[tokio::test] +async fn aggregate_attestation_to_unknown_block_processed_after_rpc_block() { + aggregate_attestation_to_unknown_block(BlockImportMethod::Rpc).await } /// Ensure that attestations that reference an unknown block get properly re-queued and re-processed /// when the block is not seen. -#[test] -fn requeue_unknown_block_gossip_attestation_without_import() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn requeue_unknown_block_gossip_attestation_without_import() { + let mut rig = TestRig::new(SMALL_CHAIN).await; // Send the attestation but not the block, and check that it was not imported. 
@@ -721,7 +711,8 @@ fn requeue_unknown_block_gossip_attestation_without_import() { rig.enqueue_next_block_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -734,7 +725,8 @@ fn requeue_unknown_block_gossip_attestation_without_import() { rig.assert_event_journal_with_timeout( &[UNKNOWN_BLOCK_ATTESTATION, WORKER_FREED, NOTHING_TO_DO], Duration::from_secs(1) + QUEUED_ATTESTATION_DELAY, - ); + ) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -745,9 +737,9 @@ fn requeue_unknown_block_gossip_attestation_without_import() { /// Ensure that aggregate that reference an unknown block get properly re-queued and re-processed /// when the block is not seen. -#[test] -fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { + let mut rig = TestRig::new(SMALL_CHAIN).await; // Send the attestation but not the block, and check that it was not imported. 
@@ -755,7 +747,8 @@ fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { rig.enqueue_next_block_aggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -768,7 +761,8 @@ fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { rig.assert_event_journal_with_timeout( &[UNKNOWN_BLOCK_AGGREGATE, WORKER_FREED, NOTHING_TO_DO], Duration::from_secs(1) + QUEUED_ATTESTATION_DELAY, - ); + ) + .await; assert_eq!( rig.chain.op_pool.num_attestations(), @@ -778,10 +772,10 @@ fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { } /// Ensure a bunch of valid operations can be imported. -#[test] -fn import_misc_gossip_ops() { +#[tokio::test] +async fn import_misc_gossip_ops() { // Exits need the long chain so validators aren't too young to exit. - let mut rig = TestRig::new(LONG_CHAIN); + let mut rig = TestRig::new(LONG_CHAIN).await; /* * Attester slashing @@ -791,7 +785,8 @@ fn import_misc_gossip_ops() { rig.enqueue_gossip_attester_slashing(); - rig.assert_event_journal(&[GOSSIP_ATTESTER_SLASHING, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_ATTESTER_SLASHING, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.op_pool.num_attester_slashings(), @@ -807,7 +802,8 @@ fn import_misc_gossip_ops() { rig.enqueue_gossip_proposer_slashing(); - rig.assert_event_journal(&[GOSSIP_PROPOSER_SLASHING, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_PROPOSER_SLASHING, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.op_pool.num_proposer_slashings(), @@ -823,7 +819,8 @@ fn import_misc_gossip_ops() { rig.enqueue_gossip_voluntary_exit(); - rig.assert_event_journal(&[GOSSIP_VOLUNTARY_EXIT, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_VOLUNTARY_EXIT, 
WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.op_pool.num_voluntary_exits(), diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index f014af4c55..56f38c7f22 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -13,6 +13,7 @@ use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerI use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use ssz::Encode; +use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; @@ -636,24 +637,27 @@ impl Worker { /// /// Raises a log if there are errors. #[allow(clippy::too_many_arguments)] - pub fn process_gossip_block( + pub async fn process_gossip_block( self, message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: SignedBeaconBlock, + block: Arc>, reprocess_tx: mpsc::Sender>, duplicate_cache: DuplicateCache, seen_duration: Duration, ) { - if let Some(gossip_verified_block) = self.process_gossip_unverified_block( - message_id, - peer_id, - peer_client, - block, - reprocess_tx.clone(), - seen_duration, - ) { + if let Some(gossip_verified_block) = self + .process_gossip_unverified_block( + message_id, + peer_id, + peer_client, + block, + reprocess_tx.clone(), + seen_duration, + ) + .await + { let block_root = gossip_verified_block.block_root; if let Some(handle) = duplicate_cache.check_and_insert(block_root) { self.process_gossip_verified_block( @@ -661,7 +665,8 @@ impl Worker { gossip_verified_block, reprocess_tx, seen_duration, - ); + ) + .await; // Drop the handle to remove the entry from the cache drop(handle); } else { @@ -678,12 +683,12 @@ impl Worker { /// if it passes gossip propagation criteria, tell the network thread to forward it. 
/// /// Returns the `GossipVerifiedBlock` if verification passes and raises a log if there are errors. - pub fn process_gossip_unverified_block( + pub async fn process_gossip_unverified_block( &self, message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: SignedBeaconBlock, + block: Arc>, reprocess_tx: mpsc::Sender>, seen_duration: Duration, ) -> Option> { @@ -704,7 +709,7 @@ impl Worker { Some(peer_client.to_string()), ); - let verified_block = match self.chain.verify_block_for_gossip(block) { + let verified_block = match self.chain.clone().verify_block_for_gossip(block).await { Ok(verified_block) => { if block_delay >= self.chain.slot_clock.unagg_attestation_production_delay() { metrics::inc_counter(&metrics::BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL); @@ -887,7 +892,7 @@ impl Worker { /// Process the beacon block that has already passed gossip verification. /// /// Raises a log if there are errors. - pub fn process_gossip_verified_block( + pub async fn process_gossip_verified_block( self, peer_id: PeerId, verified_block: GossipVerifiedBlock, @@ -895,9 +900,9 @@ impl Worker { // This value is not used presently, but it might come in handy for debugging. 
_seen_duration: Duration, ) { - let block = Box::new(verified_block.block.clone()); + let block: Arc<_> = verified_block.block.clone(); - match self.chain.process_block(verified_block) { + match self.chain.process_block(verified_block).await { Ok(block_root) => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); @@ -913,24 +918,27 @@ impl Worker { ) }; - trace!( + debug!( self.log, "Gossipsub block processed"; + "block" => ?block_root, "peer_id" => %peer_id ); - match self.chain.fork_choice() { - Ok(()) => trace!( - self.log, - "Fork choice success"; - "location" => "block gossip" - ), - Err(e) => error!( + if let Err(e) = self.chain.recompute_head_at_current_slot().await { + error!( self.log, "Fork choice failed"; "error" => ?e, - "location" => "block gossip" - ), + "location" => "block_gossip" + ) + } else { + debug!( + self.log, + "Fork choice success"; + "block" => ?block_root, + "location" => "block_gossip" + ) } } Err(BlockError::ParentUnknown { .. }) => { @@ -1144,13 +1152,9 @@ impl Worker { .read() .register_gossip_attester_slashing(slashing.as_inner()); - if let Err(e) = self.chain.import_attester_slashing(slashing) { - debug!(self.log, "Error importing attester slashing"; "error" => ?e); - metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_ERROR_TOTAL); - } else { - debug!(self.log, "Successfully imported attester slashing"); - metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL); - } + self.chain.import_attester_slashing(slashing); + debug!(self.log, "Successfully imported attester slashing"); + metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL); } /// Process the sync committee signature received from the gossip network and: diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index cf113ca1fa..87d4da2c6d 100644 --- 
a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -9,6 +9,7 @@ use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error}; use slot_clock::SlotClock; +use std::sync::Arc; use task_executor::TaskExecutor; use types::{Epoch, EthSpec, Hash256, Slot}; @@ -62,7 +63,7 @@ impl Worker { &self, remote: &StatusMessage, ) -> Result, BeaconChainError> { - let local = self.chain.status_message()?; + let local = self.chain.status_message(); let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); let irrelevant_reason = if local.fork_digest != remote.fork_digest { @@ -143,7 +144,7 @@ impl Worker { Ok(Some(block)) => { self.send_response( peer_id, - Response::BlocksByRoot(Some(Box::new(block))), + Response::BlocksByRoot(Some(block)), request_id, ); send_block_count += 1; @@ -266,7 +267,7 @@ impl Worker { blocks_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, - response: Response::BlocksByRange(Some(Box::new(block))), + response: Response::BlocksByRange(Some(Arc::new(block))), id: request_id, }); } diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 04ed1ff608..804cfbe463 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -11,7 +11,8 @@ use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, }; use lighthouse_network::PeerAction; -use slog::{debug, error, info, trace, warn}; +use slog::{debug, error, info, warn}; +use std::sync::Arc; use tokio::sync::mpsc; use types::{Epoch, Hash256, SignedBeaconBlock}; @@ -45,9 +46,9 @@ pub enum FailureMode { impl Worker { /// Attempt to process a block received from a direct RPC 
request. - pub fn process_rpc_block( + pub async fn process_rpc_block( self, - block: SignedBeaconBlock, + block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, reprocess_tx: mpsc::Sender>, @@ -66,7 +67,7 @@ impl Worker { } }; let slot = block.slot(); - let result = self.chain.process_block(block); + let result = self.chain.process_block(block).await; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); @@ -87,7 +88,8 @@ impl Worker { None, None, ); - self.run_fork_choice() + + self.recompute_head("process_rpc_block").await; } } // Sync handles these results @@ -102,10 +104,10 @@ impl Worker { /// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync /// thread if more blocks are needed to process it. - pub fn process_chain_segment( + pub async fn process_chain_segment( &self, sync_type: ChainSegmentProcessId, - downloaded_blocks: Vec>, + downloaded_blocks: Vec>>, ) { let result = match sync_type { // this a request from the range sync @@ -114,7 +116,7 @@ impl Worker { let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); let sent_blocks = downloaded_blocks.len(); - match self.process_blocks(downloaded_blocks.iter()) { + match self.process_blocks(downloaded_blocks.iter()).await { (_, Ok(_)) => { debug!(self.log, "Batch processed"; "batch_epoch" => epoch, @@ -183,7 +185,7 @@ impl Worker { ); // parent blocks are ordered from highest slot to lowest, so we need to process in // reverse - match self.process_blocks(downloaded_blocks.iter().rev()) { + match self.process_blocks(downloaded_blocks.iter().rev()).await { (imported_blocks, Err(e)) => { debug!(self.log, "Parent lookup failed"; "error" => %e.message); BatchProcessResult::Failed { @@ -204,19 +206,17 @@ impl Worker { } /// Helper function to process blocks batches which only consumes the chain and blocks to process. 
- fn process_blocks<'a>( + async fn process_blocks<'a>( &self, - downloaded_blocks: impl Iterator>, + downloaded_blocks: impl Iterator>>, ) -> (usize, Result<(), ChainSegmentFailed>) { - let blocks = downloaded_blocks.cloned().collect::>(); - match self.chain.process_chain_segment(blocks) { + let blocks: Vec> = downloaded_blocks.cloned().collect(); + match self.chain.process_chain_segment(blocks).await { ChainSegmentResult::Successful { imported_blocks } => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL); if imported_blocks > 0 { - // Batch completed successfully with at least one block, run fork choice. - self.run_fork_choice(); + self.recompute_head("process_blocks_ok").await; } - (imported_blocks, Ok(())) } ChainSegmentResult::Failed { @@ -226,7 +226,7 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_FAILED_TOTAL); let r = self.handle_failed_chain_segment(error); if imported_blocks > 0 { - self.run_fork_choice(); + self.recompute_head("process_blocks_err").await; } (imported_blocks, r) } @@ -236,9 +236,13 @@ impl Worker { /// Helper function to process backfill block batches which only consumes the chain and blocks to process. fn process_backfill_blocks( &self, - blocks: Vec>, + blocks: Vec>>, ) -> (usize, Result<(), ChainSegmentFailed>) { - let blinded_blocks = blocks.into_iter().map(Into::into).collect(); + let blinded_blocks = blocks + .iter() + .map(|full_block| full_block.clone_as_blinded()) + .map(Arc::new) + .collect(); match self.chain.import_historical_block_batch(blinded_blocks) { Ok(imported_blocks) => { metrics::inc_counter( @@ -357,18 +361,18 @@ impl Worker { /// Runs fork-choice on a given chain. This is used during block processing after one successful /// block import. 
- fn run_fork_choice(&self) { - match self.chain.fork_choice() { - Ok(()) => trace!( + async fn recompute_head(&self, location: &str) { + match self.chain.recompute_head_at_current_slot().await { + Ok(()) => debug!( self.log, "Fork choice success"; - "location" => "batch processing" + "location" => location ), Err(e) => error!( self.log, "Fork choice failed"; "error" => ?e, - "location" => "batch import error" + "location" => location ), } } diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index cc0165131c..3605b94acf 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -143,10 +143,6 @@ lazy_static! { "beacon_processor_attester_slashing_imported_total", "Total number of attester slashings imported to the op pool." ); - pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_ERROR_TOTAL: Result = try_create_int_counter( - "beacon_processor_attester_slashing_error_total", - "Total number of attester slashings that raised an error during processing." - ); // Rpc blocks. 
pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( "beacon_processor_rpc_block_queue_total", diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index b8db9c17f8..9d86c3e55a 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -2,9 +2,10 @@ use crate::beacon_processor::{ BeaconProcessor, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN, }; use crate::service::{NetworkMessage, RequestId}; +use crate::status::status_message; use crate::sync::manager::RequestId as SyncId; use crate::sync::SyncMessage; -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::*; use lighthouse_network::{ Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response, @@ -114,11 +115,10 @@ impl Processor { /// Called when we first connect to a peer, or when the PeerManager determines we need to /// re-status. pub fn send_status(&mut self, peer_id: PeerId) { - if let Ok(status_message) = status_message(&self.chain) { - debug!(self.log, "Sending Status Request"; "peer" => %peer_id, &status_message); - self.network - .send_processor_request(peer_id, Request::Status(status_message)); - } + let status_message = status_message(&self.chain); + debug!(self.log, "Sending Status Request"; "peer" => %peer_id, &status_message); + self.network + .send_processor_request(peer_id, Request::Status(status_message)); } /// Handle a `Status` request. @@ -132,12 +132,12 @@ impl Processor { ) { debug!(self.log, "Received Status Request"; "peer_id" => %peer_id, &status); - // ignore status responses if we are shutting down - if let Ok(status_message) = status_message(&self.chain) { - // Say status back. - self.network - .send_response(peer_id, Response::Status(status_message), request_id); - } + // Say status back. 
+ self.network.send_response( + peer_id, + Response::Status(status_message(&self.chain)), + request_id, + ); self.send_beacon_processor_work(BeaconWorkEvent::status_message(peer_id, status)) } @@ -178,7 +178,7 @@ impl Processor { &mut self, peer_id: PeerId, request_id: RequestId, - beacon_block: Option>>, + beacon_block: Option>>, ) { let request_id = match request_id { RequestId::Sync(sync_id) => match sync_id { @@ -209,7 +209,7 @@ impl Processor { &mut self, peer_id: PeerId, request_id: RequestId, - beacon_block: Option>>, + beacon_block: Option>>, ) { let request_id = match request_id { RequestId::Sync(sync_id) => match sync_id { @@ -244,7 +244,7 @@ impl Processor { message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: Box>, + block: Arc>, ) { self.send_beacon_processor_work(BeaconWorkEvent::gossip_beacon_block( message_id, @@ -370,22 +370,6 @@ impl Processor { } } -/// Build a `StatusMessage` representing the state of the given `beacon_chain`. -pub(crate) fn status_message( - beacon_chain: &BeaconChain, -) -> Result { - let head_info = beacon_chain.head_info()?; - let fork_digest = beacon_chain.enr_fork_id().fork_digest; - - Ok(StatusMessage { - fork_digest, - finalized_root: head_info.finalized_checkpoint.root, - finalized_epoch: head_info.finalized_checkpoint.epoch, - head_root: head_info.block_root, - head_slot: head_info.slot, - }) -} - /// Wraps a Network Channel to employ various RPC related network functionality for the /// processor. 
#[derive(Clone)] diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index a8995de2e5..c21183608a 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -7,7 +7,7 @@ use crate::{ subnet_service::{AttestationService, SubnetServiceMessage}, NetworkConfig, }; -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; @@ -30,8 +30,8 @@ use task_executor::ShutdownReason; use tokio::sync::mpsc; use tokio::time::Sleep; use types::{ - ChainSpec, EthSpec, ForkContext, RelativeEpoch, Slot, SubnetId, SyncCommitteeSubscription, - SyncSubnetId, Unsigned, ValidatorSubscription, + ChainSpec, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, + Unsigned, ValidatorSubscription, }; mod tests; @@ -706,29 +706,12 @@ impl NetworkService { fn update_gossipsub_parameters(&mut self) { if let Ok(slot) = self.beacon_chain.slot() { - if let Some(active_validators) = self + let active_validators_opt = self .beacon_chain - .with_head(|head| { - Ok::<_, BeaconChainError>( - head.beacon_state - .get_cached_active_validator_indices(RelativeEpoch::Current) - .map(|indices| indices.len()) - .ok() - .or_else(|| { - // if active validator cached was not build we count the - // active validators - self.beacon_chain.epoch().ok().map(|current_epoch| { - head.beacon_state - .validators() - .iter() - .filter(|validator| validator.is_active_at(current_epoch)) - .count() - }) - }), - ) - }) - .unwrap_or(None) - { + .canonical_head + .cached_head() + .active_validator_count(); + if let Some(active_validators) = active_validators_opt { if self .libp2p .swarm @@ -742,6 +725,14 @@ impl NetworkService { "active_validators" => active_validators ); } + } else { + // This scenario will only happen if the caches on the cached canonical head aren't + // built. 
That should never be the case. + error!( + self.log, + "Active validator count unavailable"; + "info" => "please report this bug" + ); } } } diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index ade490e00e..865f8ee933 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -1,4 +1,5 @@ -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use types::{EthSpec, Hash256}; use lighthouse_network::rpc::StatusMessage; /// Trait to produce a `StatusMessage` representing the state of the given `beacon_chain`. @@ -6,20 +7,33 @@ use lighthouse_network::rpc::StatusMessage; /// NOTE: The purpose of this is simply to obtain a `StatusMessage` from the `BeaconChain` without /// polluting/coupling the type with RPC concepts. pub trait ToStatusMessage { - fn status_message(&self) -> Result; + fn status_message(&self) -> StatusMessage; } impl ToStatusMessage for BeaconChain { - fn status_message(&self) -> Result { - let head_info = self.head_info()?; - let fork_digest = self.enr_fork_id().fork_digest; - - Ok(StatusMessage { - fork_digest, - finalized_root: head_info.finalized_checkpoint.root, - finalized_epoch: head_info.finalized_checkpoint.epoch, - head_root: head_info.block_root, - head_slot: head_info.slot, - }) + fn status_message(&self) -> StatusMessage { + status_message(self) + } +} + +/// Build a `StatusMessage` representing the state of the given `beacon_chain`. +pub(crate) fn status_message(beacon_chain: &BeaconChain) -> StatusMessage { + let fork_digest = beacon_chain.enr_fork_id().fork_digest; + let cached_head = beacon_chain.canonical_head.cached_head(); + let mut finalized_checkpoint = cached_head.finalized_checkpoint(); + + // Alias the genesis checkpoint root to `0x00`. 
+ let spec = &beacon_chain.spec; + let genesis_epoch = spec.genesis_slot.epoch(T::EthSpec::slots_per_epoch()); + if finalized_checkpoint.epoch == genesis_epoch { + finalized_checkpoint.root = Hash256::zero(); + } + + StatusMessage { + fork_digest, + finalized_root: finalized_checkpoint.root, + finalized_epoch: finalized_checkpoint.epoch, + head_root: cached_head.head_block_root(), + head_slot: cached_head.head_slot(), } } diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 581f6b3270..778eb63263 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -15,6 +15,7 @@ use std::sync::Arc; use std::time::{Duration, SystemTime}; use store::config::StoreConfig; use store::{HotColdDB, MemoryStore}; +use task_executor::test_utils::TestRuntime; use types::{ CommitteeIndex, Epoch, EthSpec, Hash256, MainnetEthSpec, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, ValidatorSubscription, @@ -32,6 +33,7 @@ type TestBeaconChainType = Witness< pub struct TestBeaconChain { chain: Arc>, + _test_runtime: TestRuntime, } impl TestBeaconChain { @@ -46,11 +48,14 @@ impl TestBeaconChain { let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let test_runtime = TestRuntime::default(); + let chain = Arc::new( BeaconChainBuilder::new(MainnetEthSpec) .logger(log.clone()) .custom_spec(spec.clone()) .store(Arc::new(store)) + .task_executor(test_runtime.task_executor.clone()) .genesis_state( interop_genesis_state::( &keypairs, @@ -74,7 +79,10 @@ impl TestBeaconChain { .build() .expect("should build"), ); - Self { chain } + Self { + chain, + _test_runtime: test_runtime, + } } } diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index d6bb802a21..7ff640065a 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ 
-53,7 +53,7 @@ impl BatchConfig for BackFillBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } - fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64 { + fn batch_attempt_hash(blocks: &[Arc>]) -> u64 { use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; let mut hasher = DefaultHasher::new(); @@ -392,7 +392,7 @@ impl BackFillSync { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - beacon_block: Option>, + beacon_block: Option>>, ) -> Result { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 2770171be9..99df8e4a66 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -7,6 +7,7 @@ use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; use slog::{crit, debug, error, trace, warn, Logger}; use smallvec::SmallVec; +use std::sync::Arc; use store::{Hash256, SignedBeaconBlock}; use tokio::sync::mpsc; @@ -105,7 +106,7 @@ impl BlockLookups { pub fn search_parent( &mut self, - block: Box>, + block: Arc>, peer_id: PeerId, cx: &mut SyncNetworkContext, ) { @@ -129,7 +130,7 @@ impl BlockLookups { return; } - let parent_lookup = ParentLookup::new(*block, peer_id); + let parent_lookup = ParentLookup::new(block, peer_id); self.request_parent(parent_lookup, cx); } @@ -139,7 +140,7 @@ impl BlockLookups { &mut self, id: Id, peer_id: PeerId, - block: Option>>, + block: Option>>, seen_timestamp: Duration, cx: &mut SyncNetworkContext, ) { @@ -203,7 +204,7 @@ impl BlockLookups { &mut self, id: Id, peer_id: PeerId, - block: Option>>, + block: Option>>, seen_timestamp: Duration, cx: &mut SyncNetworkContext, ) { @@ -496,7 +497,7 @@ impl BlockLookups { Err(BlockError::ParentUnknown(block)) => { // need to keep looking for parents // add the block back to the queue and continue the 
search - parent_lookup.add_block(*block); + parent_lookup.add_block(block); self.request_parent(parent_lookup, cx); } Ok(_) | Err(BlockError::BlockIsAlreadyKnown { .. }) => { @@ -618,7 +619,7 @@ impl BlockLookups { fn send_block_for_processing( &mut self, - block: Box>, + block: Arc>, duration: Duration, process_type: BlockProcessType, ) -> Result<(), ()> { diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index a9a3c34bc0..62503353ad 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,4 +1,5 @@ use lighthouse_network::PeerId; +use std::sync::Arc; use store::{EthSpec, Hash256, SignedBeaconBlock}; use strum::IntoStaticStr; @@ -21,7 +22,7 @@ pub(crate) struct ParentLookup { /// The root of the block triggering this parent request. chain_hash: Hash256, /// The blocks that have currently been downloaded. - downloaded_blocks: Vec>, + downloaded_blocks: Vec>>, /// Request of the last parent. current_parent_request: SingleBlockRequest, /// Id of the last parent request. 
@@ -48,10 +49,10 @@ impl ParentLookup { pub fn contains_block(&self, block: &SignedBeaconBlock) -> bool { self.downloaded_blocks .iter() - .any(|d_block| d_block == block) + .any(|d_block| d_block.as_ref() == block) } - pub fn new(block: SignedBeaconBlock, peer_id: PeerId) -> Self { + pub fn new(block: Arc>, peer_id: PeerId) -> Self { let current_parent_request = SingleBlockRequest::new(block.parent_root(), peer_id); Self { @@ -86,7 +87,7 @@ impl ParentLookup { self.current_parent_request.check_peer_disconnected(peer_id) } - pub fn add_block(&mut self, block: SignedBeaconBlock) { + pub fn add_block(&mut self, block: Arc>) { let next_parent = block.parent_root(); self.downloaded_blocks.push(block); self.current_parent_request.hash = next_parent; @@ -108,7 +109,7 @@ impl ParentLookup { self.current_parent_request_id = None; } - pub fn chain_blocks(&mut self) -> Vec> { + pub fn chain_blocks(&mut self) -> Vec>> { std::mem::take(&mut self.downloaded_blocks) } @@ -116,9 +117,9 @@ impl ParentLookup { /// the processing result of the block. pub fn verify_block( &mut self, - block: Option>>, + block: Option>>, failed_chains: &mut lru_cache::LRUTimeCache, - ) -> Result>>, VerifyError> { + ) -> Result>>, VerifyError> { let block = self.current_parent_request.verify_block(block)?; // check if the parent of this block isn't in the failed cache. If it is, this chain should diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 347a4ae437..debf3de8db 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -1,4 +1,5 @@ use std::collections::HashSet; +use std::sync::Arc; use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; use rand::seq::IteratorRandom; @@ -82,8 +83,8 @@ impl SingleBlockRequest { /// Returns the block for processing if the response is what we expected. 
pub fn verify_block( &mut self, - block: Option>>, - ) -> Result>>, VerifyError> { + block: Option>>, + ) -> Result>>, VerifyError> { match self.state { State::AwaitingDownload => { self.register_failure(); @@ -195,7 +196,7 @@ mod tests { let mut sl = SingleBlockRequest::<4>::new(block.canonical_root(), peer_id); sl.request_block().unwrap(); - sl.verify_block(Some(Box::new(block))).unwrap().unwrap(); + sl.verify_block(Some(Arc::new(block))).unwrap().unwrap(); } #[test] diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index dde7d49953..e9c8ac8ca7 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -158,7 +158,7 @@ fn test_single_block_lookup_happy_path() { // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. - bl.single_block_lookup_response(id, peer_id, Some(Box::new(block)), D, &mut cx); + bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block)), D, &mut cx); rig.expect_empty_network(); rig.expect_block_process(); @@ -204,7 +204,7 @@ fn test_single_block_lookup_wrong_response() { // Peer sends something else. It should be penalized. let bad_block = rig.rand_block(); - bl.single_block_lookup_response(id, peer_id, Some(Box::new(bad_block)), D, &mut cx); + bl.single_block_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); rig.expect_penalty(); rig.expect_block_request(); // should be retried @@ -243,7 +243,7 @@ fn test_single_block_lookup_becomes_parent_request() { // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. 
- bl.single_block_lookup_response(id, peer_id, Some(Box::new(block.clone())), D, &mut cx); + bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block.clone())), D, &mut cx); rig.expect_empty_network(); rig.expect_block_process(); @@ -252,7 +252,7 @@ fn test_single_block_lookup_becomes_parent_request() { // Send the stream termination. Peer should have not been penalized, and the request moved to a // parent request after processing. - bl.single_block_processed(id, Err(BlockError::ParentUnknown(Box::new(block))), &mut cx); + bl.single_block_processed(id, Err(BlockError::ParentUnknown(Arc::new(block))), &mut cx); assert_eq!(bl.single_block_lookups.len(), 0); rig.expect_parent_request(); rig.expect_empty_network(); @@ -269,11 +269,11 @@ fn test_parent_lookup_happy_path() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); let id = rig.expect_parent_request(); // Peer sends the right block, it should be sent for processing. Peer should not be penalized. - bl.parent_lookup_response(id, peer_id, Some(Box::new(parent)), D, &mut cx); + bl.parent_lookup_response(id, peer_id, Some(Arc::new(parent)), D, &mut cx); rig.expect_block_process(); rig.expect_empty_network(); @@ -294,12 +294,12 @@ fn test_parent_lookup_wrong_response() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); let id1 = rig.expect_parent_request(); // Peer sends the wrong block, peer should be penalized and the block re-requested. 
let bad_block = rig.rand_block(); - bl.parent_lookup_response(id1, peer_id, Some(Box::new(bad_block)), D, &mut cx); + bl.parent_lookup_response(id1, peer_id, Some(Arc::new(bad_block)), D, &mut cx); rig.expect_penalty(); let id2 = rig.expect_parent_request(); @@ -308,7 +308,7 @@ fn test_parent_lookup_wrong_response() { rig.expect_empty_network(); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Box::new(parent)), D, &mut cx); + bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. @@ -328,7 +328,7 @@ fn test_parent_lookup_empty_response() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); let id1 = rig.expect_parent_request(); // Peer sends an empty response, peer should be penalized and the block re-requested. @@ -337,7 +337,7 @@ fn test_parent_lookup_empty_response() { let id2 = rig.expect_parent_request(); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Box::new(parent)), D, &mut cx); + bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. @@ -357,7 +357,7 @@ fn test_parent_lookup_rpc_failure() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); let id1 = rig.expect_parent_request(); // The request fails. It should be tried again. @@ -365,7 +365,7 @@ fn test_parent_lookup_rpc_failure() { let id2 = rig.expect_parent_request(); // Send the right block this time. 
- bl.parent_lookup_response(id2, peer_id, Some(Box::new(parent)), D, &mut cx); + bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. @@ -385,7 +385,7 @@ fn test_parent_lookup_too_many_attempts() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE + 1 { let id = rig.expect_parent_request(); match i % 2 { @@ -397,7 +397,7 @@ fn test_parent_lookup_too_many_attempts() { _ => { // Send a bad block this time. It should be tried again. let bad_block = rig.rand_block(); - bl.parent_lookup_response(id, peer_id, Some(Box::new(bad_block)), D, &mut cx); + bl.parent_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); rig.expect_penalty(); } } @@ -427,12 +427,12 @@ fn test_parent_lookup_too_deep() { let peer_id = PeerId::random(); let trigger_block = blocks.pop().unwrap(); let chain_hash = trigger_block.canonical_root(); - bl.search_parent(Box::new(trigger_block), peer_id, &mut cx); + bl.search_parent(Arc::new(trigger_block), peer_id, &mut cx); for block in blocks.into_iter().rev() { let id = rig.expect_parent_request(); // the block - bl.parent_lookup_response(id, peer_id, Some(Box::new(block.clone())), D, &mut cx); + bl.parent_lookup_response(id, peer_id, Some(Arc::new(block.clone())), D, &mut cx); // the stream termination bl.parent_lookup_response(id, peer_id, None, D, &mut cx); // the processing request @@ -440,7 +440,7 @@ fn test_parent_lookup_too_deep() { // the processing result bl.parent_block_processed( chain_hash, - Err(BlockError::ParentUnknown(Box::new(block))), + Err(BlockError::ParentUnknown(Arc::new(block))), &mut cx, ) } @@ -454,7 +454,7 @@ fn test_parent_lookup_disconnection() { let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); let peer_id = 
PeerId::random(); let trigger_block = rig.rand_block(); - bl.search_parent(Box::new(trigger_block), peer_id, &mut cx); + bl.search_parent(Arc::new(trigger_block), peer_id, &mut cx); bl.peer_disconnected(&peer_id, &mut cx); assert!(bl.parent_queue.is_empty()); } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 311fbf67c4..3e44256655 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -88,12 +88,12 @@ pub enum SyncMessage { RpcBlock { request_id: RequestId, peer_id: PeerId, - beacon_block: Option>>, + beacon_block: Option>>, seen_timestamp: Duration, }, /// A block with an unknown parent has been received. - UnknownBlock(PeerId, Box>), + UnknownBlock(PeerId, Arc>), /// A peer has sent an object that references a block that is unknown. This triggers the /// manager to attempt to find the block matching the unknown hash. @@ -229,17 +229,12 @@ impl SyncManager { /// ours that we consider it fully sync'd with respect to our current chain. fn add_peer(&mut self, peer_id: PeerId, remote: SyncInfo) { // ensure the beacon chain still exists - let local = match self.chain.status_message() { - Ok(status) => SyncInfo { - head_slot: status.head_slot, - head_root: status.head_root, - finalized_epoch: status.finalized_epoch, - finalized_root: status.finalized_root, - }, - Err(e) => { - return error!(self.log, "Failed to get peer sync info"; - "msg" => "likely due to head lock contention", "err" => ?e) - } + let status = self.chain.status_message(); + let local = SyncInfo { + head_slot: status.head_slot, + head_root: status.head_root, + finalized_epoch: status.finalized_epoch, + finalized_root: status.finalized_root, }; let sync_type = remote_sync_type(&local, &remote, &self.chain); @@ -379,7 +374,7 @@ impl SyncManager { // advanced and will produce a head chain on re-status. 
Otherwise it will shift // to being synced let mut sync_state = { - let head = self.chain.best_slot().unwrap_or_else(|_| Slot::new(0)); + let head = self.chain.best_slot(); let current_slot = self.chain.slot().unwrap_or_else(|_| Slot::new(0)); let peers = self.network_globals.peers.read(); @@ -482,11 +477,7 @@ impl SyncManager { SyncMessage::UnknownBlock(peer_id, block) => { // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore if !self.network_globals.sync_state.read().is_synced() { - let head_slot = self - .chain - .head_info() - .map(|info| info.slot) - .unwrap_or_else(|_| Slot::from(0u64)); + let head_slot = self.chain.canonical_head.cached_head().head_slot(); let unknown_block_slot = block.slot(); // if the block is far in the future, ignore it. If its within the slot tolerance of @@ -571,7 +562,7 @@ impl SyncManager { &mut self, request_id: RequestId, peer_id: PeerId, - beacon_block: Option>>, + beacon_block: Option>>, seen_timestamp: Duration, ) { match request_id { @@ -599,7 +590,7 @@ impl SyncManager { batch_id, &peer_id, id, - beacon_block.map(|b| *b), + beacon_block, ) { Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), Ok(ProcessResult::Successful) => {} @@ -621,7 +612,7 @@ impl SyncManager { chain_id, batch_id, id, - beacon_block.map(|b| *b), + beacon_block, ); self.update_sync_state(); } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 96bdc533f8..ffbd1a64da 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -65,27 +65,26 @@ impl SyncNetworkContext { chain: &C, peers: impl Iterator, ) { - if let Ok(status_message) = chain.status_message() { - for peer_id in peers { - debug!( - self.log, - "Sending Status Request"; - "peer" => %peer_id, - "fork_digest" => ?status_message.fork_digest, - "finalized_root" => ?status_message.finalized_root, - "finalized_epoch" => 
?status_message.finalized_epoch, - "head_root" => %status_message.head_root, - "head_slot" => %status_message.head_slot, - ); + let status_message = chain.status_message(); + for peer_id in peers { + debug!( + self.log, + "Sending Status Request"; + "peer" => %peer_id, + "fork_digest" => ?status_message.fork_digest, + "finalized_root" => ?status_message.finalized_root, + "finalized_epoch" => ?status_message.finalized_epoch, + "head_root" => %status_message.head_root, + "head_slot" => %status_message.head_slot, + ); - let request = Request::Status(status_message.clone()); - let request_id = RequestId::Router; - let _ = self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request, - request_id, - }); - } + let request = Request::Status(status_message.clone()); + let request_id = RequestId::Router; + let _ = self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request, + request_id, + }); } } diff --git a/beacon_node/network/src/sync/peer_sync_info.rs b/beacon_node/network/src/sync/peer_sync_info.rs index ed3f07763c..c01366f1be 100644 --- a/beacon_node/network/src/sync/peer_sync_info.rs +++ b/beacon_node/network/src/sync/peer_sync_info.rs @@ -59,7 +59,7 @@ pub fn remote_sync_type( if remote.head_slot < near_range_start { PeerSyncType::Behind } else if remote.head_slot > near_range_end - && !chain.fork_choice.read().contains_block(&remote.head_root) + && !chain.block_is_known_to_fork_choice(&remote.head_root) { // This peer has a head ahead enough of ours and we have no knowledge of their best // block. 
@@ -74,7 +74,7 @@ pub fn remote_sync_type( if (local.finalized_epoch + 1 == remote.finalized_epoch && near_range_start <= remote.head_slot && remote.head_slot <= near_range_end) - || chain.fork_choice.read().contains_block(&remote.head_root) + || chain.block_is_known_to_fork_choice(&remote.head_root) { // This peer is near enough to us to be considered synced, or // we have already synced up to this peer's head diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 447f0bd11c..c642d81db8 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -4,6 +4,7 @@ use lighthouse_network::PeerId; use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::ops::Sub; +use std::sync::Arc; use types::{Epoch, EthSpec, SignedBeaconBlock, Slot}; /// The number of times to retry a batch before it is considered failed. @@ -46,7 +47,7 @@ pub trait BatchConfig { /// Note that simpler hashing functions considered in the past (hash of first block, hash of last /// block, number of received blocks) are not good enough to differentiate attempts. For this /// reason, we hash the complete set of blocks both in RangeSync and BackFillSync. - fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64; + fn batch_attempt_hash(blocks: &[Arc>]) -> u64; } pub struct RangeSyncBatchConfig {} @@ -58,7 +59,7 @@ impl BatchConfig for RangeSyncBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } - fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64 { + fn batch_attempt_hash(blocks: &[Arc>]) -> u64 { let mut hasher = std::collections::hash_map::DefaultHasher::new(); blocks.hash(&mut hasher); hasher.finish() @@ -100,9 +101,9 @@ pub enum BatchState { /// The batch has failed either downloading or processing, but can be requested again. AwaitingDownload, /// The batch is being downloaded. 
- Downloading(PeerId, Vec>, Id), + Downloading(PeerId, Vec>>, Id), /// The batch has been completely downloaded and is ready for processing. - AwaitingProcessing(PeerId, Vec>), + AwaitingProcessing(PeerId, Vec>>), /// The batch is being processed. Processing(Attempt), /// The batch was successfully processed and is waiting to be validated. @@ -211,7 +212,7 @@ impl BatchInfo { } /// Adds a block to a downloading batch. - pub fn add_block(&mut self, block: SignedBeaconBlock) -> Result<(), WrongState> { + pub fn add_block(&mut self, block: Arc>) -> Result<(), WrongState> { match self.state.poison() { BatchState::Downloading(peer, mut blocks, req_id) => { blocks.push(block); @@ -337,7 +338,7 @@ impl BatchInfo { } } - pub fn start_processing(&mut self) -> Result>, WrongState> { + pub fn start_processing(&mut self) -> Result>>, WrongState> { match self.state.poison() { BatchState::AwaitingProcessing(peer, blocks) => { self.state = BatchState::Processing(Attempt::new::(peer, &blocks)); @@ -436,7 +437,10 @@ pub struct Attempt { } impl Attempt { - fn new(peer_id: PeerId, blocks: &[SignedBeaconBlock]) -> Self { + fn new( + peer_id: PeerId, + blocks: &[Arc>], + ) -> Self { let hash = B::batch_attempt_hash(blocks); Attempt { peer_id, hash } } diff --git a/beacon_node/network/src/sync/range_sync/block_storage.rs b/beacon_node/network/src/sync/range_sync/block_storage.rs index 5f8033bc51..df49543a6b 100644 --- a/beacon_node/network/src/sync/range_sync/block_storage.rs +++ b/beacon_node/network/src/sync/range_sync/block_storage.rs @@ -8,6 +8,6 @@ pub trait BlockStorage { impl BlockStorage for BeaconChain { fn is_block_known(&self, block_root: &Hash256) -> bool { - self.fork_choice.read().contains_block(block_root) + self.block_is_known_to_fork_choice(block_root) } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 0f5d63ea6d..ef5ba23e66 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ 
b/beacon_node/network/src/sync/range_sync/chain.rs @@ -9,6 +9,7 @@ use rand::seq::SliceRandom; use slog::{crit, debug, o, warn}; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::hash::{Hash, Hasher}; +use std::sync::Arc; use tokio::sync::mpsc::Sender; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; @@ -216,7 +217,7 @@ impl SyncingChain { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - beacon_block: Option>, + beacon_block: Option>>, ) -> ProcessingResult { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 9953df81d0..f08f8eb82a 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -53,7 +53,7 @@ use lighthouse_network::rpc::GoodbyeReason; use lighthouse_network::PeerId; use lighthouse_network::SyncInfo; use lru_cache::LRUTimeCache; -use slog::{crit, debug, error, trace, warn}; +use slog::{crit, debug, trace, warn}; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::mpsc; @@ -221,7 +221,7 @@ where chain_id: ChainId, batch_id: BatchId, request_id: Id, - beacon_block: Option>, + beacon_block: Option>>, ) { // check if this chunk removes the chain match self.chains.call_by_id(chain_id, |chain| { @@ -365,17 +365,12 @@ where network.status_peers(self.beacon_chain.as_ref(), chain.peers()); - let local = match self.beacon_chain.status_message() { - Ok(status) => SyncInfo { - head_slot: status.head_slot, - head_root: status.head_root, - finalized_epoch: status.finalized_epoch, - finalized_root: status.finalized_root, - }, - Err(e) => { - return error!(self.log, "Failed to get peer sync info"; - "msg" => "likely due to head lock contention", "err" => ?e) - } + let status = self.beacon_chain.status_message(); + let local = SyncInfo { + head_slot: status.head_slot, + head_root: status.head_root, + 
finalized_epoch: status.finalized_epoch, + finalized_root: status.finalized_root, }; // update the state of the collection @@ -447,8 +442,8 @@ mod tests { } impl ToStatusMessage for FakeStorage { - fn status_message(&self) -> Result { - Ok(self.status.read().clone()) + fn status_message(&self) -> StatusMessage { + self.status.read().clone() } } diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 84d23a4562..6b8b8eb145 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -21,3 +21,4 @@ store = { path = "../store" } [dev-dependencies] beacon_chain = { path = "../beacon_chain" } +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 70eb31cd0f..771dca12f6 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -710,7 +710,7 @@ mod release_tests { } /// Test state for sync contribution-related tests. - fn sync_contribution_test_state( + async fn sync_contribution_test_state( num_committees: usize, ) -> (BeaconChainHarness>, ChainSpec) { let mut spec = E::default_spec(); @@ -722,12 +722,14 @@ mod release_tests { let harness = get_harness::(num_validators, Some(spec.clone())); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - &[Slot::new(1)], - (0..num_validators).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + &[Slot::new(1)], + (0..num_validators).collect::>().as_slice(), + ) + .await; (harness, spec) } @@ -1454,9 +1456,9 @@ mod release_tests { } /// End-to-end test of basic sync contribution handling. 
- #[test] - fn sync_contribution_aggregation_insert_get_prune() { - let (harness, _) = sync_contribution_test_state::(1); + #[tokio::test] + async fn sync_contribution_aggregation_insert_get_prune() { + let (harness, _) = sync_contribution_test_state::(1).await; let op_pool = OperationPool::::new(); let state = harness.get_current_state(); @@ -1514,9 +1516,9 @@ mod release_tests { } /// Adding a sync contribution already in the pool should not increase the size of the pool. - #[test] - fn sync_contribution_duplicate() { - let (harness, _) = sync_contribution_test_state::(1); + #[tokio::test] + async fn sync_contribution_duplicate() { + let (harness, _) = sync_contribution_test_state::(1).await; let op_pool = OperationPool::::new(); let state = harness.get_current_state(); @@ -1551,9 +1553,9 @@ mod release_tests { /// Adding a sync contribution already in the pool with more bits set should increase the /// number of bits set in the aggregate. - #[test] - fn sync_contribution_with_more_bits() { - let (harness, _) = sync_contribution_test_state::(1); + #[tokio::test] + async fn sync_contribution_with_more_bits() { + let (harness, _) = sync_contribution_test_state::(1).await; let op_pool = OperationPool::::new(); let state = harness.get_current_state(); @@ -1631,9 +1633,9 @@ mod release_tests { /// Adding a sync contribution already in the pool with fewer bits set should not increase the /// number of bits set in the aggregate. 
- #[test] - fn sync_contribution_with_fewer_bits() { - let (harness, _) = sync_contribution_test_state::(1); + #[tokio::test] + async fn sync_contribution_with_fewer_bits() { + let (harness, _) = sync_contribution_test_state::(1).await; let op_pool = OperationPool::::new(); let state = harness.get_current_state(); diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index fe66a176b6..e66cee6fde 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -669,7 +669,11 @@ impl, Cold: ItemStore> HotColdDB for op in batch { match op { StoreOp::PutBlock(block_root, block) => { - self.block_as_kv_store_ops(&block_root, *block, &mut key_value_batch)?; + self.block_as_kv_store_ops( + &block_root, + block.as_ref().clone(), + &mut key_value_batch, + )?; } StoreOp::PutState(state_root, state) => { diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 613c2e416c..364bda2cc4 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -39,6 +39,7 @@ pub use impls::beacon_state::StorageContainer as BeaconStateStorageContainer; pub use metadata::AnchorInfo; pub use metrics::scrape_for_metrics; use parking_lot::MutexGuard; +use std::sync::Arc; use strum::{EnumString, IntoStaticStr}; pub use types::*; @@ -152,7 +153,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati /// Reified key-value storage operation. Helps in modifying the storage atomically. /// See also https://github.com/sigp/lighthouse/issues/692 pub enum StoreOp<'a, E: EthSpec> { - PutBlock(Hash256, Box>), + PutBlock(Hash256, Arc>), PutState(Hash256, &'a BeaconState), PutStateSummary(Hash256, HotStateSummary), PutStateTemporaryFlag(Hash256), diff --git a/beacon_node/timer/src/lib.rs b/beacon_node/timer/src/lib.rs index bf2acaf5bb..944846c863 100644 --- a/beacon_node/timer/src/lib.rs +++ b/beacon_node/timer/src/lib.rs @@ -3,7 +3,7 @@ //! 
This service allows task execution on the beacon node for various functionality. use beacon_chain::{BeaconChain, BeaconChainTypes}; -use slog::{debug, info, warn}; +use slog::{info, warn}; use slot_clock::SlotClock; use std::sync::Arc; use tokio::time::sleep; @@ -13,11 +13,8 @@ pub fn spawn_timer( executor: task_executor::TaskExecutor, beacon_chain: Arc>, ) -> Result<(), &'static str> { - let log = executor.log(); - let per_slot_executor = executor.clone(); - + let log = executor.log().clone(); let timer_future = async move { - let log = per_slot_executor.log().clone(); loop { let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() { Some(duration) => duration, @@ -28,31 +25,12 @@ pub fn spawn_timer( }; sleep(duration_to_next_slot).await; - - let chain = beacon_chain.clone(); - if let Some(handle) = per_slot_executor - .spawn_blocking_handle(move || chain.per_slot_task(), "timer_per_slot_task") - { - if let Err(e) = handle.await { - warn!( - log, - "Per slot task failed"; - "info" => ?e - ); - } - } else { - debug!( - log, - "Per slot task timer stopped"; - "info" => "shutting down" - ); - break; - } + beacon_chain.per_slot_task().await; } }; executor.spawn(timer_future, "timer"); - info!(log, "Timer service started"); + info!(executor.log(), "Timer service started"); Ok(()) } diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index f344dc4735..08bb565870 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2021" [dependencies] -tokio = { version = "1.14.0", features = ["rt-multi-thread"] } +tokio = { version = "1.14.0", features = ["rt-multi-thread", "macros"] } slog = "2.5.2" futures = "0.3.7" exit-future = "0.2.0" diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index dd525bea50..6bf4cc8e08 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -7,6 +7,8 @@ use 
slog::{crit, debug, o, trace}; use std::sync::Weak; use tokio::runtime::{Handle, Runtime}; +pub use tokio::task::JoinHandle; + /// Provides a reason when Lighthouse is shut down. #[derive(Copy, Clone, Debug, PartialEq)] pub enum ShutdownReason { @@ -312,6 +314,61 @@ impl TaskExecutor { Some(future) } + /// Block the current (non-async) thread on the completion of some future. + /// + /// ## Warning + /// + /// This method is "dangerous" since calling it from an async thread will result in a panic! Any + /// use of this outside of testing should be very deeply considered as Lighthouse has been + /// burned by this function in the past. + /// + /// Determining what is an "async thread" is rather challenging; just because a function isn't + /// marked as `async` doesn't mean it's not being called from an `async` function or there isn't + /// a `tokio` context present in the thread-local storage due to some `rayon` funkiness. Talk to + /// @paulhauner if you plan to use this function in production. He has put metrics in here to + /// track any use of it, so don't think you can pull a sneaky one on him. + pub fn block_on_dangerous( + &self, + future: F, + name: &'static str, + ) -> Option { + let timer = metrics::start_timer_vec(&metrics::BLOCK_ON_TASKS_HISTOGRAM, &[name]); + metrics::inc_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]); + let log = self.log.clone(); + let handle = self.handle()?; + let exit = self.exit.clone(); + + debug!( + log, + "Starting block_on task"; + "name" => name + ); + + handle.block_on(async { + let output = tokio::select! { + output = future => { + debug!( + log, + "Completed block_on task"; + "name" => name + ); + Some(output) + }, + _ = exit => { + debug!( + log, + "Cancelled block_on task"; + "name" => name, + ); + None + } + }; + metrics::dec_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]); + drop(timer); + output + }) + } + /// Returns a `Handle` to the current runtime. 
pub fn handle(&self) -> Option { self.handle_provider.handle() diff --git a/common/task_executor/src/metrics.rs b/common/task_executor/src/metrics.rs index ead5925b6e..6ecea86d65 100644 --- a/common/task_executor/src/metrics.rs +++ b/common/task_executor/src/metrics.rs @@ -18,6 +18,16 @@ lazy_static! { "Time taken by blocking tasks", &["blocking_task_hist"] ); + pub static ref BLOCK_ON_TASKS_COUNT: Result = try_create_int_gauge_vec( + "block_on_tasks_count", + "Total number of block_on_dangerous tasks spawned", + &["name"] + ); + pub static ref BLOCK_ON_TASKS_HISTOGRAM: Result = try_create_histogram_vec( + "block_on_tasks_histogram", + "Time taken by block_on_dangerous tasks", + &["name"] + ); pub static ref TASKS_HISTOGRAM: Result = try_create_histogram_vec( "async_tasks_time_histogram", "Time taken by async tasks", diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 77603d09e6..429ab1b8c5 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -15,3 +15,4 @@ eth2_ssz_derive = "0.3.0" [dev-dependencies] beacon_chain = { path = "../../beacon_node/beacon_chain" } store = { path = "../../beacon_node/store" } +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 49510e7326..7390ce7f94 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -5,7 +5,7 @@ use std::cmp::Ordering; use std::marker::PhantomData; use std::time::Duration; use types::{ - consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, BeaconBlock, BeaconState, + consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, }; @@ -248,6 +248,7 @@ fn dequeue_attestations( /// Equivalent to the 
`is_from_block` `bool` in: /// /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation +#[derive(Clone, Copy)] pub enum AttestationFromBlock { True, False, @@ -261,6 +262,13 @@ pub struct ForkchoiceUpdateParameters { pub finalized_hash: Option, } +#[derive(Clone, Copy, Debug, PartialEq)] +pub struct ForkChoiceView { + pub head_block_root: Hash256, + pub justified_checkpoint: Checkpoint, + pub finalized_checkpoint: Checkpoint, +} + /// Provides an implementation of "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice": /// /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#ethereum-20-phase-0----beacon-chain-fork-choice @@ -279,7 +287,9 @@ pub struct ForkChoice { /// Attestations that arrived at the current slot and must be queued for later processing. queued_attestations: Vec, /// Stores a cache of the values required to be sent to the execution layer. - forkchoice_update_parameters: Option, + forkchoice_update_parameters: ForkchoiceUpdateParameters, + /// The most recent result of running `Self::get_head`. + head_block_root: Hash256, _phantom: PhantomData, } @@ -306,6 +316,8 @@ where anchor_block_root: Hash256, anchor_block: &SignedBeaconBlock, anchor_state: &BeaconState, + current_slot: Option, + spec: &ChainSpec, ) -> Result> { // Sanity check: the anchor must lie on an epoch boundary. if anchor_block.slot() % E::slots_per_epoch() != 0 { @@ -340,6 +352,9 @@ where }, ); + // If the current slot is not provided, use the value that was last provided to the store. 
+ let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot()); + let proto_array = ProtoArrayForkChoice::new( finalized_block_slot, finalized_block_state_root, @@ -350,15 +365,28 @@ where execution_status, )?; - Ok(Self { + let mut fork_choice = Self { fc_store, proto_array, queued_attestations: vec![], - forkchoice_update_parameters: None, + // This will be updated during the next call to `Self::get_head`. + forkchoice_update_parameters: ForkchoiceUpdateParameters { + head_hash: None, + finalized_hash: None, + head_root: Hash256::zero(), + }, + // This will be updated during the next call to `Self::get_head`. + head_block_root: Hash256::zero(), _phantom: PhantomData, - }) + }; + + // Ensure that `fork_choice.head_block_root` is updated. + fork_choice.get_head(current_slot, spec)?; + + Ok(fork_choice) } + /* /// Instantiates `Self` from some existing components. /// /// This is useful if the existing components have been loaded from disk after a process @@ -376,13 +404,13 @@ where _phantom: PhantomData, } } + */ /// Returns cached information that can be used to issue a `forkchoiceUpdated` message to an /// execution engine. /// - /// These values are updated each time `Self::get_head` is called. May return `None` if - /// `Self::get_head` has not yet been called. - pub fn get_forkchoice_update_parameters(&self) -> Option { + /// These values are updated each time `Self::get_head` is called. + pub fn get_forkchoice_update_parameters(&self) -> ForkchoiceUpdateParameters { self.forkchoice_update_parameters } @@ -455,6 +483,8 @@ where spec, )?; + self.head_block_root = head_root; + // Cache some values for the next forkchoiceUpdate call to the execution layer. 
let head_hash = self .get_block(&head_root) @@ -463,15 +493,35 @@ where let finalized_hash = self .get_block(&finalized_root) .and_then(|b| b.execution_status.block_hash()); - self.forkchoice_update_parameters = Some(ForkchoiceUpdateParameters { + self.forkchoice_update_parameters = ForkchoiceUpdateParameters { head_root, head_hash, finalized_hash, - }); + }; Ok(head_root) } + /// Return information about: + /// + /// - The LMD head of the chain. + /// - The FFG checkpoints. + /// + /// The information is "cached" since the last call to `Self::get_head`. + /// + /// ## Notes + /// + /// The finalized/justified checkpoints are determined from the fork choice store. Therefore, + /// it's possible that the state corresponding to `get_state(get_block(head_block_root))` will + /// have *differing* finalized and justified information. + pub fn cached_fork_choice_view(&self) -> ForkChoiceView { + ForkChoiceView { + head_block_root: self.head_block_root, + justified_checkpoint: self.justified_checkpoint(), + finalized_checkpoint: self.finalized_checkpoint(), + } + } + /// Returns `true` if the given `store` should be updated to set /// `state.current_justified_checkpoint` its `justified_checkpoint`. /// @@ -566,7 +616,7 @@ where pub fn on_block>( &mut self, current_slot: Slot, - block: &BeaconBlock, + block: BeaconBlockRef, block_root: Hash256, block_delay: Duration, state: &BeaconState, @@ -966,6 +1016,11 @@ where } } + /// Returns the weight for the given block root. + pub fn get_block_weight(&self, block_root: &Hash256) -> Option { + self.proto_array.get_weight(block_root) + } + /// Returns the `ProtoBlock` for the justified checkpoint. /// /// ## Notes @@ -995,6 +1050,39 @@ where .is_descendant(self.fc_store.finalized_checkpoint().root, block_root) } + /// Returns `Ok(true)` if `block_root` has been imported optimistically. That is, the + /// execution payload has not been verified. 
+ ///
+ /// Returns `Ok(false)` if `block_root`'s execution payload has been verified, if it is a
+ /// pre-Bellatrix block or if it is before the PoW terminal block.
+ ///
+ /// In the case where the block could not be found in fork-choice, it returns the
+ /// `execution_status` of the current finalized block.
+ ///
+ /// This function assumes the `block_root` exists.
+ pub fn is_optimistic_block(&self, block_root: &Hash256) -> Result> {
+ if let Some(status) = self.get_block_execution_status(block_root) {
+ Ok(status.is_optimistic())
+ } else {
+ Ok(self.get_finalized_block()?.execution_status.is_optimistic())
+ }
+ }
+
+ /// The same as `is_optimistic_block` but does not fallback to `self.get_finalized_block`
+ /// when the block cannot be found.
+ ///
+ /// Intended to be used when checking if the head has been imported optimistically.
+ pub fn is_optimistic_block_no_fallback(
+ &self,
+ block_root: &Hash256,
+ ) -> Result> {
+ if let Some(status) = self.get_block_execution_status(block_root) {
+ Ok(status.is_optimistic())
+ } else {
+ Err(Error::MissingProtoArrayBlock(*block_root))
+ }
+ }
+
 /// Returns `Ok(false)` if a block is not viable to be imported optimistically.
 ///
 /// ## Notes
@@ -1109,17 +1197,31 @@ where
 pub fn from_persisted(
 persisted: PersistedForkChoice,
 fc_store: T,
+ spec: &ChainSpec,
 ) -> Result> {
 let proto_array = ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes)
 .map_err(Error::InvalidProtoArrayBytes)?;
- Ok(Self {
+ let current_slot = fc_store.get_current_slot();
+
+ let mut fork_choice = Self {
 fc_store,
 proto_array,
 queued_attestations: persisted.queued_attestations,
- forkchoice_update_parameters: None,
+ // Will be updated in the following call to `Self::get_head`.
+ forkchoice_update_parameters: ForkchoiceUpdateParameters {
+ head_hash: None,
+ finalized_hash: None,
+ head_root: Hash256::zero(),
+ },
+ // Will be updated in the following call to `Self::get_head`.
+ head_block_root: Hash256::zero(), _phantom: PhantomData, - }) + }; + + fork_choice.get_head(current_slot, spec)?; + + Ok(fork_choice) } /// Takes a snapshot of `Self` and stores it in `PersistedForkChoice`, allowing this struct to diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs index 7826007516..6df0cbc2c2 100644 --- a/consensus/fork_choice/src/fork_choice_store.rs +++ b/consensus/fork_choice/src/fork_choice_store.rs @@ -1,4 +1,4 @@ -use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot}; +use types::{BeaconBlockRef, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot}; /// Approximates the `Store` in "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice": /// @@ -33,7 +33,7 @@ pub trait ForkChoiceStore: Sized { /// choice. Allows the implementer to performing caching or other housekeeping duties. fn on_verified_block>( &mut self, - block: &BeaconBlock, + block: BeaconBlockRef, block_root: Hash256, state: &BeaconState, ) -> Result<(), Self::Error>; diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 157306dd5f..6f79b488dd 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -2,8 +2,9 @@ mod fork_choice; mod fork_choice_store; pub use crate::fork_choice::{ - AttestationFromBlock, Error, ForkChoice, InvalidAttestation, InvalidBlock, - PayloadVerificationStatus, PersistedForkChoice, QueuedAttestation, + AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, + InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, + QueuedAttestation, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation}; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 3f8a2ac6b6..2d10319cf0 100644 --- a/consensus/fork_choice/tests/tests.rs +++ 
b/consensus/fork_choice/tests/tests.rs @@ -16,9 +16,8 @@ use fork_choice::{ }; use store::MemoryStore; use types::{ - test_utils::generate_deterministic_keypair, BeaconBlock, BeaconBlockRef, BeaconState, - ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, IndexedAttestation, MainnetEthSpec, Slot, - SubnetId, + test_utils::generate_deterministic_keypair, BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, + Epoch, EthSpec, Hash256, IndexedAttestation, MainnetEthSpec, SignedBeaconBlock, Slot, SubnetId, }; pub type E = MainnetEthSpec; @@ -74,7 +73,14 @@ impl ForkChoiceTest { where T: Fn(&BeaconForkChoiceStore, MemoryStore>) -> U, { - func(&self.harness.chain.fork_choice.read().fc_store()) + func( + &self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .fc_store(), + ) } /// Assert the epochs match. @@ -109,15 +115,7 @@ impl ForkChoiceTest { /// Assert the given slot is greater than the head slot. pub fn assert_finalized_epoch_is_less_than(self, epoch: Epoch) -> Self { - assert!( - self.harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint - .epoch - < epoch - ); + assert!(self.harness.finalized_checkpoint().epoch < epoch); self } @@ -150,11 +148,17 @@ impl ForkChoiceTest { { self.harness .chain - .fork_choice - .write() + .canonical_head + .fork_choice_write_lock() .update_time(self.harness.chain.slot().unwrap()) .unwrap(); - func(self.harness.chain.fork_choice.read().queued_attestations()); + func( + self.harness + .chain + .canonical_head + .fork_choice_read_lock() + .queued_attestations(), + ); self } @@ -173,7 +177,7 @@ impl ForkChoiceTest { } /// Build the chain whilst `predicate` returns `true` and `process_block_result` does not error. 
- pub fn apply_blocks_while(self, mut predicate: F) -> Result + pub async fn apply_blocks_while(self, mut predicate: F) -> Result where F: FnMut(BeaconBlockRef<'_, E>, &BeaconState) -> bool, { @@ -182,12 +186,12 @@ impl ForkChoiceTest { let validators = self.harness.get_all_validators(); loop { let slot = self.harness.get_current_slot(); - let (block, state_) = self.harness.make_block(state, slot); + let (block, state_) = self.harness.make_block(state, slot).await; state = state_; if !predicate(block.message(), &state) { break; } - if let Ok(block_hash) = self.harness.process_block_result(block.clone()) { + if let Ok(block_hash) = self.harness.process_block_result(block.clone()).await { self.harness.attest_block( &state, block.state_root(), @@ -205,25 +209,29 @@ impl ForkChoiceTest { } /// Apply `count` blocks to the chain (with attestations). - pub fn apply_blocks(self, count: usize) -> Self { + pub async fn apply_blocks(self, count: usize) -> Self { self.harness.advance_slot(); - self.harness.extend_chain( - count, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + self.harness + .extend_chain( + count, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; self } /// Apply `count` blocks to the chain (without attestations). - pub fn apply_blocks_without_new_attestations(self, count: usize) -> Self { + pub async fn apply_blocks_without_new_attestations(self, count: usize) -> Self { self.harness.advance_slot(); - self.harness.extend_chain( - count, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(vec![]), - ); + self.harness + .extend_chain( + count, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; self } @@ -256,9 +264,9 @@ impl ForkChoiceTest { /// Applies a block directly to fork choice, bypassing the beacon chain. /// /// Asserts the block was applied successfully. 
- pub fn apply_block_directly_to_fork_choice(self, mut func: F) -> Self + pub async fn apply_block_directly_to_fork_choice(self, mut func: F) -> Self where - F: FnMut(&mut BeaconBlock, &mut BeaconState), + F: FnMut(&mut SignedBeaconBlock, &mut BeaconState), { let state = self .harness @@ -269,18 +277,17 @@ impl ForkChoiceTest { ) .unwrap(); let slot = self.harness.get_current_slot(); - let (signed_block, mut state) = self.harness.make_block(state, slot); - let (mut block, _) = signed_block.deconstruct(); - func(&mut block, &mut state); + let (mut signed_block, mut state) = self.harness.make_block(state, slot).await; + func(&mut signed_block, &mut state); let current_slot = self.harness.get_current_slot(); self.harness .chain - .fork_choice - .write() + .canonical_head + .fork_choice_write_lock() .on_block( current_slot, - &block, - block.canonical_root(), + signed_block.message(), + signed_block.canonical_root(), Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, @@ -293,13 +300,13 @@ impl ForkChoiceTest { /// Applies a block directly to fork choice, bypassing the beacon chain. /// /// Asserts that an error occurred and allows inspecting it via `comparison_func`. 
- pub fn apply_invalid_block_directly_to_fork_choice( + pub async fn apply_invalid_block_directly_to_fork_choice( self, mut mutation_func: F, mut comparison_func: G, ) -> Self where - F: FnMut(&mut BeaconBlock, &mut BeaconState), + F: FnMut(&mut SignedBeaconBlock, &mut BeaconState), G: FnMut(ForkChoiceError), { let state = self @@ -311,19 +318,18 @@ impl ForkChoiceTest { ) .unwrap(); let slot = self.harness.get_current_slot(); - let (signed_block, mut state) = self.harness.make_block(state, slot); - let (mut block, _) = signed_block.deconstruct(); - mutation_func(&mut block, &mut state); + let (mut signed_block, mut state) = self.harness.make_block(state, slot).await; + mutation_func(&mut signed_block, &mut state); let current_slot = self.harness.get_current_slot(); let err = self .harness .chain - .fork_choice - .write() + .canonical_head + .fork_choice_write_lock() .on_block( current_slot, - &block, - block.canonical_root(), + signed_block.message(), + signed_block.canonical_root(), Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, @@ -339,7 +345,7 @@ impl ForkChoiceTest { /// database. fn check_justified_balances(&self) { let harness = &self.harness; - let fc = self.harness.chain.fork_choice.read(); + let fc = self.harness.chain.canonical_head.fork_choice_read_lock(); let state_root = harness .chain @@ -377,7 +383,7 @@ impl ForkChoiceTest { /// Returns an attestation that is valid for some slot in the given `chain`. /// /// Also returns some info about who created it. 
- fn apply_attestation_to_chain( + async fn apply_attestation_to_chain( self, delay: MutationDelay, mut mutation_func: F, @@ -387,7 +393,7 @@ impl ForkChoiceTest { F: FnMut(&mut IndexedAttestation, &BeaconChain>), G: FnMut(Result<(), BeaconChainError>), { - let head = self.harness.chain.head().expect("should get head"); + let head = self.harness.chain.head_snapshot(); let current_slot = self.harness.chain.slot().expect("should get slot"); let mut attestation = self @@ -438,11 +444,13 @@ impl ForkChoiceTest { if let MutationDelay::Blocks(slots) = delay { self.harness.advance_slot(); - self.harness.extend_chain( - slots, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(vec![]), - ); + self.harness + .extend_chain( + slots, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; } mutation_func( @@ -464,17 +472,9 @@ impl ForkChoiceTest { pub fn check_finalized_block_is_accessible(self) -> Self { self.harness .chain - .fork_choice - .write() - .get_block( - &self - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint - .root, - ) + .canonical_head + .fork_choice_read_lock() + .get_block(&self.harness.finalized_checkpoint().root) .unwrap(); self @@ -488,7 +488,7 @@ fn is_safe_to_update(slot: Slot, spec: &ChainSpec) -> bool { #[test] fn justified_and_finalized_blocks() { let tester = ForkChoiceTest::new(); - let fork_choice = tester.harness.chain.fork_choice.read(); + let fork_choice = tester.harness.chain.canonical_head.fork_choice_read_lock(); let justified_checkpoint = fork_choice.justified_checkpoint(); assert_eq!(justified_checkpoint.epoch, 0); @@ -503,44 +503,50 @@ fn justified_and_finalized_blocks() { /// - The new justified checkpoint descends from the current. 
/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -#[test] -fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { +#[tokio::test] +async fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .move_inside_safe_to_update() .assert_justified_epoch(0) .apply_blocks(1) + .await .assert_justified_epoch(2); } /// - The new justified checkpoint descends from the current. /// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - This is **not** the first justification since genesis -#[test] -fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { +#[tokio::test] +async fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch <= 2) + .await .unwrap() .move_outside_safe_to_update() .assert_justified_epoch(2) .assert_best_justified_epoch(2) .apply_blocks(1) + .await .assert_justified_epoch(3); } /// - The new justified checkpoint descends from the current. /// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - This is the first justification since genesis -#[test] -fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { +#[tokio::test] +async fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .move_to_next_unsafe_period() .assert_justified_epoch(0) .assert_best_justified_epoch(0) .apply_blocks(1) + .await .assert_justified_epoch(2) .assert_best_justified_epoch(2); } @@ -548,12 +554,14 @@ fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { /// - The new justified checkpoint **does not** descend from the current. 
/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - Finalized epoch has **not** increased. -#[test] -fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_finality() { +#[tokio::test] +async fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_finality() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .move_inside_safe_to_update() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { @@ -567,6 +575,7 @@ fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_fi .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); }) + .await .assert_justified_epoch(3) .assert_best_justified_epoch(3); } @@ -574,12 +583,14 @@ fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_fi /// - The new justified checkpoint **does not** descend from the current. /// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED`. /// - Finalized epoch has **not** increased. 
-#[test] -fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_finality() { +#[tokio::test] +async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_finality() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .move_to_next_unsafe_period() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { @@ -593,6 +604,7 @@ fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_f .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); }) + .await .assert_justified_epoch(2) .assert_best_justified_epoch(3); } @@ -600,12 +612,14 @@ fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_f /// - The new justified checkpoint **does not** descend from the current. /// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - Finalized epoch has increased. -#[test] -fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_finality() { +#[tokio::test] +async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_finality() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .move_to_next_unsafe_period() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { @@ -619,17 +633,20 @@ fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_fina .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); }) + .await .assert_justified_epoch(3) .assert_best_justified_epoch(3); } /// Check that the balances are obtained correctly. 
-#[test] -fn justified_balances() { +#[tokio::test] +async fn justified_balances() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_justified_epoch(2) .check_justified_balances() } @@ -648,15 +665,16 @@ macro_rules! assert_invalid_block { /// Specification v0.12.1 /// /// assert block.parent_root in store.block_states -#[test] -fn invalid_block_unknown_parent() { +#[tokio::test] +async fn invalid_block_unknown_parent() { let junk = Hash256::from_low_u64_be(42); ForkChoiceTest::new() .apply_blocks(2) + .await .apply_invalid_block_directly_to_fork_choice( |block, _| { - *block.parent_root_mut() = junk; + *block.message_mut().parent_root_mut() = junk; }, |err| { assert_invalid_block!( @@ -665,36 +683,42 @@ fn invalid_block_unknown_parent() { if parent == junk ) }, - ); + ) + .await; } /// Specification v0.12.1 /// /// assert get_current_slot(store) >= block.slot -#[test] -fn invalid_block_future_slot() { +#[tokio::test] +async fn invalid_block_future_slot() { ForkChoiceTest::new() .apply_blocks(2) + .await .apply_invalid_block_directly_to_fork_choice( |block, _| { - *block.slot_mut() += 1; + *block.message_mut().slot_mut() += 1; }, |err| assert_invalid_block!(err, InvalidBlock::FutureSlot { .. 
}), - ); + ) + .await; } /// Specification v0.12.1 /// /// assert block.slot > finalized_slot -#[test] -fn invalid_block_finalized_slot() { +#[tokio::test] +async fn invalid_block_finalized_slot() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .apply_invalid_block_directly_to_fork_choice( |block, _| { - *block.slot_mut() = Epoch::new(2).start_slot(E::slots_per_epoch()) - 1; + *block.message_mut().slot_mut() = + Epoch::new(2).start_slot(E::slots_per_epoch()) - 1; }, |err| { assert_invalid_block!( @@ -703,7 +727,8 @@ fn invalid_block_finalized_slot() { if finalized_slot == Epoch::new(2).start_slot(E::slots_per_epoch()) ) }, - ); + ) + .await; } /// Specification v0.12.1 @@ -714,18 +739,20 @@ fn invalid_block_finalized_slot() { /// Note: we technically don't do this exact check, but an equivalent check. Reference: /// /// https://github.com/ethereum/eth2.0-specs/pull/1884 -#[test] -fn invalid_block_finalized_descendant() { +#[tokio::test] +async fn invalid_block_finalized_descendant() { let invalid_ancestor = Mutex::new(Hash256::zero()); ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2) .apply_invalid_block_directly_to_fork_choice( |block, state| { - *block.parent_root_mut() = *state + *block.message_mut().parent_root_mut() = *state .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); *invalid_ancestor.lock().unwrap() = block.parent_root(); @@ -737,7 +764,8 @@ fn invalid_block_finalized_descendant() { if block_ancestor == Some(*invalid_ancestor.lock().unwrap()) ) }, - ); + ) + .await; } macro_rules! assert_invalid_attestation { @@ -754,23 +782,26 @@ macro_rules! assert_invalid_attestation { } /// Ensure we can process a valid attestation. 
-#[test] -fn valid_attestation() { +#[tokio::test] +async fn valid_attestation() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |_, _| {}, |result| assert_eq!(result.unwrap(), ()), - ); + ) + .await; } /// This test is not in the specification, however we reject an attestation with an empty /// aggregation bitfield since it has no purpose beyond wasting our time. -#[test] -fn invalid_attestation_empty_bitfield() { +#[tokio::test] +async fn invalid_attestation_empty_bitfield() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -779,7 +810,8 @@ fn invalid_attestation_empty_bitfield() { |result| { assert_invalid_attestation!(result, InvalidAttestation::EmptyAggregationBitfield) }, - ); + ) + .await; } /// Specification v0.12.1: @@ -787,10 +819,11 @@ fn invalid_attestation_empty_bitfield() { /// assert target.epoch in [expected_current_epoch, previous_epoch] /// /// (tests epoch after current epoch) -#[test] -fn invalid_attestation_future_epoch() { +#[tokio::test] +async fn invalid_attestation_future_epoch() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -803,7 +836,8 @@ fn invalid_attestation_future_epoch() { if attestation_epoch == Epoch::new(2) && current_epoch == Epoch::new(0) ) }, - ); + ) + .await; } /// Specification v0.12.1: @@ -811,10 +845,11 @@ fn invalid_attestation_future_epoch() { /// assert target.epoch in [expected_current_epoch, previous_epoch] /// /// (tests epoch prior to previous epoch) -#[test] -fn invalid_attestation_past_epoch() { +#[tokio::test] +async fn invalid_attestation_past_epoch() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(E::slots_per_epoch() as usize * 3 + 1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { 
@@ -827,16 +862,18 @@ fn invalid_attestation_past_epoch() { if attestation_epoch == Epoch::new(0) && current_epoch == Epoch::new(3) ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert target.epoch == compute_epoch_at_slot(attestation.data.slot) -#[test] -fn invalid_attestation_target_epoch() { +#[tokio::test] +async fn invalid_attestation_target_epoch() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(E::slots_per_epoch() as usize + 1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -849,18 +886,20 @@ fn invalid_attestation_target_epoch() { if target == Epoch::new(1) && slot == Slot::new(1) ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert target.root in store.blocks -#[test] -fn invalid_attestation_unknown_target_root() { +#[tokio::test] +async fn invalid_attestation_unknown_target_root() { let junk = Hash256::from_low_u64_be(42); ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -873,18 +912,20 @@ fn invalid_attestation_unknown_target_root() { if root == junk ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert attestation.data.beacon_block_root in store.blocks -#[test] -fn invalid_attestation_unknown_beacon_block_root() { +#[tokio::test] +async fn invalid_attestation_unknown_beacon_block_root() { let junk = Hash256::from_low_u64_be(42); ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -897,16 +938,18 @@ fn invalid_attestation_unknown_beacon_block_root() { if beacon_block_root == junk ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot -#[test] -fn invalid_attestation_future_block() { +#[tokio::test] +async fn invalid_attestation_future_block() { ForkChoiceTest::new() 
.apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::Blocks(1), |attestation, chain| { @@ -923,19 +966,21 @@ fn invalid_attestation_future_block() { if block == 2 && attestation == 1 ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert target.root == get_ancestor(store, attestation.data.beacon_block_root, target_slot) -#[test] -fn invalid_attestation_inconsistent_ffg_vote() { +#[tokio::test] +async fn invalid_attestation_inconsistent_ffg_vote() { let local_opt = Mutex::new(None); let attestation_opt = Mutex::new(None); ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, chain| { @@ -962,22 +1007,25 @@ fn invalid_attestation_inconsistent_ffg_vote() { && local == local_opt.lock().unwrap().unwrap() ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert get_current_slot(store) >= attestation.data.slot + 1 -#[test] -fn invalid_attestation_delayed_slot() { +#[tokio::test] +async fn invalid_attestation_delayed_slot() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .inspect_queued_attestations(|queue| assert_eq!(queue.len(), 0)) .apply_attestation_to_chain( MutationDelay::NoDelay, |_, _| {}, |result| assert_eq!(result.unwrap(), ()), ) + .await .inspect_queued_attestations(|queue| assert_eq!(queue.len(), 1)) .skip_slot() .inspect_queued_attestations(|queue| assert_eq!(queue.len(), 0)); @@ -985,10 +1033,11 @@ fn invalid_attestation_delayed_slot() { /// Tests that the correct target root is used when the attested-to block is in a prior epoch to /// the attestation. 
-#[test] -fn valid_attestation_skip_across_epoch() { +#[tokio::test] +async fn valid_attestation_skip_across_epoch() { ForkChoiceTest::new() .apply_blocks(E::slots_per_epoch() as usize - 1) + .await .skip_slots(2) .apply_attestation_to_chain( MutationDelay::NoDelay, @@ -999,15 +1048,18 @@ fn valid_attestation_skip_across_epoch() { ) }, |result| result.unwrap(), - ); + ) + .await; } -#[test] -fn can_read_finalized_block() { +#[tokio::test] +async fn can_read_finalized_block() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .check_finalized_block_is_accessible(); } @@ -1025,8 +1077,8 @@ fn weak_subjectivity_fail_on_startup() { ForkChoiceTest::new_with_chain_config(chain_config); } -#[test] -fn weak_subjectivity_pass_on_startup() { +#[tokio::test] +async fn weak_subjectivity_pass_on_startup() { let epoch = Epoch::new(0); let root = Hash256::zero(); @@ -1037,23 +1089,21 @@ fn weak_subjectivity_pass_on_startup() { ForkChoiceTest::new_with_chain_config(chain_config) .apply_blocks(E::slots_per_epoch() as usize) + .await .assert_shutdown_signal_not_sent(); } -#[test] -fn weak_subjectivity_check_passes() { +#[tokio::test] +async fn weak_subjectivity_check_passes() { let setup_harness = ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2); - let checkpoint = setup_harness - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint; + let checkpoint = setup_harness.harness.finalized_checkpoint(); let chain_config = ChainConfig { weak_subjectivity_checkpoint: Some(checkpoint), @@ -1062,26 +1112,25 @@ fn weak_subjectivity_check_passes() { ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2) 
.assert_shutdown_signal_not_sent(); } -#[test] -fn weak_subjectivity_check_fails_early_epoch() { +#[tokio::test] +async fn weak_subjectivity_check_fails_early_epoch() { let setup_harness = ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2); - let mut checkpoint = setup_harness - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint; + let mut checkpoint = setup_harness.harness.finalized_checkpoint(); checkpoint.epoch = checkpoint.epoch - 1; @@ -1092,25 +1141,23 @@ fn weak_subjectivity_check_fails_early_epoch() { ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 3) + .await .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); } -#[test] -fn weak_subjectivity_check_fails_late_epoch() { +#[tokio::test] +async fn weak_subjectivity_check_fails_late_epoch() { let setup_harness = ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2); - let mut checkpoint = setup_harness - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint; + let mut checkpoint = setup_harness.harness.finalized_checkpoint(); checkpoint.epoch = checkpoint.epoch + 1; @@ -1121,25 +1168,23 @@ fn weak_subjectivity_check_fails_late_epoch() { ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 4) + .await .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); } -#[test] -fn weak_subjectivity_check_fails_incorrect_root() { +#[tokio::test] +async fn weak_subjectivity_check_fails_incorrect_root() { let setup_harness = ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + 
.await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2); - let mut checkpoint = setup_harness - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint; + let mut checkpoint = setup_harness.harness.finalized_checkpoint(); checkpoint.root = Hash256::zero(); @@ -1150,27 +1195,31 @@ fn weak_subjectivity_check_fails_incorrect_root() { ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 3) + .await .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); } -#[test] -fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { +#[tokio::test] +async fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { let setup_harness = ForkChoiceTest::new() // first two epochs .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap(); // get the head, it will become the finalized root of epoch 4 - let checkpoint_root = setup_harness.harness.chain.head_info().unwrap().block_root; + let checkpoint_root = setup_harness.harness.head_block_root(); setup_harness // epoch 3 will be entirely skip slots .skip_slots(E::slots_per_epoch() as usize) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(5); // the checkpoint at epoch 4 should become the root of last block of epoch 2 @@ -1187,31 +1236,37 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { // recreate the chain exactly ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .skip_slots(E::slots_per_epoch() as usize) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(5) .assert_shutdown_signal_not_sent(); } -#[test] -fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() 
{ +#[tokio::test] +async fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() { let setup_harness = ForkChoiceTest::new() // first two epochs .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap(); // get the head, it will become the finalized root of epoch 4 - let checkpoint_root = setup_harness.harness.chain.head_info().unwrap().block_root; + let checkpoint_root = setup_harness.harness.head_block_root(); setup_harness // epoch 3 will be entirely skip slots .skip_slots(E::slots_per_epoch() as usize) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(5); // Invalid checkpoint (epoch too early) @@ -1228,9 +1283,11 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() { // recreate the chain exactly ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .skip_slots(E::slots_per_epoch() as usize) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 6) + .await .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index acdb42897a..22d457ca3e 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -16,6 +16,7 @@ four_byte_option_impl!(four_byte_option_usize, usize); four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); /// Defines an operation which may invalidate the `execution_status` of some nodes. +#[derive(Clone)] pub enum InvalidationOperation { /// Invalidate only `block_root` and it's descendants. Don't invalidate any ancestors. 
InvalidateOne { block_root: Hash256 }, diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index a0ce237481..c7ed4b308d 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" [dev-dependencies] env_logger = "0.9.0" beacon_chain = { path = "../../beacon_node/beacon_chain" } +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } [dependencies] bls = { path = "../../crypto/bls" } diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index b75a79c72e..2daefdacad 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -27,7 +27,7 @@ lazy_static! { static ref KEYPAIRS: Vec = generate_deterministic_keypairs(MAX_VALIDATOR_COUNT); } -fn get_harness( +async fn get_harness( epoch_offset: u64, num_validators: usize, ) -> BeaconChainHarness> { @@ -41,27 +41,31 @@ fn get_harness( .build(); let state = harness.get_current_state(); if last_slot_of_epoch > Slot::new(0) { - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - (1..last_slot_of_epoch.as_u64()) - .map(Slot::new) - .collect::>() - .as_slice(), - (0..num_validators).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + (1..last_slot_of_epoch.as_u64()) + .map(Slot::new) + .collect::>() + .as_slice(), + (0..num_validators).collect::>().as_slice(), + ) + .await; } harness } -#[test] -fn valid_block_ok() { +#[tokio::test] +async fn valid_block_ok() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot(); - let (block, mut state) = harness.make_block_return_pre_state(state, slot + Slot::new(1)); 
+ let (block, mut state) = harness + .make_block_return_pre_state(state, slot + Slot::new(1)) + .await; let result = per_block_processing( &mut state, @@ -75,15 +79,15 @@ fn valid_block_ok() { assert!(result.is_ok()); } -#[test] -fn invalid_block_header_state_slot() { +#[tokio::test] +async fn invalid_block_header_state_slot() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot() + Slot::new(1); - let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot); + let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot).await; let (mut block, signature) = signed_block.deconstruct(); *block.slot_mut() = slot + Slot::new(1); @@ -104,15 +108,17 @@ fn invalid_block_header_state_slot() { ); } -#[test] -fn invalid_parent_block_root() { +#[tokio::test] +async fn invalid_parent_block_root() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot + Slot::new(1)); + let (signed_block, mut state) = harness + .make_block_return_pre_state(state, slot + Slot::new(1)) + .await; let (mut block, signature) = signed_block.deconstruct(); *block.parent_root_mut() = Hash256::from([0xAA; 32]); @@ -136,14 +142,16 @@ fn invalid_parent_block_root() { ); } -#[test] -fn invalid_block_signature() { +#[tokio::test] +async fn invalid_block_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, 
mut state) = harness.make_block_return_pre_state(state, slot + Slot::new(1)); + let (signed_block, mut state) = harness + .make_block_return_pre_state(state, slot + Slot::new(1)) + .await; let (block, _) = signed_block.deconstruct(); let result = per_block_processing( @@ -164,17 +172,19 @@ fn invalid_block_signature() { ); } -#[test] -fn invalid_randao_reveal_signature() { +#[tokio::test] +async fn invalid_randao_reveal_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness.make_block_with_modifier(state, slot + 1, |block| { - *block.body_mut().randao_reveal_mut() = Signature::empty(); - }); + let (signed_block, mut state) = harness + .make_block_with_modifier(state, slot + 1, |block| { + *block.body_mut().randao_reveal_mut() = Signature::empty(); + }) + .await; let result = per_block_processing( &mut state, @@ -189,16 +199,22 @@ fn invalid_randao_reveal_signature() { assert_eq!(result, Err(BlockProcessingError::RandaoSignatureInvalid)); } -#[test] -fn valid_4_deposits() { +#[tokio::test] +async fn valid_4_deposits() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 4, None, None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); @@ -207,16 +223,22 @@ fn 
valid_4_deposits() { assert_eq!(result, Ok(())); } -#[test] -fn invalid_deposit_deposit_count_too_big() { +#[tokio::test] +async fn invalid_deposit_deposit_count_too_big() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let big_deposit_count = NUM_DEPOSITS + 1; @@ -233,16 +255,22 @@ fn invalid_deposit_deposit_count_too_big() { ); } -#[test] -fn invalid_deposit_count_too_small() { +#[tokio::test] +async fn invalid_deposit_count_too_small() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let small_deposit_count = NUM_DEPOSITS - 1; @@ -259,16 +287,22 @@ fn invalid_deposit_count_too_small() { ); } -#[test] -fn invalid_deposit_bad_merkle_proof() { +#[tokio::test] +async fn invalid_deposit_bad_merkle_proof() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = 
harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let bad_index = state.eth1_deposit_index() as usize; @@ -287,17 +321,23 @@ fn invalid_deposit_bad_merkle_proof() { ); } -#[test] -fn invalid_deposit_wrong_sig() { +#[tokio::test] +async fn invalid_deposit_wrong_sig() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, Some(SignatureBytes::empty())); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); @@ -305,17 +345,23 @@ fn invalid_deposit_wrong_sig() { assert_eq!(result, Ok(())); } -#[test] -fn invalid_deposit_invalid_pub_key() { +#[tokio::test] +async fn invalid_deposit_invalid_pub_key() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, Some(PublicKeyBytes::empty()), None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + 
.chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); @@ -324,13 +370,19 @@ fn invalid_deposit_invalid_pub_key() { assert_eq!(result, Ok(())); } -#[test] -fn invalid_attestation_no_committee_for_index() { +#[tokio::test] +async fn invalid_attestation_no_committee_for_index() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; head_block.to_mut().body_mut().attestations_mut()[0] .data .index += 1; @@ -352,13 +404,19 @@ fn invalid_attestation_no_committee_for_index() { ); } -#[test] -fn invalid_attestation_wrong_justified_checkpoint() { +#[tokio::test] +async fn invalid_attestation_wrong_justified_checkpoint() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; let old_justified_checkpoint = head_block.body().attestations()[0].data.source; let mut new_justified_checkpoint = old_justified_checkpoint; new_justified_checkpoint.epoch += Epoch::new(1); @@ -389,13 +447,19 @@ fn invalid_attestation_wrong_justified_checkpoint() { ); } -#[test] -fn invalid_attestation_bad_aggregation_bitfield_len() { +#[tokio::test] +async fn invalid_attestation_bad_aggregation_bitfield_len() { let spec = 
MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; head_block.to_mut().body_mut().attestations_mut()[0].aggregation_bits = Bitfield::with_capacity(spec.target_committee_size).unwrap(); @@ -416,13 +480,19 @@ fn invalid_attestation_bad_aggregation_bitfield_len() { ); } -#[test] -fn invalid_attestation_bad_signature() { +#[tokio::test] +async fn invalid_attestation_bad_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, 97); // minimal number of required validators for this test + let harness = get_harness::(EPOCH_OFFSET, 97).await; // minimal number of required validators for this test let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; head_block.to_mut().body_mut().attestations_mut()[0].signature = AggregateSignature::empty(); let result = process_operations::process_attestations( @@ -444,13 +514,19 @@ fn invalid_attestation_bad_signature() { ); } -#[test] -fn invalid_attestation_included_too_early() { +#[tokio::test] +async fn invalid_attestation_included_too_early() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; let 
new_attesation_slot = head_block.body().attestations()[0].data.slot + Slot::new(MainnetEthSpec::slots_per_epoch()); head_block.to_mut().body_mut().attestations_mut()[0] @@ -479,14 +555,20 @@ fn invalid_attestation_included_too_early() { ); } -#[test] -fn invalid_attestation_included_too_late() { +#[tokio::test] +async fn invalid_attestation_included_too_late() { let spec = MainnetEthSpec::default_spec(); // note to maintainer: might need to increase validator count if we get NoCommittee - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; let new_attesation_slot = head_block.body().attestations()[0].data.slot - Slot::new(MainnetEthSpec::slots_per_epoch()); head_block.to_mut().body_mut().attestations_mut()[0] @@ -512,14 +594,20 @@ fn invalid_attestation_included_too_late() { ); } -#[test] -fn invalid_attestation_target_epoch_slot_mismatch() { +#[tokio::test] +async fn invalid_attestation_target_epoch_slot_mismatch() { let spec = MainnetEthSpec::default_spec(); // note to maintainer: might need to increase validator count if we get NoCommittee - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; head_block.to_mut().body_mut().attestations_mut()[0] .data .target @@ -544,10 +632,10 @@ fn invalid_attestation_target_epoch_slot_mismatch() { ); } -#[test] -fn valid_insert_attester_slashing() { +#[tokio::test] +async fn valid_insert_attester_slashing() { let 
spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let attester_slashing = harness.make_attester_slashing(vec![1, 2]); @@ -563,10 +651,10 @@ fn valid_insert_attester_slashing() { assert_eq!(result, Ok(())); } -#[test] -fn invalid_attester_slashing_not_slashable() { +#[tokio::test] +async fn invalid_attester_slashing_not_slashable() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); attester_slashing.attestation_1 = attester_slashing.attestation_2.clone(); @@ -589,10 +677,10 @@ fn invalid_attester_slashing_not_slashable() { ); } -#[test] -fn invalid_attester_slashing_1_invalid() { +#[tokio::test] +async fn invalid_attester_slashing_1_invalid() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]); @@ -618,10 +706,10 @@ fn invalid_attester_slashing_1_invalid() { ); } -#[test] -fn invalid_attester_slashing_2_invalid() { +#[tokio::test] +async fn invalid_attester_slashing_2_invalid() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]); @@ -647,10 +735,10 @@ fn invalid_attester_slashing_2_invalid() { ); } -#[test] -fn valid_insert_proposer_slashing() { +#[tokio::test] +async fn valid_insert_proposer_slashing() { let 
spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let proposer_slashing = harness.make_proposer_slashing(1); let mut state = harness.get_current_state(); let result = process_operations::process_proposer_slashings( @@ -663,10 +751,10 @@ fn valid_insert_proposer_slashing() { assert!(result.is_ok()); } -#[test] -fn invalid_proposer_slashing_proposals_identical() { +#[tokio::test] +async fn invalid_proposer_slashing_proposals_identical() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.message = proposer_slashing.signed_header_2.message.clone(); @@ -689,10 +777,10 @@ fn invalid_proposer_slashing_proposals_identical() { ); } -#[test] -fn invalid_proposer_slashing_proposer_unknown() { +#[tokio::test] +async fn invalid_proposer_slashing_proposer_unknown() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.message.proposer_index = 3_141_592; @@ -716,10 +804,10 @@ fn invalid_proposer_slashing_proposer_unknown() { ); } -#[test] -fn invalid_proposer_slashing_duplicate_slashing() { +#[tokio::test] +async fn invalid_proposer_slashing_duplicate_slashing() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let proposer_slashing = harness.make_proposer_slashing(1); let mut state = harness.get_current_state(); @@ -747,10 +835,10 @@ fn invalid_proposer_slashing_duplicate_slashing() { ); } -#[test] -fn 
invalid_bad_proposal_1_signature() { +#[tokio::test] +async fn invalid_bad_proposal_1_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.signature = Signature::empty(); let mut state = harness.get_current_state(); @@ -771,10 +859,10 @@ fn invalid_bad_proposal_1_signature() { ); } -#[test] -fn invalid_bad_proposal_2_signature() { +#[tokio::test] +async fn invalid_bad_proposal_2_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_2.signature = Signature::empty(); let mut state = harness.get_current_state(); @@ -795,10 +883,10 @@ fn invalid_bad_proposal_2_signature() { ); } -#[test] -fn invalid_proposer_slashing_proposal_epoch_mismatch() { +#[tokio::test] +async fn invalid_proposer_slashing_proposal_epoch_mismatch() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.message.slot = Slot::new(0); proposer_slashing.signed_header_2.message.slot = Slot::new(128); diff --git a/consensus/state_processing/src/per_epoch_processing/tests.rs b/consensus/state_processing/src/per_epoch_processing/tests.rs index 4379547bfe..14bbfbc071 100644 --- a/consensus/state_processing/src/per_epoch_processing/tests.rs +++ b/consensus/state_processing/src/per_epoch_processing/tests.rs @@ -6,8 +6,8 @@ use bls::Hash256; use env_logger::{Builder, Env}; use types::Slot; -#[test] -fn runs_without_error() { +#[tokio::test] 
+async fn runs_without_error() { Builder::from_env(Env::default().default_filter_or("error")).init(); let harness = BeaconChainHarness::builder(MinimalEthSpec) @@ -22,15 +22,17 @@ fn runs_without_error() { (MinimalEthSpec::genesis_epoch() + 4).end_slot(MinimalEthSpec::slots_per_epoch()); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - (1..target_slot.as_u64()) - .map(Slot::new) - .collect::>() - .as_slice(), - (0..8).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + (1..target_slot.as_u64()) + .map(Slot::new) + .collect::>() + .as_slice(), + (0..8).collect::>().as_slice(), + ) + .await; let mut new_head_state = harness.get_current_state(); process_epoch(&mut new_head_state, &spec).unwrap(); @@ -45,8 +47,8 @@ mod release_tests { use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; use types::{Epoch, ForkName, InconsistentFork, MainnetEthSpec}; - #[test] - fn altair_state_on_base_fork() { + #[tokio::test] + async fn altair_state_on_base_fork() { let mut spec = MainnetEthSpec::default_spec(); let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); // The Altair fork happens at epoch 1. @@ -61,12 +63,14 @@ mod release_tests { harness.advance_slot(); - harness.extend_chain( - // Build out enough blocks so we get an Altair block at the very end of an epoch. - (slots_per_epoch * 2 - 1) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + // Build out enough blocks so we get an Altair block at the very end of an epoch. 
+ (slots_per_epoch * 2 - 1) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness.get_current_state() }; @@ -103,8 +107,8 @@ mod release_tests { ); } - #[test] - fn base_state_on_altair_fork() { + #[tokio::test] + async fn base_state_on_altair_fork() { let mut spec = MainnetEthSpec::default_spec(); let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); // The Altair fork never happens. @@ -119,12 +123,14 @@ mod release_tests { harness.advance_slot(); - harness.extend_chain( - // Build out enough blocks so we get a block at the very end of an epoch. - (slots_per_epoch * 2 - 1) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + // Build out enough blocks so we get a block at the very end of an epoch. + (slots_per_epoch * 2 - 1) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness.get_current_state() }; diff --git a/consensus/tree_hash/examples/flamegraph_beacon_state.rs b/consensus/tree_hash/examples/flamegraph_beacon_state.rs index cb9fc9390a..e5b505bb91 100644 --- a/consensus/tree_hash/examples/flamegraph_beacon_state.rs +++ b/consensus/tree_hash/examples/flamegraph_beacon_state.rs @@ -17,7 +17,7 @@ fn get_harness() -> BeaconChainHarness> { } fn build_state() -> BeaconState { - let state = get_harness::().chain.head_beacon_state().unwrap(); + let state = get_harness::().chain.head_beacon_state_cloned(); assert_eq!(state.as_base().unwrap().validators.len(), VALIDATOR_COUNT); assert_eq!(state.as_base().unwrap().balances.len(), VALIDATOR_COUNT); diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 96018230f0..c3e454fdfc 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -53,6 +53,7 @@ criterion = "0.3.3" beacon_chain = { path = "../../beacon_node/beacon_chain" } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } state_processing = { 
path = "../state_processing" } +tokio = "1.14.0" [features] default = ["sqlite", "legacy-arith"] diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 6eb12ddf05..2d7e68a5c4 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -38,7 +38,7 @@ use tree_hash_derive::TreeHash; derive(Debug, PartialEq, TreeHash), tree_hash(enum_behaviour = "transparent") ), - map_ref_into(BeaconBlockBodyRef), + map_ref_into(BeaconBlockBodyRef, BeaconBlock), map_ref_mut_into(BeaconBlockBodyRefMut) )] #[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] @@ -541,6 +541,50 @@ impl_from!(BeaconBlockBase, >, >, |body: impl_from!(BeaconBlockAltair, >, >, |body: BeaconBlockBodyAltair<_, _>| body.into()); impl_from!(BeaconBlockMerge, >, >, |body: BeaconBlockBodyMerge<_, _>| body.into()); +// We can clone blocks with payloads to blocks without payloads, without cloning the payload. +macro_rules! impl_clone_as_blinded { + ($ty_name:ident, <$($from_params:ty),*>, <$($to_params:ty),*>) => { + impl $ty_name<$($from_params),*> + { + pub fn clone_as_blinded(&self) -> $ty_name<$($to_params),*> { + let $ty_name { + slot, + proposer_index, + parent_root, + state_root, + body, + } = self; + + $ty_name { + slot: *slot, + proposer_index: *proposer_index, + parent_root: *parent_root, + state_root: *state_root, + body: body.clone_as_blinded(), + } + } + } + } +} + +impl_clone_as_blinded!(BeaconBlockBase, >, >); +impl_clone_as_blinded!(BeaconBlockAltair, >, >); +impl_clone_as_blinded!(BeaconBlockMerge, >, >); + +// A reference to a full beacon block can be cloned into a blinded beacon block, without cloning the +// execution payload. 
+impl<'a, E: EthSpec> From>> + for BeaconBlock> +{ + fn from( + full_block: BeaconBlockRef<'a, E, FullPayload>, + ) -> BeaconBlock> { + map_beacon_block_ref_into_beacon_block!(&'a _, full_block, |inner, cons| { + cons(inner.clone_as_blinded()) + }) + } +} + impl From>> for ( BeaconBlock>, diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 34761ea9a7..381a9bd43e 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -251,6 +251,53 @@ impl From>> } } +// We can clone a full block into a blinded block, without cloning the payload. +impl BeaconBlockBodyBase> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyBase> { + let (block_body, _payload) = self.clone().into(); + block_body + } +} + +impl BeaconBlockBodyAltair> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyAltair> { + let (block_body, _payload) = self.clone().into(); + block_body + } +} + +impl BeaconBlockBodyMerge> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyMerge> { + let BeaconBlockBodyMerge { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayload { execution_payload }, + } = self; + + BeaconBlockBodyMerge { + randao_reveal: randao_reveal.clone(), + eth1_data: eth1_data.clone(), + graffiti: *graffiti, + proposer_slashings: proposer_slashings.clone(), + attester_slashings: attester_slashings.clone(), + attestations: attestations.clone(), + deposits: deposits.clone(), + voluntary_exits: voluntary_exits.clone(), + sync_aggregate: sync_aggregate.clone(), + execution_payload: BlindedPayload { + execution_payload_header: From::from(execution_payload), + }, + } + } +} + impl From>> for ( BeaconBlockBody>, diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/src/beacon_state/committee_cache/tests.rs index 48998e26d0..db431138aa 100644 --- 
a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/src/beacon_state/committee_cache/tests.rs @@ -34,32 +34,34 @@ fn default_values() { assert!(cache.get_beacon_committees_at_slot(Slot::new(0)).is_err()); } -fn new_state(validator_count: usize, slot: Slot) -> BeaconState { +async fn new_state(validator_count: usize, slot: Slot) -> BeaconState { let harness = get_harness(validator_count); let head_state = harness.get_current_state(); if slot > Slot::new(0) { - harness.add_attested_blocks_at_slots( - head_state, - Hash256::zero(), - (1..slot.as_u64()) - .map(Slot::new) - .collect::>() - .as_slice(), - (0..validator_count).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + head_state, + Hash256::zero(), + (1..slot.as_u64()) + .map(Slot::new) + .collect::>() + .as_slice(), + (0..validator_count).collect::>().as_slice(), + ) + .await; } harness.get_current_state() } -#[test] +#[tokio::test] #[should_panic] -fn fails_without_validators() { - new_state::(0, Slot::new(0)); +async fn fails_without_validators() { + new_state::(0, Slot::new(0)).await; } -#[test] -fn initializes_with_the_right_epoch() { - let state = new_state::(16, Slot::new(0)); +#[tokio::test] +async fn initializes_with_the_right_epoch() { + let state = new_state::(16, Slot::new(0)).await; let spec = &MinimalEthSpec::default_spec(); let cache = CommitteeCache::default(); @@ -75,13 +77,13 @@ fn initializes_with_the_right_epoch() { assert!(cache.is_initialized_at(state.next_epoch().unwrap())); } -#[test] -fn shuffles_for_the_right_epoch() { +#[tokio::test] +async fn shuffles_for_the_right_epoch() { let num_validators = MinimalEthSpec::minimum_validator_count() * 2; let epoch = Epoch::new(6); let slot = epoch.start_slot(MinimalEthSpec::slots_per_epoch()); - let mut state = new_state::(num_validators, slot); + let mut state = new_state::(num_validators, slot).await; let spec = &MinimalEthSpec::default_spec(); let distinct_hashes: Vec = 
(0..MinimalEthSpec::epochs_per_historical_vector()) diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index b88b49e1a3..d65d0a9e6c 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -25,7 +25,7 @@ lazy_static! { static ref KEYPAIRS: Vec = generate_deterministic_keypairs(MAX_VALIDATOR_COUNT); } -fn get_harness( +async fn get_harness( validator_count: usize, slot: Slot, ) -> BeaconChainHarness> { @@ -41,24 +41,26 @@ fn get_harness( .map(Slot::new) .collect::>(); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - slots.as_slice(), - (0..validator_count).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + slots.as_slice(), + (0..validator_count).collect::>().as_slice(), + ) + .await; } harness } -fn build_state(validator_count: usize) -> BeaconState { +async fn build_state(validator_count: usize) -> BeaconState { get_harness(validator_count, Slot::new(0)) + .await .chain - .head_beacon_state() - .unwrap() + .head_beacon_state_cloned() } -fn test_beacon_proposer_index() { +async fn test_beacon_proposer_index() { let spec = T::default_spec(); // Get the i'th candidate proposer for the given state and slot @@ -85,20 +87,20 @@ fn test_beacon_proposer_index() { // Test where we have one validator per slot. // 0th candidate should be chosen every time. - let state = build_state(T::slots_per_epoch() as usize); + let state = build_state(T::slots_per_epoch() as usize).await; for i in 0..T::slots_per_epoch() { test(&state, Slot::from(i), 0); } // Test where we have two validators per slot. // 0th candidate should be chosen every time. 
- let state = build_state((T::slots_per_epoch() as usize).mul(2)); + let state = build_state((T::slots_per_epoch() as usize).mul(2)).await; for i in 0..T::slots_per_epoch() { test(&state, Slot::from(i), 0); } // Test with two validators per slot, first validator has zero balance. - let mut state = build_state::((T::slots_per_epoch() as usize).mul(2)); + let mut state = build_state::((T::slots_per_epoch() as usize).mul(2)).await; let slot0_candidate0 = ith_candidate(&state, Slot::new(0), 0, &spec); state.validators_mut()[slot0_candidate0].effective_balance = 0; test(&state, Slot::new(0), 1); @@ -107,9 +109,9 @@ fn test_beacon_proposer_index() { } } -#[test] -fn beacon_proposer_index() { - test_beacon_proposer_index::(); +#[tokio::test] +async fn beacon_proposer_index() { + test_beacon_proposer_index::().await; } /// Test that @@ -144,11 +146,11 @@ fn test_cache_initialization( ); } -#[test] -fn cache_initialization() { +#[tokio::test] +async fn cache_initialization() { let spec = MinimalEthSpec::default_spec(); - let mut state = build_state::(16); + let mut state = build_state::(16).await; *state.slot_mut() = (MinimalEthSpec::genesis_epoch() + 1).start_slot(MinimalEthSpec::slots_per_epoch()); @@ -211,11 +213,11 @@ fn test_clone_config(base_state: &BeaconState, clone_config: Clon } } -#[test] -fn clone_config() { +#[tokio::test] +async fn clone_config() { let spec = MinimalEthSpec::default_spec(); - let mut state = build_state::(16); + let mut state = build_state::(16).await; state.build_all_caches(&spec).unwrap(); state @@ -314,7 +316,7 @@ mod committees { assert!(expected_indices_iter.next().is_none()); } - fn committee_consistency_test( + async fn committee_consistency_test( validator_count: usize, state_epoch: Epoch, cache_epoch: RelativeEpoch, @@ -322,7 +324,7 @@ mod committees { let spec = &T::default_spec(); let slot = state_epoch.start_slot(T::slots_per_epoch()); - let harness = get_harness::(validator_count, slot); + let harness = 
get_harness::(validator_count, slot).await; let mut new_head_state = harness.get_current_state(); let distinct_hashes: Vec = (0..T::epochs_per_historical_vector()) @@ -350,7 +352,7 @@ mod committees { ); } - fn committee_consistency_test_suite(cached_epoch: RelativeEpoch) { + async fn committee_consistency_test_suite(cached_epoch: RelativeEpoch) { let spec = T::default_spec(); let validator_count = spec @@ -359,13 +361,15 @@ mod committees { .mul(spec.target_committee_size) .add(1); - committee_consistency_test::(validator_count as usize, Epoch::new(0), cached_epoch); + committee_consistency_test::(validator_count as usize, Epoch::new(0), cached_epoch) + .await; committee_consistency_test::( validator_count as usize, T::genesis_epoch() + 4, cached_epoch, - ); + ) + .await; committee_consistency_test::( validator_count as usize, @@ -374,38 +378,39 @@ mod committees { .mul(T::slots_per_epoch()) .mul(4), cached_epoch, - ); + ) + .await; } - #[test] - fn current_epoch_committee_consistency() { - committee_consistency_test_suite::(RelativeEpoch::Current); + #[tokio::test] + async fn current_epoch_committee_consistency() { + committee_consistency_test_suite::(RelativeEpoch::Current).await; } - #[test] - fn previous_epoch_committee_consistency() { - committee_consistency_test_suite::(RelativeEpoch::Previous); + #[tokio::test] + async fn previous_epoch_committee_consistency() { + committee_consistency_test_suite::(RelativeEpoch::Previous).await; } - #[test] - fn next_epoch_committee_consistency() { - committee_consistency_test_suite::(RelativeEpoch::Next); + #[tokio::test] + async fn next_epoch_committee_consistency() { + committee_consistency_test_suite::(RelativeEpoch::Next).await; } } mod get_outstanding_deposit_len { use super::*; - fn state() -> BeaconState { + async fn state() -> BeaconState { get_harness(16, Slot::new(0)) + .await .chain - .head_beacon_state() - .unwrap() + .head_beacon_state_cloned() } - #[test] - fn returns_ok() { - let mut state = state(); + 
#[tokio::test] + async fn returns_ok() { + let mut state = state().await; assert_eq!(state.get_outstanding_deposit_len(), Ok(0)); state.eth1_data_mut().deposit_count = 17; @@ -413,9 +418,9 @@ mod get_outstanding_deposit_len { assert_eq!(state.get_outstanding_deposit_len(), Ok(1)); } - #[test] - fn returns_err_if_the_state_is_invalid() { - let mut state = state(); + #[tokio::test] + async fn returns_err_if_the_state_is_invalid() { + let mut state = state().await; // The state is invalid, deposit count is lower than deposit index. state.eth1_data_mut().deposit_count = 16; *state.eth1_deposit_index_mut() = 17; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index d736f0be19..a21eeb63c2 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -28,6 +28,8 @@ pub trait ExecPayload: + Hash + TryFrom> + From> + + Send + + 'static { fn block_type() -> BlockType; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 5488070688..5c40c4685c 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -346,6 +346,14 @@ impl From> for SignedBlindedBeaconBlock { } } +// We can blind borrowed blocks with payloads by converting the payload into a header (without +// cloning the payload contents). 
+impl SignedBeaconBlock { + pub fn clone_as_blinded(&self) -> SignedBlindedBeaconBlock { + SignedBeaconBlock::from_block(self.message().into(), self.signature().clone()) + } +} + #[cfg(test)] mod test { use super::*; diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 6717bb0f46..50295df4b0 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -222,7 +222,7 @@ pub fn migrate_db( runtime_context: &RuntimeContext, log: Logger, ) -> Result<(), Error> { - let spec = runtime_context.eth2_config.spec.clone(); + let spec = &runtime_context.eth2_config.spec; let hot_path = client_config.get_db_path(); let cold_path = client_config.get_freezer_db_path(); @@ -236,7 +236,7 @@ pub fn migrate_db( Ok(()) }, client_config.store.clone(), - spec, + spec.clone(), log.clone(), )?; @@ -253,6 +253,7 @@ pub fn migrate_db( from, to, log, + spec, ) } diff --git a/slasher/service/src/service.rs b/slasher/service/src/service.rs index 88feff0bbc..091a95dc4c 100644 --- a/slasher/service/src/service.rs +++ b/slasher/service/src/service.rs @@ -216,14 +216,7 @@ impl SlasherService { }; // Add to local op pool. - if let Err(e) = beacon_chain.import_attester_slashing(verified_slashing) { - error!( - log, - "Beacon chain refused attester slashing"; - "error" => ?e, - "slashing" => ?slashing, - ); - } + beacon_chain.import_attester_slashing(verified_slashing); // Publish to the network if broadcast is enabled. 
if slasher.config().broadcast { diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index ac9ca8993c..64f4aa7538 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -81,11 +81,23 @@ pub struct Cases { } impl Cases { - pub fn test_results(&self, fork_name: ForkName) -> Vec { - self.test_cases - .into_par_iter() - .enumerate() - .map(|(i, (ref path, ref tc))| CaseResult::new(i, path, tc, tc.result(i, fork_name))) - .collect() + pub fn test_results(&self, fork_name: ForkName, use_rayon: bool) -> Vec { + if use_rayon { + self.test_cases + .into_par_iter() + .enumerate() + .map(|(i, (ref path, ref tc))| { + CaseResult::new(i, path, tc, tc.result(i, fork_name)) + }) + .collect() + } else { + self.test_cases + .iter() + .enumerate() + .map(|(i, (ref path, ref tc))| { + CaseResult::new(i, path, tc, tc.result(i, fork_name)) + }) + .collect() + } } } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 92c28aeb04..4f9f4dacad 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -7,15 +7,17 @@ use beacon_chain::{ obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, }, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainTypes, HeadInfo, + BeaconChainTypes, CachedHead, }; use serde_derive::Deserialize; use ssz_derive::Decode; use state_processing::state_advance::complete_state_advance; +use std::future::Future; +use std::sync::Arc; use std::time::Duration; use types::{ - Attestation, BeaconBlock, BeaconState, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, - ForkName, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, + Attestation, BeaconBlock, BeaconState, Checkpoint, EthSpec, ExecutionBlockHash, ForkName, + Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, }; #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] @@ -287,19 +289,20 @@ impl Tester { 
Ok(self.spec.genesis_slot + slots_since_genesis) } - fn find_head(&self) -> Result { + fn block_on_dangerous(&self, future: F) -> Result { self.harness .chain - .fork_choice() - .map_err(|e| Error::InternalError(format!("failed to find head with {:?}", e)))?; - self.harness - .chain - .head_info() - .map_err(|e| Error::InternalError(format!("failed to read head with {:?}", e))) + .task_executor + .clone() + .block_on_dangerous(future, "ef_tests_block_on") + .ok_or_else(|| Error::InternalError("runtime shutdown".into())) } - fn genesis_epoch(&self) -> Epoch { - self.spec.genesis_slot.epoch(E::slots_per_epoch()) + fn find_head(&self) -> Result, Error> { + let chain = self.harness.chain.clone(); + self.block_on_dangerous(chain.recompute_head_at_current_slot())? + .map_err(|e| Error::InternalError(format!("failed to find head with {:?}", e)))?; + Ok(self.harness.chain.canonical_head.cached_head()) } pub fn set_tick(&self, tick: u64) { @@ -314,15 +317,16 @@ impl Tester { self.harness .chain - .fork_choice - .write() + .canonical_head + .fork_choice_write_lock() .update_time(slot) .unwrap(); } pub fn process_block(&self, block: SignedBeaconBlock, valid: bool) -> Result<(), Error> { - let result = self.harness.chain.process_block(block.clone()); let block_root = block.canonical_root(); + let block = Arc::new(block); + let result = self.block_on_dangerous(self.harness.chain.process_block(block.clone()))?; if result.is_ok() != valid { return Err(Error::DidntFail(format!( "block with root {} was valid={} whilst test expects valid={}. 
result: {:?}", @@ -367,16 +371,20 @@ impl Tester { .seconds_from_current_slot_start(self.spec.seconds_per_slot) .unwrap(); - let (block, _) = block.deconstruct(); - let result = self.harness.chain.fork_choice.write().on_block( - self.harness.chain.slot().unwrap(), - &block, - block_root, - block_delay, - &state, - PayloadVerificationStatus::Irrelevant, - &self.harness.chain.spec, - ); + let result = self + .harness + .chain + .canonical_head + .fork_choice_write_lock() + .on_block( + self.harness.chain.slot().unwrap(), + block.message(), + block_root, + block_delay, + &state, + PayloadVerificationStatus::Irrelevant, + &self.harness.chain.spec, + ); if result.is_ok() { return Err(Error::DidntFail(format!( @@ -424,10 +432,11 @@ impl Tester { } pub fn check_head(&self, expected_head: Head) -> Result<(), Error> { - let chain_head = self.find_head().map(|head| Head { - slot: head.slot, - root: head.block_root, - })?; + let head = self.find_head()?; + let chain_head = Head { + slot: head.head_slot(), + root: head.head_block_root(), + }; check_equal("head", chain_head, expected_head) } @@ -446,15 +455,15 @@ impl Tester { } pub fn check_justified_checkpoint(&self, expected_checkpoint: Checkpoint) -> Result<(), Error> { - let head_checkpoint = self.find_head()?.current_justified_checkpoint; - let fc_checkpoint = self.harness.chain.fork_choice.read().justified_checkpoint(); + let head_checkpoint = self.find_head()?.justified_checkpoint(); + let fc_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .justified_checkpoint(); - assert_checkpoints_eq( - "justified_checkpoint", - self.genesis_epoch(), - head_checkpoint, - fc_checkpoint, - ); + assert_checkpoints_eq("justified_checkpoint", head_checkpoint, fc_checkpoint); check_equal("justified_checkpoint", fc_checkpoint, expected_checkpoint) } @@ -463,15 +472,15 @@ impl Tester { &self, expected_checkpoint_root: Hash256, ) -> Result<(), Error> { - let head_checkpoint = 
self.find_head()?.current_justified_checkpoint; - let fc_checkpoint = self.harness.chain.fork_choice.read().justified_checkpoint(); + let head_checkpoint = self.find_head()?.justified_checkpoint(); + let fc_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .justified_checkpoint(); - assert_checkpoints_eq( - "justified_checkpoint_root", - self.genesis_epoch(), - head_checkpoint, - fc_checkpoint, - ); + assert_checkpoints_eq("justified_checkpoint_root", head_checkpoint, fc_checkpoint); check_equal( "justified_checkpoint_root", @@ -481,15 +490,15 @@ impl Tester { } pub fn check_finalized_checkpoint(&self, expected_checkpoint: Checkpoint) -> Result<(), Error> { - let head_checkpoint = self.find_head()?.finalized_checkpoint; - let fc_checkpoint = self.harness.chain.fork_choice.read().finalized_checkpoint(); + let head_checkpoint = self.find_head()?.finalized_checkpoint(); + let fc_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .finalized_checkpoint(); - assert_checkpoints_eq( - "finalized_checkpoint", - self.genesis_epoch(), - head_checkpoint, - fc_checkpoint, - ); + assert_checkpoints_eq("finalized_checkpoint", head_checkpoint, fc_checkpoint); check_equal("finalized_checkpoint", fc_checkpoint, expected_checkpoint) } @@ -501,8 +510,8 @@ impl Tester { let best_justified_checkpoint = self .harness .chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .best_justified_checkpoint(); check_equal( "best_justified_checkpoint", @@ -515,7 +524,12 @@ impl Tester { &self, expected_proposer_boost_root: Hash256, ) -> Result<(), Error> { - let proposer_boost_root = self.harness.chain.fork_choice.read().proposer_boost_root(); + let proposer_boost_root = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .proposer_boost_root(); check_equal( "proposer_boost_root", proposer_boost_root, @@ -530,20 +544,8 @@ impl Tester { /// This function is necessary due to a quirk 
documented in this issue: /// /// https://github.com/ethereum/consensus-specs/issues/2566 -fn assert_checkpoints_eq(name: &str, genesis_epoch: Epoch, head: Checkpoint, fc: Checkpoint) { - if fc.epoch == genesis_epoch { - assert_eq!( - head, - Checkpoint { - epoch: genesis_epoch, - root: Hash256::zero() - }, - "{} (genesis)", - name - ) - } else { - assert_eq!(head, fc, "{} (non-genesis)", name) - } +fn assert_checkpoints_eq(name: &str, head: Checkpoint, fc: Checkpoint) { + assert_eq!(head, fc, "{}", name) } /// Convenience function to create `Error` messages. diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index be6c495aae..25299bf577 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -30,6 +30,10 @@ pub trait Handler { } } + fn use_rayon() -> bool { + true + } + fn run_for_fork(&self, fork_name: ForkName) { let fork_name_str = fork_name.to_string(); @@ -59,7 +63,7 @@ pub trait Handler { }) .collect(); - let results = Cases { test_cases }.test_results(fork_name); + let results = Cases { test_cases }.test_results(fork_name, Self::use_rayon()); let name = format!( "{}/{}/{}", @@ -460,6 +464,11 @@ impl Handler for ForkChoiceHandler { self.handler_name.clone() } + fn use_rayon() -> bool { + // The fork choice tests use `block_on` which can cause panics with rayon. + false + } + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { // Merge block tests are only enabled for Bellatrix or later. 
if self.handler_name == "on_merge_block" diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index a5bab4ed78..5b23af4fa1 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -98,10 +98,9 @@ impl TestRig { } pub fn perform_tests_blocking(&self) { - self.ee_a - .execution_layer - .block_on_generic(|_| async { self.perform_tests().await }) - .unwrap() + self.runtime + .handle() + .block_on(async { self.perform_tests().await }); } pub async fn wait_until_synced(&self) { diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 4e93db3b32..6da9f2f4a6 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -12,3 +12,4 @@ types = { path = "../../consensus/types" } eth2_ssz = "0.4.1" beacon_chain = { path = "../../beacon_node/beacon_chain" } lazy_static = "1.4.0" +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index 75f82b3132..3e4bb7bf3f 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -37,11 +37,12 @@ impl Default for ExitTest { } impl ExitTest { - fn block_and_pre_state(self) -> (SignedBeaconBlock, BeaconState) { + async fn block_and_pre_state(self) -> (SignedBeaconBlock, BeaconState) { let harness = get_harness::( self.state_epoch.start_slot(E::slots_per_epoch()), VALIDATOR_COUNT, - ); + ) + .await; let mut state = harness.get_current_state(); (self.state_modifier)(&mut state); @@ -49,11 +50,12 @@ impl ExitTest { let validator_index = self.validator_index; let exit_epoch = self.exit_epoch; - let (signed_block, state) = - harness.make_block_with_modifier(state.clone(), state.slot() + 1, |block| { + let (signed_block, state) = harness + 
.make_block_with_modifier(state.clone(), state.slot() + 1, |block| { harness.add_voluntary_exit(block, validator_index, exit_epoch); block_modifier(&harness, block); - }); + }) + .await; (signed_block, state) } @@ -72,12 +74,12 @@ impl ExitTest { } #[cfg(all(test, not(debug_assertions)))] - fn run(self) -> BeaconState { + async fn run(self) -> BeaconState { let spec = &E::default_spec(); let expected = self.expected.clone(); assert_eq!(STATE_EPOCH, spec.shard_committee_period); - let (block, mut state) = self.block_and_pre_state(); + let (block, mut state) = self.block_and_pre_state().await; let result = Self::process(&block, &mut state); @@ -86,8 +88,8 @@ impl ExitTest { state } - fn test_vector(self, title: String) -> TestVector { - let (block, pre_state) = self.block_and_pre_state(); + async fn test_vector(self, title: String) -> TestVector { + let (block, pre_state) = self.block_and_pre_state().await; let mut post_state = pre_state.clone(); let (post_state, error) = match Self::process(&block, &mut post_state) { Ok(_) => (Some(post_state), None), @@ -334,14 +336,14 @@ mod custom_tests { ); } - #[test] - fn valid() { - let state = ExitTest::default().run(); + #[tokio::test] + async fn valid() { + let state = ExitTest::default().run().await; assert_exited(&state, VALIDATOR_INDEX as usize); } - #[test] - fn valid_three() { + #[tokio::test] + async fn valid_three() { let state = ExitTest { block_modifier: Box::new(|harness, block| { harness.add_voluntary_exit(block, 1, STATE_EPOCH); @@ -349,7 +351,8 @@ mod custom_tests { }), ..ExitTest::default() } - .run(); + .run() + .await; for i in &[VALIDATOR_INDEX, 1, 2] { assert_exited(&state, *i as usize); diff --git a/testing/state_transition_vectors/src/macros.rs b/testing/state_transition_vectors/src/macros.rs index 81f8171852..5dafbf549a 100644 --- a/testing/state_transition_vectors/src/macros.rs +++ b/testing/state_transition_vectors/src/macros.rs @@ -4,11 +4,11 @@ /// - `mod tests`: runs all the test vectors locally. 
macro_rules! vectors_and_tests { ($($name: ident, $test: expr),*) => { - pub fn vectors() -> Vec { + pub async fn vectors() -> Vec { let mut vec = vec![]; $( - vec.push($test.test_vector(stringify!($name).into())); + vec.push($test.test_vector(stringify!($name).into()).await); )* vec @@ -18,9 +18,9 @@ macro_rules! vectors_and_tests { mod tests { use super::*; $( - #[test] - fn $name() { - $test.run(); + #[tokio::test] + async fn $name() { + $test.run().await; } )* } diff --git a/testing/state_transition_vectors/src/main.rs b/testing/state_transition_vectors/src/main.rs index d66842e5a1..3e7c37af54 100644 --- a/testing/state_transition_vectors/src/main.rs +++ b/testing/state_transition_vectors/src/main.rs @@ -25,8 +25,9 @@ pub const BASE_VECTOR_DIR: &str = "vectors"; pub const SLOT_OFFSET: u64 = 1; /// Writes all known test vectors to `CARGO_MANIFEST_DIR/vectors`. -fn main() { - match write_all_vectors() { +#[tokio::main] +async fn main() { + match write_all_vectors().await { Ok(()) => exit(0), Err(e) => { eprintln!("Error: {}", e); @@ -49,7 +50,7 @@ lazy_static! { static ref KEYPAIRS: Vec = generate_deterministic_keypairs(VALIDATOR_COUNT); } -fn get_harness( +async fn get_harness( slot: Slot, validator_count: usize, ) -> BeaconChainHarness> { @@ -61,23 +62,25 @@ fn get_harness( let skip_to_slot = slot - SLOT_OFFSET; if skip_to_slot > Slot::new(0) { let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - (skip_to_slot.as_u64()..slot.as_u64()) - .map(Slot::new) - .collect::>() - .as_slice(), - (0..validator_count).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + (skip_to_slot.as_u64()..slot.as_u64()) + .map(Slot::new) + .collect::>() + .as_slice(), + (0..validator_count).collect::>().as_slice(), + ) + .await; } harness } /// Writes all vectors to file. 
-fn write_all_vectors() -> Result<(), String> { - write_vectors_to_file("exit", &exit::vectors()) +async fn write_all_vectors() -> Result<(), String> { + write_vectors_to_file("exit", &exit::vectors().await) } /// Writes a list of `vectors` to the `title` dir. From 66bb5c716ca0f419cd29a81fff9830c869a0224a Mon Sep 17 00:00:00 2001 From: Divma Date: Sun, 3 Jul 2022 05:36:51 +0000 Subject: [PATCH 051/184] Use latest tags for nethermind and geth in the execution engine integration test (#3303) ## Issue Addressed Currently the execution-engine-integration test uses latest master for nethermind and geth, and right now the test fails using the latest unreleased commits. ## Proposed Changes Fix the nethermind and geth revisions the test uses to the latest tag in each repo. This way we are not continuously testing unreleased code, which might even get reverted, and reduce the failures only to releases in each one. Also improve error handling of the commands used to manage the git repos. ## Additional Info na Co-authored-by: Michael Sproul --- .../src/build_utils.rs | 118 +++++++++++++----- .../execution_engine_integration/src/geth.rs | 19 ++- .../src/nethermind.rs | 21 ++-- 3 files changed, 102 insertions(+), 56 deletions(-) diff --git a/testing/execution_engine_integration/src/build_utils.rs b/testing/execution_engine_integration/src/build_utils.rs index 4d4a7bf1ce..966a3bfb43 100644 --- a/testing/execution_engine_integration/src/build_utils.rs +++ b/testing/execution_engine_integration/src/build_utils.rs @@ -15,51 +15,101 @@ pub fn prepare_dir() -> PathBuf { execution_clients_dir } -pub fn clone_repo(repo_dir: &Path, repo_url: &str) -> bool { - Command::new("git") - .arg("clone") - .arg(repo_url) - .arg("--recursive") - .current_dir(repo_dir) - .output() - .unwrap_or_else(|_| panic!("failed to clone repo at {}", repo_url)) - .status - .success() +pub fn clone_repo(repo_dir: &Path, repo_url: &str) -> Result<(), String> { + output_to_result( + Command::new("git") + 
.arg("clone") + .arg(repo_url) + .arg("--recursive") + .current_dir(repo_dir) + .output() + .map_err(|_| format!("failed to clone repo at {repo_url}"))?, + |_| {}, + ) } -pub fn checkout_branch(repo_dir: &Path, branch_name: &str) -> bool { - Command::new("git") - .arg("checkout") - .arg(branch_name) - .current_dir(repo_dir) - .output() - .unwrap_or_else(|_| { - panic!( - "failed to checkout branch at {:?}/{}", - repo_dir, branch_name, - ) - }) - .status - .success() +pub fn checkout(repo_dir: &Path, revision_or_branch: &str) -> Result<(), String> { + output_to_result( + Command::new("git") + .arg("checkout") + .arg(revision_or_branch) + .current_dir(repo_dir) + .output() + .map_err(|_| { + format!( + "failed to checkout branch or revision at {repo_dir:?}/{revision_or_branch}", + ) + })?, + |_| {}, + ) } -pub fn update_branch(repo_dir: &Path, branch_name: &str) -> bool { - Command::new("git") - .arg("pull") - .current_dir(repo_dir) - .output() - .unwrap_or_else(|_| panic!("failed to update branch at {:?}/{}", repo_dir, branch_name)) - .status - .success() +/// Gets the last annotated tag of the given repo. +pub fn get_latest_release(repo_dir: &Path, branch_name: &str) -> Result { + // If the directory was already present it is possible we don't have the most recent tags. 
+ // Fetch them + output_to_result( + Command::new("git") + .arg("fetch") + .arg("--tags") + .current_dir(repo_dir) + .output() + .map_err(|e| format!("Failed to fetch tags for {repo_dir:?}: Err: {e}"))?, + |_| {}, + )?; + output_to_result( + Command::new("git") + .arg("describe") + .arg(format!("origin/{branch_name}")) + .arg("--abbrev=0") + .arg("--tags") + .current_dir(repo_dir) + .output() + .map_err(|e| format!("Failed to get latest tag for {repo_dir:?}: Err: {e}"))?, + |stdout| { + let tag = String::from_utf8_lossy(&stdout); + tag.trim().to_string() + }, + ) } -pub fn check_command_output(output: Output, failure_msg: &'static str) { +#[allow(dead_code)] +pub fn update_branch(repo_dir: &Path, branch_name: &str) -> Result<(), String> { + output_to_result( + Command::new("git") + .arg("pull") + .current_dir(repo_dir) + .output() + .map_err(|_| format!("failed to update branch at {:?}/{}", repo_dir, branch_name))?, + |_| {}, + ) +} + +/// Checks the status of the [`std::process::Output`] and applies `f` to `stdout` if the process +/// succeedded. If not, builds a readable error containing stdout and stderr. 
+fn output_to_result(output: Output, f: OnSuccessFn) -> Result +where + OnSuccessFn: Fn(Vec) -> T, +{ + if !output.status.success() { + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + Err(format!("stderr: {stderr}\nstdout: {stdout}")) + } else { + Ok(f(output.stdout)) + } +} + +pub fn check_command_output(output: Output, failure_msg: F) +where + F: Fn() -> String, +{ if !output.status.success() { if !SUPPRESS_LOGS { dbg!(String::from_utf8_lossy(&output.stdout)); dbg!(String::from_utf8_lossy(&output.stderr)); } - panic!("{}", failure_msg); + panic!("{}", failure_msg()); } } diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 7a6a3803e6..129faea907 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -23,20 +23,17 @@ pub fn build(execution_clients_dir: &Path) { if !repo_dir.exists() { // Clone the repo - assert!(build_utils::clone_repo( - execution_clients_dir, - GETH_REPO_URL - )); + build_utils::clone_repo(execution_clients_dir, GETH_REPO_URL).unwrap(); } - // Checkout the correct branch - assert!(build_utils::checkout_branch(&repo_dir, GETH_BRANCH)); - - // Update the branch - assert!(build_utils::update_branch(&repo_dir, GETH_BRANCH)); + // Get the latest tag on the branch + let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); + build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap(); // Build geth - build_utils::check_command_output(build_result(&repo_dir), "make failed"); + build_utils::check_command_output(build_result(&repo_dir), || { + format!("geth make failed using release {last_release}") + }); } /* @@ -75,7 +72,7 @@ impl GenericExecutionEngine for GethEngine { .output() .expect("failed to init geth"); - build_utils::check_command_output(output, "geth init failed"); + build_utils::check_command_output(output, || "geth init 
failed".into()); datadir } diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index be638fe042..df345f36be 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -25,24 +25,23 @@ pub fn build(execution_clients_dir: &Path) { if !repo_dir.exists() { // Clone the repo - assert!(build_utils::clone_repo( - execution_clients_dir, - NETHERMIND_REPO_URL - )); + build_utils::clone_repo(execution_clients_dir, NETHERMIND_REPO_URL).unwrap() } - // Checkout the correct branch - assert!(build_utils::checkout_branch(&repo_dir, NETHERMIND_BRANCH)); - - // Update the branch - assert!(build_utils::update_branch(&repo_dir, NETHERMIND_BRANCH)); + // Get the latest tag + let last_release = build_utils::get_latest_release(&repo_dir, NETHERMIND_BRANCH).unwrap(); + build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap(); // Build nethermind - build_utils::check_command_output(build_result(&repo_dir), "dotnet build failed"); + build_utils::check_command_output(build_result(&repo_dir), || { + format!("nethermind build failed using release {last_release}") + }); // Build nethermind a second time to enable Merge-related features. // Not sure why this is necessary. - build_utils::check_command_output(build_result(&repo_dir), "dotnet build failed"); + build_utils::check_command_output(build_result(&repo_dir), || { + format!("nethermind build failed using release {last_release}") + }); } /* From 61ed5f0ec626ea4fda52b98a58c94970eda96e89 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 4 Jul 2022 02:56:11 +0000 Subject: [PATCH 052/184] Optimize historic committee calculation for the HTTP API (#3272) ## Issue Addressed Closes https://github.com/sigp/lighthouse/issues/3270 ## Proposed Changes Optimize the calculation of historic beacon committees in the HTTP API. 
This is achieved by allowing committee caches to be constructed for historic epochs, and constructing these committee caches on the fly in the API. This is much faster than reconstructing the state at the requested epoch, which usually takes upwards of 20s, and sometimes minutes with SPRP=8192. The depth of the `randao_mixes` array allows us to look back 64K epochs/0.8 years from a single state, which is pretty awesome! We always use the `state_id` provided by the caller, but will return a nice 400 error if the epoch requested is out of range for the state requested, e.g. ```bash # Prater curl "http://localhost:5052/eth/v1/beacon/states/3170304/committees?epoch=33538" ``` ```json {"code":400,"message":"BAD_REQUEST: epoch out of bounds, try state at slot 1081344","stacktraces":[]} ``` Queries will be fastest when aligned to `slot % SPRP == 0`, so the hint suggests a slot that is 0 mod 8192. --- beacon_node/http_api/src/lib.rs | 47 +++++++++++------ consensus/types/src/beacon_state.rs | 7 +++ .../types/src/beacon_state/committee_cache.rs | 14 +++++- .../src/beacon_state/committee_cache/tests.rs | 50 +++++++++++++++---- 4 files changed, 89 insertions(+), 29 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index ff4d46efcb..606dfb64dc 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -657,26 +657,41 @@ pub fn serve( .and(warp::path::end()) .and_then( |state_id: StateId, chain: Arc>, query: api_types::CommitteesQuery| { - // the api spec says if the epoch is not present then the epoch of the state should be used - let query_state_id = query.epoch.map_or(state_id, |epoch| { - StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())) - }); - blocking_json_task(move || { - query_state_id.map_state(&chain, |state| { - let epoch = state.slot().epoch(T::EthSpec::slots_per_epoch()); + state_id.map_state(&chain, |state| { + let current_epoch = state.current_epoch(); + let epoch = 
query.epoch.unwrap_or(current_epoch); - let committee_cache = if state - .committee_cache_is_initialized(RelativeEpoch::Current) + let committee_cache = match RelativeEpoch::from_epoch(current_epoch, epoch) { - state - .committee_cache(RelativeEpoch::Current) - .map(Cow::Borrowed) - } else { - CommitteeCache::initialized(state, epoch, &chain.spec).map(Cow::Owned) + Ok(relative_epoch) + if state.committee_cache_is_initialized(relative_epoch) => + { + state.committee_cache(relative_epoch).map(Cow::Borrowed) + } + _ => CommitteeCache::initialized(state, epoch, &chain.spec) + .map(Cow::Owned), } - .map_err(BeaconChainError::BeaconStateError) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(|e| match e { + BeaconStateError::EpochOutOfBounds => { + let max_sprp = T::EthSpec::slots_per_historical_root() as u64; + let first_subsequent_restore_point_slot = + ((epoch.start_slot(T::EthSpec::slots_per_epoch()) / max_sprp) + + 1) + * max_sprp; + if epoch < current_epoch { + warp_utils::reject::custom_bad_request(format!( + "epoch out of bounds, try state at slot {}", + first_subsequent_restore_point_slot, + )) + } else { + warp_utils::reject::custom_bad_request( + "epoch out of bounds, too far in future".into(), + ) + } + } + _ => warp_utils::reject::beacon_chain_error(e.into()), + })?; // Use either the supplied slot or all slots in the epoch. let slots = query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 66656d3589..fca200312f 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -963,6 +963,13 @@ impl BeaconState { } } + /// Return the minimum epoch for which `get_randao_mix` will return a non-error value. 
+ pub fn min_randao_epoch(&self) -> Epoch { + self.current_epoch() + .saturating_add(1u64) + .saturating_sub(T::EpochsPerHistoricalVector::to_u64()) + } + /// XOR-assigns the existing `epoch` randao mix with the hash of the `signature`. /// /// # Errors: diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 8a87cddac8..7a526acc58 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -38,8 +38,18 @@ impl CommitteeCache { epoch: Epoch, spec: &ChainSpec, ) -> Result { - RelativeEpoch::from_epoch(state.current_epoch(), epoch) - .map_err(|_| Error::EpochOutOfBounds)?; + // Check that the cache is being built for an in-range epoch. + // + // We allow caches to be constructed for historic epochs, per: + // + // https://github.com/sigp/lighthouse/issues/3270 + let reqd_randao_epoch = epoch + .saturating_sub(spec.min_seed_lookahead) + .saturating_sub(1u64); + + if reqd_randao_epoch < state.min_randao_epoch() || epoch > state.current_epoch() + 1 { + return Err(Error::EpochOutOfBounds); + } // May cause divide-by-zero errors. 
if T::slots_per_epoch() == 0 { diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/src/beacon_state/committee_cache/tests.rs index db431138aa..11cc6095da 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/src/beacon_state/committee_cache/tests.rs @@ -42,7 +42,7 @@ async fn new_state(validator_count: usize, slot: Slot) -> BeaconStat .add_attested_blocks_at_slots( head_state, Hash256::zero(), - (1..slot.as_u64()) + (1..=slot.as_u64()) .map(Slot::new) .collect::>() .as_slice(), @@ -86,6 +86,8 @@ async fn shuffles_for_the_right_epoch() { let mut state = new_state::(num_validators, slot).await; let spec = &MinimalEthSpec::default_spec(); + assert_eq!(state.current_epoch(), epoch); + let distinct_hashes: Vec = (0..MinimalEthSpec::epochs_per_historical_vector()) .map(|i| Hash256::from_low_u64_be(i as u64)) .collect(); @@ -124,15 +126,41 @@ async fn shuffles_for_the_right_epoch() { } }; - let cache = CommitteeCache::initialized(&state, state.current_epoch(), spec).unwrap(); - assert_eq!(cache.shuffling(), shuffling_with_seed(current_seed)); - assert_shuffling_positions_accurate(&cache); + // We can initialize the committee cache at recent epochs in the past, and one epoch into the + // future. 
+ for e in (0..=epoch.as_u64() + 1).map(Epoch::new) { + let seed = state.get_seed(e, Domain::BeaconAttester, spec).unwrap(); + let cache = CommitteeCache::initialized(&state, e, spec) + .unwrap_or_else(|_| panic!("failed at epoch {}", e)); + assert_eq!(cache.shuffling(), shuffling_with_seed(seed)); + assert_shuffling_positions_accurate(&cache); + } - let cache = CommitteeCache::initialized(&state, state.previous_epoch(), spec).unwrap(); - assert_eq!(cache.shuffling(), shuffling_with_seed(previous_seed)); - assert_shuffling_positions_accurate(&cache); - - let cache = CommitteeCache::initialized(&state, state.next_epoch().unwrap(), spec).unwrap(); - assert_eq!(cache.shuffling(), shuffling_with_seed(next_seed)); - assert_shuffling_positions_accurate(&cache); + // We should *not* be able to build a committee cache for the epoch after the next epoch. + assert_eq!( + CommitteeCache::initialized(&state, epoch + 2, spec), + Err(BeaconStateError::EpochOutOfBounds) + ); +} + +#[tokio::test] +async fn min_randao_epoch_correct() { + let num_validators = MinimalEthSpec::minimum_validator_count() * 2; + let current_epoch = Epoch::new(MinimalEthSpec::epochs_per_historical_vector() as u64 * 2); + + let mut state = new_state::( + num_validators, + Epoch::new(1).start_slot(MinimalEthSpec::slots_per_epoch()), + ) + .await; + + // Override the epoch so that there's some room to move. + *state.slot_mut() = current_epoch.start_slot(MinimalEthSpec::slots_per_epoch()); + assert_eq!(state.current_epoch(), current_epoch); + + // The min_randao_epoch should be the minimum epoch such that `get_randao_mix` returns `Ok`. 
+ let min_randao_epoch = state.min_randao_epoch(); + state.get_randao_mix(min_randao_epoch).unwrap(); + state.get_randao_mix(min_randao_epoch - 1).unwrap_err(); + state.get_randao_mix(min_randao_epoch + 1).unwrap(); } From 1219da9a45b531c064a449e2d05fd926111c623b Mon Sep 17 00:00:00 2001 From: Divma Date: Mon, 4 Jul 2022 02:56:13 +0000 Subject: [PATCH 053/184] Simplify error handling after engines fallback removal (#3283) ## Issue Addressed Part of #3118, continuation of #3257 ## Proposed Changes - the [ `first_success_without_retry` ](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/engines.rs#L348-L351) function returns a single error. - the [`first_success`](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/engines.rs#L324) function returns a single error. - [ `EngineErrors` ](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/lib.rs#L69) carries a single error. 
- [`EngineError`](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/engines.rs#L173-L177) now does not need to carry an Id - [`process_multiple_payload_statuses`](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/payload_status.rs#L46-L50) now doesn't need to receive an iterator of statuses and weight in different errors ## Additional Info This is built on top of #3294 --- beacon_node/execution_layer/src/engines.rs | 72 ++--- beacon_node/execution_layer/src/lib.rs | 62 ++-- .../execution_layer/src/payload_status.rs | 268 +++++++----------- 3 files changed, 154 insertions(+), 248 deletions(-) diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 88c94162f8..34eef8a3fb 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -57,7 +57,6 @@ struct PayloadIdCacheKey { /// An execution engine. pub struct Engine { - pub id: String, pub api: HttpJsonRpc, payload_id_cache: Mutex>, state: RwLock, @@ -65,9 +64,8 @@ pub struct Engine { impl Engine { /// Creates a new, offline engine. 
- pub fn new(id: String, api: HttpJsonRpc) -> Self { + pub fn new(api: HttpJsonRpc) -> Self { Self { - id, api, payload_id_cache: Mutex::new(LruCache::new(PAYLOAD_ID_LRU_CACHE_SIZE)), state: RwLock::new(EngineState::Offline), @@ -135,10 +133,10 @@ pub struct Engines { #[derive(Debug)] pub enum EngineError { - Offline { id: String }, - Api { id: String, error: EngineApiError }, + Offline, + Api { error: EngineApiError }, BuilderApi { error: EngineApiError }, - Auth { id: String }, + Auth, } impl Engines { @@ -159,7 +157,6 @@ impl Engines { self.log, "No need to call forkchoiceUpdated"; "msg" => "head does not have execution enabled", - "id" => &self.engine.id, ); return; } @@ -168,7 +165,6 @@ impl Engines { self.log, "Issuing forkchoiceUpdated"; "forkchoice_state" => ?forkchoice_state, - "id" => &self.engine.id, ); // For simplicity, payload attributes are never included in this call. It may be @@ -183,14 +179,12 @@ impl Engines { self.log, "Failed to issue latest head to engine"; "error" => ?e, - "id" => &self.engine.id, ); } } else { debug!( self.log, "No head, not sending to engine"; - "id" => &self.engine.id, ); } } @@ -261,45 +255,36 @@ impl Engines { } } - /// Run `func` on all engines, in the order in which they are defined, returning the first - /// successful result that is found. + /// Run `func` on the node. /// - /// This function might try to run `func` twice. If all nodes return an error on the first time - /// it runs, it will try to upcheck all offline nodes and then run the function again. - pub async fn first_success<'a, F, G, H>(&'a self, func: F) -> Result> + /// This function might try to run `func` twice. If the node returns an error it will try to + /// upcheck it and then run the function again. 
+ pub async fn first_success<'a, F, G, H>(&'a self, func: F) -> Result where F: Fn(&'a Engine) -> G + Copy, G: Future>, { match self.first_success_without_retry(func).await { Ok(result) => Ok(result), - Err(mut first_errors) => { - // Try to recover some nodes. + Err(e) => { + debug!(self.log, "First engine call failed. Retrying"; "err" => ?e); + // Try to recover the node. self.upcheck_not_synced(Logging::Enabled).await; - // Retry the call on all nodes. - match self.first_success_without_retry(func).await { - Ok(result) => Ok(result), - Err(second_errors) => { - first_errors.extend(second_errors); - Err(first_errors) - } - } + // Try again. + self.first_success_without_retry(func).await } } } - /// Run `func` on all engines, in the order in which they are defined, returning the first - /// successful result that is found. + /// Run `func` on the node. pub async fn first_success_without_retry<'a, F, G, H>( &'a self, func: F, - ) -> Result> + ) -> Result where F: Fn(&'a Engine) -> G, G: Future>, { - let mut errors = vec![]; - let (engine_synced, engine_auth_failed) = { let state = self.engine.state.read().await; ( @@ -309,32 +294,22 @@ impl Engines { }; if engine_synced { match func(&self.engine).await { - Ok(result) => return Ok(result), + Ok(result) => Ok(result), Err(error) => { debug!( self.log, "Execution engine call failed"; "error" => ?error, - "id" => &&self.engine.id ); *self.engine.state.write().await = EngineState::Offline; - errors.push(EngineError::Api { - id: self.engine.id.clone(), - error, - }) + Err(EngineError::Api { error }) } } } else if engine_auth_failed { - errors.push(EngineError::Auth { - id: self.engine.id.clone(), - }) + Err(EngineError::Auth) } else { - errors.push(EngineError::Offline { - id: self.engine.id.clone(), - }) + Err(EngineError::Offline) } - - Err(errors) } /// Runs `func` on the node. 
@@ -363,9 +338,7 @@ impl Engines { { let func = &func; if *self.engine.state.read().await == EngineState::Offline { - Err(EngineError::Offline { - id: self.engine.id.clone(), - }) + Err(EngineError::Offline) } else { match func(&self.engine).await { Ok(res) => Ok(res), @@ -376,10 +349,7 @@ impl Engines { "error" => ?error, ); *self.engine.state.write().await = EngineState::Offline; - Err(EngineError::Api { - id: self.engine.id.clone(), - error, - }) + Err(EngineError::Api { error }) } } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 61f1c569d4..8897f8f67a 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -12,7 +12,7 @@ pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; pub use engines::ForkChoiceState; use engines::{Engine, EngineError, Engines, Logging}; use lru::LruCache; -use payload_status::process_multiple_payload_statuses; +use payload_status::process_payload_status; pub use payload_status::PayloadStatus; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; @@ -68,11 +68,10 @@ pub enum Error { NoPayloadBuilder, ApiError(ApiError), Builder(builder_client::Error), - EngineErrors(Vec), + EngineError(Box), NotSynced, ShuttingDown, FeeRecipientUnspecified, - ConsensusFailure, MissingLatestValidHash, InvalidJWTSecret(String), } @@ -200,12 +199,11 @@ impl ExecutionLayer { }?; let engine: Engine = { - let id = execution_url.to_string(); let auth = Auth::new(jwt_key, jwt_id, jwt_version); - debug!(log, "Loaded execution endpoint"; "endpoint" => %id, "jwt_path" => ?secret_file.as_path()); + debug!(log, "Loaded execution endpoint"; "endpoint" => %execution_url, "jwt_path" => ?secret_file.as_path()); let api = HttpJsonRpc::::new_with_auth(execution_url, auth) .map_err(Error::ApiError)?; - Engine::::new(id, api) + Engine::::new(api) }; let builder = builder_url @@ -709,7 +707,8 @@ impl ExecutionLayer { }) }) .await - 
.map_err(Error::EngineErrors) + .map_err(Box::new) + .map_err(Error::EngineError) } /// Maps to the `engine_newPayload` JSON-RPC call. @@ -742,16 +741,14 @@ impl ExecutionLayer { "block_number" => execution_payload.block_number, ); - let broadcast_results = self + let broadcast_result = self .engines() .broadcast(|engine| engine.api.new_payload_v1(execution_payload.clone())) .await; - process_multiple_payload_statuses( - execution_payload.block_hash, - Some(broadcast_results).into_iter(), - self.log(), - ) + process_payload_status(execution_payload.block_hash, broadcast_result, self.log()) + .map_err(Box::new) + .map_err(Error::EngineError) } /// Register that the given `validator_index` is going to produce a block at `slot`. @@ -879,7 +876,7 @@ impl ExecutionLayer { .set_latest_forkchoice_state(forkchoice_state) .await; - let broadcast_results = self + let broadcast_result = self .engines() .broadcast(|engine| async move { engine @@ -888,13 +885,13 @@ impl ExecutionLayer { }) .await; - process_multiple_payload_statuses( + process_payload_status( head_block_hash, - Some(broadcast_results) - .into_iter() - .map(|result| result.map(|response| response.payload_status)), + broadcast_result.map(|response| response.payload_status), self.log(), ) + .map_err(Box::new) + .map_err(Error::EngineError) } pub async fn exchange_transition_configuration(&self, spec: &ChainSpec) -> Result<(), Error> { @@ -909,9 +906,6 @@ impl ExecutionLayer { .broadcast(|engine| engine.api.exchange_transition_configuration_v1(local)) .await; - let mut errors = vec![]; - // Having no fallbacks, the id of the used node is 0 - let i = 0usize; match broadcast_result { Ok(remote) => { if local.terminal_total_difficulty != remote.terminal_total_difficulty @@ -922,20 +916,18 @@ impl ExecutionLayer { "Execution client config mismatch"; "msg" => "ensure lighthouse and the execution client are up-to-date and \ configured consistently", - "execution_endpoint" => i, "remote" => ?remote, "local" => ?local, ); 
- errors.push(EngineError::Api { - id: i.to_string(), + Err(Error::EngineError(Box::new(EngineError::Api { error: ApiError::TransitionConfigurationMismatch, - }); + }))) } else { debug!( self.log(), "Execution client config is OK"; - "execution_endpoint" => i ); + Ok(()) } } Err(e) => { @@ -943,17 +935,10 @@ impl ExecutionLayer { self.log(), "Unable to get transition config"; "error" => ?e, - "execution_endpoint" => i, ); - errors.push(e); + Err(Error::EngineError(Box::new(e))) } } - - if errors.is_empty() { - Ok(()) - } else { - Err(Error::EngineErrors(errors)) - } } /// Used during block production to determine if the merge has been triggered. @@ -992,7 +977,8 @@ impl ExecutionLayer { .await }) .await - .map_err(Error::EngineErrors)?; + .map_err(Box::new) + .map_err(Error::EngineError)?; if let Some(hash) = &hash_opt { info!( @@ -1102,7 +1088,8 @@ impl ExecutionLayer { Ok(None) }) .await - .map_err(|e| Error::EngineErrors(vec![e])) + .map_err(Box::new) + .map_err(Error::EngineError) } /// This function should remain internal. @@ -1160,7 +1147,8 @@ impl ExecutionLayer { .await }) .await - .map_err(Error::EngineErrors) + .map_err(Box::new) + .map_err(Error::EngineError) } async fn get_payload_by_block_hash_from_engine( diff --git a/beacon_node/execution_layer/src/payload_status.rs b/beacon_node/execution_layer/src/payload_status.rs index e0b1a01b43..46917a0aa5 100644 --- a/beacon_node/execution_layer/src/payload_status.rs +++ b/beacon_node/execution_layer/src/payload_status.rs @@ -1,7 +1,6 @@ use crate::engine_api::{Error as ApiError, PayloadStatusV1, PayloadStatusV1Status}; use crate::engines::EngineError; -use crate::Error; -use slog::{crit, warn, Logger}; +use slog::{warn, Logger}; use types::ExecutionBlockHash; /// Provides a simpler, easier to parse version of `PayloadStatusV1` for upstream users. 
@@ -24,168 +23,117 @@ pub enum PayloadStatus { }, } -/// Processes the responses from multiple execution engines, finding the "best" status and returning -/// it (if any). -/// -/// This function has the following basic goals: -/// -/// - Detect a consensus failure between nodes. -/// - Find the most-synced node by preferring a definite response (valid/invalid) over a -/// syncing/accepted response or error. -/// -/// # Details -/// -/// - If there are conflicting valid/invalid responses, always return an error. -/// - If there are syncing/accepted responses but valid/invalid responses exist, return the -/// valid/invalid responses since they're definite. -/// - If there are multiple valid responses, return the first one processed. -/// - If there are multiple invalid responses, return the first one processed. -/// - Syncing/accepted responses are grouped, if there are multiple of them, return the first one -/// processed. -/// - If there are no responses (only errors or nothing), return an error. -pub fn process_multiple_payload_statuses( +/// Processes the response from the execution engine. +pub fn process_payload_status( head_block_hash: ExecutionBlockHash, - statuses: impl Iterator>, + status: Result, log: &Logger, -) -> Result { - let mut errors = vec![]; - let mut valid_statuses = vec![]; - let mut invalid_statuses = vec![]; - let mut other_statuses = vec![]; - - for status in statuses { - match status { - Err(e) => errors.push(e), - Ok(response) => match &response.status { - PayloadStatusV1Status::Valid => { - if response - .latest_valid_hash - .map_or(false, |h| h == head_block_hash) - { - // The response is only valid if `latest_valid_hash` is not `null` and - // equal to the provided `block_hash`. - valid_statuses.push(PayloadStatus::Valid) - } else { - errors.push(EngineError::Api { - id: "unknown".to_string(), - error: ApiError::BadResponse( - format!( - "new_payload: response.status = VALID but invalid latest_valid_hash. 
Expected({:?}) Found({:?})", - head_block_hash, - response.latest_valid_hash, - ) - ), - }); - } - } - PayloadStatusV1Status::Invalid => { - if let Some(latest_valid_hash) = response.latest_valid_hash { - // The response is only valid if `latest_valid_hash` is not `null`. - invalid_statuses.push(PayloadStatus::Invalid { - latest_valid_hash, - validation_error: response.validation_error.clone(), - }) - } else { - errors.push(EngineError::Api { - id: "unknown".to_string(), - error: ApiError::BadResponse( - "new_payload: response.status = INVALID but null latest_valid_hash" - .to_string(), - ), - }); - } - } - PayloadStatusV1Status::InvalidBlockHash => { - // In the interests of being liberal with what we accept, only raise a - // warning here. - if response.latest_valid_hash.is_some() { - warn!( - log, - "Malformed response from execution engine"; - "msg" => "expected a null latest_valid_hash", - "status" => ?response.status - ) - } - - invalid_statuses.push(PayloadStatus::InvalidBlockHash { - validation_error: response.validation_error.clone(), - }); - } - PayloadStatusV1Status::InvalidTerminalBlock => { - // In the interests of being liberal with what we accept, only raise a - // warning here. - if response.latest_valid_hash.is_some() { - warn!( - log, - "Malformed response from execution engine"; - "msg" => "expected a null latest_valid_hash", - "status" => ?response.status - ) - } - - invalid_statuses.push(PayloadStatus::InvalidTerminalBlock { - validation_error: response.validation_error.clone(), - }); - } - PayloadStatusV1Status::Syncing => { - // In the interests of being liberal with what we accept, only raise a - // warning here. 
- if response.latest_valid_hash.is_some() { - warn!( - log, - "Malformed response from execution engine"; - "msg" => "expected a null latest_valid_hash", - "status" => ?response.status - ) - } - - other_statuses.push(PayloadStatus::Syncing) - } - PayloadStatusV1Status::Accepted => { - // In the interests of being liberal with what we accept, only raise a - // warning here. - if response.latest_valid_hash.is_some() { - warn!( - log, - "Malformed response from execution engine"; - "msg" => "expected a null latest_valid_hash", - "status" => ?response.status - ) - } - - other_statuses.push(PayloadStatus::Accepted) - } - }, - } - } - - if !valid_statuses.is_empty() && !invalid_statuses.is_empty() { - crit!( - log, - "Consensus failure between execution nodes"; - "invalid_statuses" => ?invalid_statuses, - "valid_statuses" => ?valid_statuses, - ); - - // Choose to exit and ignore the valid response. This preferences correctness over - // liveness. - return Err(Error::ConsensusFailure); - } - - // Log any errors to assist with troubleshooting. - for error in &errors { - warn!( +) -> Result { + match status { + Err(error) => { + warn!( log, "Error whilst processing payload status"; "error" => ?error, - ); - } + ); + Err(error) + } + Ok(response) => match &response.status { + PayloadStatusV1Status::Valid => { + if response + .latest_valid_hash + .map_or(false, |h| h == head_block_hash) + { + // The response is only valid if `latest_valid_hash` is not `null` and + // equal to the provided `block_hash`. + Ok(PayloadStatus::Valid) + } else { + let error = format!( + "new_payload: response.status = VALID but invalid latest_valid_hash. Expected({:?}) Found({:?})", + head_block_hash, + response.latest_valid_hash + ); + Err(EngineError::Api { + error: ApiError::BadResponse(error), + }) + } + } + PayloadStatusV1Status::Invalid => { + if let Some(latest_valid_hash) = response.latest_valid_hash { + // The response is only valid if `latest_valid_hash` is not `null`. 
+ Ok(PayloadStatus::Invalid { + latest_valid_hash, + validation_error: response.validation_error.clone(), + }) + } else { + Err(EngineError::Api { + error: ApiError::BadResponse( + "new_payload: response.status = INVALID but null latest_valid_hash" + .to_string(), + ), + }) + } + } + PayloadStatusV1Status::InvalidBlockHash => { + // In the interests of being liberal with what we accept, only raise a + // warning here. + if response.latest_valid_hash.is_some() { + warn!( + log, + "Malformed response from execution engine"; + "msg" => "expected a null latest_valid_hash", + "status" => ?response.status + ) + } - valid_statuses - .first() - .or_else(|| invalid_statuses.first()) - .or_else(|| other_statuses.first()) - .cloned() - .map(Result::Ok) - .unwrap_or_else(|| Err(Error::EngineErrors(errors))) + Ok(PayloadStatus::InvalidBlockHash { + validation_error: response.validation_error.clone(), + }) + } + PayloadStatusV1Status::InvalidTerminalBlock => { + // In the interests of being liberal with what we accept, only raise a + // warning here. + if response.latest_valid_hash.is_some() { + warn!( + log, + "Malformed response from execution engine"; + "msg" => "expected a null latest_valid_hash", + "status" => ?response.status + ) + } + + Ok(PayloadStatus::InvalidTerminalBlock { + validation_error: response.validation_error.clone(), + }) + } + PayloadStatusV1Status::Syncing => { + // In the interests of being liberal with what we accept, only raise a + // warning here. + if response.latest_valid_hash.is_some() { + warn!( + log, + "Malformed response from execution engine"; + "msg" => "expected a null latest_valid_hash", + "status" => ?response.status + ) + } + + Ok(PayloadStatus::Syncing) + } + PayloadStatusV1Status::Accepted => { + // In the interests of being liberal with what we accept, only raise a + // warning here. 
+ if response.latest_valid_hash.is_some() { + warn!( + log, + "Malformed response from execution engine"; + "msg" => "expected a null latest_valid_hash", + "status" => ?response.status + ) + } + + Ok(PayloadStatus::Accepted) + } + }, + } } From 1cc8a97d4eb6807ff7d83f4c181f3c0333c3f0cb Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Mon, 4 Jul 2022 02:56:14 +0000 Subject: [PATCH 054/184] Remove unused method in HandlerNetworkContext (#3299) ## Issue Addressed N/A ## Proposed Changes Removed unused method in `HandlerNetworkContext`. --- beacon_node/network/src/router/processor.rs | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index 9d86c3e55a..ce11cbdcef 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -409,22 +409,6 @@ impl HandlerNetworkContext { response, }) } - - /// Sends an error response to the network task. - pub fn _send_error_response( - &mut self, - peer_id: PeerId, - id: PeerRequestId, - error: RPCResponseErrorCode, - reason: String, - ) { - self.inform_network(NetworkMessage::SendErrorResponse { - peer_id, - error, - id, - reason, - }) - } } fn timestamp_now() -> Duration { From 748475be1ded2c9a29cbd8b985f6ae2980720a36 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 4 Jul 2022 02:56:15 +0000 Subject: [PATCH 055/184] Ensure caches are built for block_rewards POST API (#3305) ## Issue Addressed Follow up to https://github.com/sigp/lighthouse/pull/3290 that fixes a caching bug ## Proposed Changes Build the committee cache for the new `POST /lighthouse/analysis/block_rewards` API. Due to an unusual quirk of the total active balance cache the API endpoint would sometimes fail after loading a state from disk which had a current epoch cache _but not_ a total active balance cache. 
This PR adds calls to build the caches immediately before they're required, and has been running smoothly with `blockdreamer` the last few days. --- beacon_node/http_api/src/block_rewards.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 0555037210..682828aee4 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -56,6 +56,8 @@ pub fn get_block_rewards( let block_replayer = BlockReplayer::new(state, &chain.spec) .pre_block_hook(Box::new(|state, block| { + state.build_all_committee_caches(&chain.spec)?; + // Compute block reward. let block_reward = chain.compute_block_reward( block.message(), @@ -154,8 +156,13 @@ pub fn compute_block_rewards( ); } + let mut state = block_replayer.into_state(); + state + .build_all_committee_caches(&chain.spec) + .map_err(beacon_state_error)?; + state_cache - .get_or_insert((parent_root, block.slot()), || block_replayer.into_state()) + .get_or_insert((parent_root, block.slot()), || state) .ok_or_else(|| { custom_server_error("LRU cache insert should always succeed".into()) })? From aed764c4d8313855cdb27fa851c1b5586f82eabc Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 5 Jul 2022 23:36:36 +0000 Subject: [PATCH 056/184] Document min CMake version (#3310) ## Proposed Changes Add a tip about the minimum CMake version to make it more obvious what it takes to compile on Ubuntu 18.04. 
--- book/src/installation-source.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 4b977f5222..fc1ac4c092 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -19,6 +19,10 @@ Install the following packages: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang ``` +> Note: Lighthouse requires CMake v3.12 or newer, which isn't available in the package repositories +> of Ubuntu 18.04 or earlier. On these distributions CMake can still be installed via PPA: +> [https://apt.kitware.com/](https://apt.kitware.com) + #### macOS 1. Install the [Homebrew][] package manager. From 3dc323b035d3c29f8f680be9df29d34d051dc71d Mon Sep 17 00:00:00 2001 From: Divma Date: Tue, 5 Jul 2022 23:36:42 +0000 Subject: [PATCH 057/184] Fix RUSTSEC-2022-0032 (#3311) ## Issue Addressed Failure of cargo audit for [RUSTSEC-2022-0032](https://rustsec.org/advisories/RUSTSEC-2022-0032) ## Proposed Changes Cargo update does the trick again ## Additional Info na --- Cargo.lock | 192 ++++++++++++++++++++++++++--------------------------- 1 file changed, 96 insertions(+), 96 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bb7308b938..a31a6b382c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -141,15 +141,15 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc" +checksum = "bb07d2053ccdbe10e2af2995a2f116c1330396493dc1269f6a91d0ae82e19704" [[package]] name = "arbitrary" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25e0a02cf12f1b1f48b14cb7f8217b876d09992b39c816ffb3b1ba64dd979a87" +checksum = "5a7924531f38b1970ff630f03eb20a2fde69db5c590c93b0f3482e95dcc5fd60" dependencies = [ "derive_arbitrary", ] @@ -268,9 +268,9 @@ checksum 
= "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] name = "base64ct" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea908e7347a8c64e378c17e30ef880ad73e3b4498346b055c2c00ea342f3179" +checksum = "3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" [[package]] name = "beacon_chain" @@ -472,9 +472,9 @@ dependencies = [ [[package]] name = "blst" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c521c26a784d5c4bcd98d483a7d3518376e9ff1efbcfa9e2d456ab8183752303" +checksum = "6a30d0edd9dd1c60ddb42b80341c7852f6f985279a5c1a83659dcb65899dec99" dependencies = [ "cc", "glob", @@ -762,7 +762,7 @@ dependencies = [ "slot_clock", "store", "task_executor", - "time 0.3.9", + "time 0.3.11", "timer", "tokio", "types", @@ -936,9 +936,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ff1f980957787286a554052d03c7aee98d99cc32e09f6d45f0a814133c87978" +checksum = "7d82ee10ce34d7bc12c2122495e7593a9c41347ecdd64185af4ecf72cb1a7f83" dependencies = [ "cfg-if", "once_cell", @@ -964,9 +964,9 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +checksum = "5999502d32b9c48d492abe66392408144895020ec4709e549e840799f3bb74c0" dependencies = [ "generic-array", "typenum", @@ -1190,9 +1190,9 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8728db27dd9033a7456655aaeb35fde74425d0f130b4cb18a19171ef38a1b454" +checksum = "c9a577516173adb681466d517d39bd468293bc2c2a16439375ef0f35bba45f3d" dependencies = [ 
"proc-macro2", "quote", @@ -1400,9 +1400,9 @@ dependencies = [ [[package]] name = "either" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be" [[package]] name = "elliptic-curve" @@ -2017,9 +2017,9 @@ dependencies = [ [[package]] name = "fixedbitset" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "279fb028e20b3c4c320317955b77c5e0c9701f05a1d309905d6fc702cdc5053e" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" @@ -2743,9 +2743,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c6392766afd7964e2531940894cffe4bd8d7d17dbc3c1c4857040fd4b33bdb3" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg 1.1.0", "hashbrown 0.12.1", @@ -3042,7 +3042,7 @@ dependencies = [ "libp2p-yamux", "multiaddr 0.14.0", "parking_lot 0.12.1", - "pin-project 1.0.10", + "pin-project 1.0.11", "rand 0.7.3", "smallvec", ] @@ -3068,7 +3068,7 @@ dependencies = [ "multihash 0.14.0", "multistream-select 0.10.4", "parking_lot 0.11.2", - "pin-project 1.0.10", + "pin-project 1.0.11", "prost 0.9.0", "prost-build 0.9.0", "rand 0.8.5", @@ -3103,7 +3103,7 @@ dependencies = [ "multihash 0.16.2", "multistream-select 0.11.0", "parking_lot 0.12.1", - "pin-project 1.0.10", + "pin-project 1.0.11", "prost 0.10.4", "prost-build 0.10.4", "rand 0.8.5", @@ -3182,9 +3182,9 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4357140141ba9739eee71b20aa735351c0fc642635b2bffc7f57a6b5c1090" +checksum = 
"564a7e5284d7d9b3140fdfc3cb6567bc32555e86a21de5604c2ec85da05cf384" dependencies = [ "libp2p-core 0.33.0", "libp2p-gossipsub", @@ -3263,7 +3263,7 @@ dependencies = [ "instant", "libp2p-core 0.33.0", "log", - "pin-project 1.0.10", + "pin-project 1.0.11", "rand 0.7.3", "smallvec", "thiserror", @@ -3506,9 +3506,9 @@ dependencies = [ [[package]] name = "linked-hash-map" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "lock_api" @@ -3703,9 +3703,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799" +checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" dependencies = [ "libc", "log", @@ -3856,7 +3856,7 @@ dependencies = [ "bytes", "futures", "log", - "pin-project 1.0.10", + "pin-project 1.0.11", "smallvec", "unsigned-varint 0.7.1", ] @@ -3870,7 +3870,7 @@ dependencies = [ "bytes", "futures", "log", - "pin-project 1.0.10", + "pin-project 1.0.11", "smallvec", "unsigned-varint 0.7.1", ] @@ -4086,9 +4086,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" +checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1" [[package]] name = "oorandom" @@ -4136,9 +4136,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.20.0+1.1.1o" +version = "111.22.0+1.1.1q" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"92892c4f87d56e376e469ace79f1128fdaded07646ddf73aa0be4706ff712dec" +checksum = "8f31f0d509d1c1ae9cada2f9539ff8f37933831fd5098879e482aa687d659853" dependencies = [ "cc", ] @@ -4353,27 +4353,27 @@ dependencies = [ [[package]] name = "pin-project" -version = "0.4.29" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9615c18d31137579e9ff063499264ddc1278e7b1982757ebc111028c4d1dc909" +checksum = "3ef0f924a5ee7ea9cbcea77529dba45f8a9ba9f622419fe3386ca581a3ae9d5a" dependencies = [ - "pin-project-internal 0.4.29", + "pin-project-internal 0.4.30", ] [[package]] name = "pin-project" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" dependencies = [ - "pin-project-internal 1.0.10", + "pin-project-internal 1.0.11", ] [[package]] name = "pin-project-internal" -version = "0.4.29" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "044964427019eed9d49d9d5bbce6047ef18f37100ea400912a9fa4a3523ab12a" +checksum = "851c8d0ce9bebe43790dedfc86614c23494ac9f423dd618d3a61fc693eafe61e" dependencies = [ "proc-macro2", "quote", @@ -4382,9 +4382,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74" dependencies = [ "proc-macro2", "quote", @@ -4567,9 +4567,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" +checksum = "dd96a1e8ed2596c337f8eae5f24924ec83f5ad5ab21ea8e455d3566c69fbcaf7" dependencies = [ "unicode-ident", ] @@ -4824,21 +4824,21 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" +checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804" dependencies = [ "proc-macro2", ] [[package]] name = "r2d2" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545c5bc2b880973c9c10e4067418407a0ccaa3091781d1671d46eb35107cb26f" +checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" dependencies = [ "log", - "parking_lot 0.11.2", + "parking_lot 0.12.1", "scheduled-thread-pool", ] @@ -4990,9 +4990,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.5.6" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" +checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" dependencies = [ "aho-corasick", "memchr", @@ -5010,9 +5010,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.26" +version = "0.6.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" +checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" [[package]] name = "remove_dir_all" @@ -5191,7 +5191,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.10", + "semver 1.0.12", ] [[package]] @@ -5221,9 +5221,9 @@ dependencies = [ [[package]] name = "rustversion" 
-version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" +checksum = "a0a5f7c728f5d284929a1cccb5bc19884422bfe6ef4d6c409da2c41838983fcf" [[package]] name = "rw-stream-sink" @@ -5232,7 +5232,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ "futures", - "pin-project 0.4.29", + "pin-project 0.4.30", "static_assertions", ] @@ -5243,7 +5243,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" dependencies = [ "futures", - "pin-project 1.0.10", + "pin-project 1.0.11", "static_assertions", ] @@ -5418,9 +5418,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a41d061efea015927ac527063765e73601444cdc344ba855bc7bd44578b25e1c" +checksum = "a2333e6df6d6598f2b1974829f853c2b4c5f4a6e503c10af918081aa6f8564e1" [[package]] name = "semver-parser" @@ -5447,9 +5447,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.137" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" +checksum = "1578c6245786b9d168c5447eeacfb96856573ca56c9d68fdcf394be134882a47" dependencies = [ "serde_derive", ] @@ -5476,9 +5476,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.137" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" +checksum = "023e9b1467aef8a10fb88f25611870ada9800ef7e22afce356bb0d2387b6f27c" dependencies = [ "proc-macro2", "quote", @@ -5487,9 +5487,9 @@ dependencies = [ [[package]] name = 
"serde_json" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" +checksum = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7" dependencies = [ "itoa 1.0.2", "ryu", @@ -5666,7 +5666,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time 0.3.9", + "time 0.3.11", ] [[package]] @@ -5786,7 +5786,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.9", + "time 0.3.11", ] [[package]] @@ -5831,7 +5831,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.9", + "time 0.3.11", ] [[package]] @@ -5870,9 +5870,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" [[package]] name = "snap" @@ -6045,9 +6045,9 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.24.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6878079b17446e4d3eba6192bb0a2950d5b14f0ed8424b852310e5a94345d0ef" +checksum = "4faebde00e8ff94316c01800f9054fd2ba77d30d9e922541913051d1d978918b" dependencies = [ "heck 0.4.0", "proc-macro2", @@ -6087,9 +6087,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.96" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf" +checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" dependencies = [ "proc-macro2", "quote", @@ -6256,9 +6256,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c2702e08a7a860f005826c6815dcac101b19b5eb330c27fe4a5928fec1d20ddd" +checksum = "72c91f41dcb2f096c05f0873d667dceec1087ce5bcf984ec8ffb19acddbb3217" dependencies = [ "itoa 1.0.2", "libc", @@ -6418,7 +6418,7 @@ checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" dependencies = [ "futures-util", "log", - "pin-project 1.0.10", + "pin-project 1.0.11", "tokio", "tungstenite", ] @@ -6483,9 +6483,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" +checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" dependencies = [ "proc-macro2", "quote", @@ -6494,9 +6494,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709595b8878a4965ce5e87ebf880a7d39c9afc6837721b21a5a816a8117d921" +checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" dependencies = [ "once_cell", "valuable", @@ -6515,13 +6515,13 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.11" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596" +checksum = "3a713421342a5a666b7577783721d3117f1b69a393df803ee17bb73b1e122a59" dependencies = [ "ansi_term", - "lazy_static", "matchers", + "once_cell", "regex", "sharded-slab", "smallvec", @@ -6709,9 +6709,9 @@ dependencies = [ [[package]] name = "ucd-trie" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" +checksum = "89570599c4fe5585de2b388aab47e99f7fa4e9238a1399f707a02e356058141c" [[package]] name = "uint" @@ -6755,9 +6755,9 @@ 
checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "854cbdc4f7bc6ae19c820d44abdc3277ac3e1b2b93db20a636825d9322fb60e6" dependencies = [ "tinyvec", ] @@ -6985,7 +6985,7 @@ dependencies = [ "mime_guess", "multipart", "percent-encoding", - "pin-project 1.0.10", + "pin-project 1.0.11", "scoped-tls", "serde", "serde_json", @@ -7171,7 +7171,7 @@ dependencies = [ "log", "once_cell", "parking_lot 0.12.1", - "pin-project 1.0.10", + "pin-project 1.0.11", "reqwest", "rlp", "secp256k1", From d5e2d98970f250bdeadc91daa0bc584d713c3f69 Mon Sep 17 00:00:00 2001 From: ethDreamer Date: Wed, 6 Jul 2022 03:51:08 +0000 Subject: [PATCH 058/184] Implement feerecipient API for keymanager (#3213) ## Issue Addressed * #3173 ## Proposed Changes Moved all `fee_recipient_file` related logic inside the `ValidatorStore` as it makes more sense to have this all together there. I tested this with the validators I have on `mainnet-shadow-fork-5` and everything appeared to work well. Only technicality is that I can't get the method to return `401` when the authorization header is not specified (it returns `400` instead). Fixing this is probably quite difficult given that none of `warp`'s rejections have code `401`.. I don't really think this matters too much though as long as it fails. 
--- book/src/suggested-fee-recipient.md | 132 ++++++++++--- common/eth2/src/lighthouse_vc/http_client.rs | 63 +++++- common/eth2/src/lighthouse_vc/std_types.rs | 8 +- common/eth2/src/lighthouse_vc/types.rs | 5 + common/warp_utils/src/reject.rs | 9 +- lighthouse/tests/validator_client.rs | 60 ------ testing/web3signer_tests/src/lib.rs | 1 + validator_client/src/cli.rs | 8 - validator_client/src/config.rs | 17 -- validator_client/src/fee_recipient_file.rs | 184 ------------------ validator_client/src/http_api/mod.rs | 132 ++++++++++++- validator_client/src/http_api/tests.rs | 4 +- .../src/http_api/tests/keystores.rs | 182 +++++++++++++++++ .../src/initialized_validators.rs | 72 +++++++ validator_client/src/lib.rs | 4 +- validator_client/src/preparation_service.rs | 57 +----- validator_client/src/validator_store.rs | 19 +- 17 files changed, 583 insertions(+), 374 deletions(-) delete mode 100644 validator_client/src/fee_recipient_file.rs diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md index 3ff71ec7d6..5c77081c39 100644 --- a/book/src/suggested-fee-recipient.md +++ b/book/src/suggested-fee-recipient.md @@ -26,14 +26,9 @@ Lighthouse BN also provides a method for defining this value, should the VC not Assuming trustworthy nodes, the priority for the four methods is: 1. `validator_definitions.yml` -1. `--suggested-fee-recipient-file` 1. `--suggested-fee-recipient` provided to the VC. 1. `--suggested-fee-recipient` provided to the BN. -Users may configure the fee recipient via `validator_definitions.yml` or via the -`--suggested-fee-recipient-file` flag. The value in `validator_definitions.yml` will always take -precedence. - ### 1. 
Setting the fee recipient in the `validator_definitions.yml` Users can set the fee recipient in `validator_definitions.yml` with the `suggested_fee_recipient` @@ -56,36 +51,111 @@ Below is an example of the validator_definitions.yml with `suggested_fee_recipie suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" ``` -### 2. Using the "--suggested-fee-recipient-file" flag on the validator client - -Users can specify a file with the `--suggested-fee-recipient-file` flag. This option is useful for dynamically -changing fee recipients. This file is reloaded each time a validator is chosen to propose a block. - -Usage: -`lighthouse vc --suggested-fee-recipient-file fee_recipient.txt` - -The file should contain key value pairs corresponding to validator public keys and their associated -fee recipient. The file can optionally contain a `default` key for the default case. - -The following example sets the default and the values for the validators with pubkeys `0x87a5` and -`0xa556`: - -``` -default: 0x6cc8dcbca744a6e4ffedb98e1d0df903b10abd21 -0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007: 0x6cc8dcbca744a6e4ffedb98e1d0df903b10abd21 -0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477: 0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d -``` - -Lighthouse will first search for the fee recipient corresponding to the public key of the proposing -validator, if there are no matches for the public key, then it uses the address corresponding to the -default key (if present). - -### 3. Using the "--suggested-fee-recipient" flag on the validator client +### 2. Using the "--suggested-fee-recipient" flag on the validator client The `--suggested-fee-recipient` can be provided to the VC to act as a default value for all validators where a `suggested_fee_recipient` is not loaded from another method. -### 4. Using the "--suggested-fee-recipient" flag on the beacon node +### 3. 
Using the "--suggested-fee-recipient" flag on the beacon node The `--suggested-fee-recipient` can be provided to the BN to act as a default value when the validator client does not transmit a `suggested_fee_recipient` to the BN. + +## Setting the fee recipient dynamically using the keymanager API + +When the [validator client API](api-vc.md) is enabled, the +[standard keymanager API](https://ethereum.github.io/keymanager-APIs/) includes an endpoint +for setting the fee recipient dynamically for a given public key. When used, the fee recipient +will be saved in `validator_definitions.yml` so that it persists across restarts of the validator +client. + +| Property | Specification | +| --- | --- | +Path | `/eth/v1/validator/{pubkey}/feerecipient` +Method | POST +Required Headers | [`Authorization`](./api-vc-auth-header.md) +Typical Responses | 202, 404 + +#### Example Request Body +```json +{ + "ethaddress": "0x1D4E51167DBDC4789a014357f4029ff76381b16c" +} +``` + +```bash +DATADIR=$HOME/.lighthouse/mainnet +PUBKEY=0xa9735061c84fc0003657e5bd38160762b7ef2d67d280e00347b1781570088c32c06f15418c144949f5d736b1d3a6c591 +FEE_RECIPIENT=0x1D4E51167DBDC4789a014357f4029ff76381b16c + +curl -X POST \ + -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ + -H "Content-Type: application/json" \ + -d "{ \"ethaddress\": \"${FEE_RECIPIENT}\" }" \ + http://localhost:5062/eth/v1/validator/${PUBKEY}/feerecipient | jq +``` + +#### Successful Response (202) +```json +null +``` + +### Querying the fee recipient + +The same path with a `GET` request can be used to query the fee recipient for a given public key at any time. 
+ +| Property | Specification | +| --- | --- | +Path | `/eth/v1/validator/{pubkey}/feerecipient` +Method | GET +Required Headers | [`Authorization`](./api-vc-auth-header.md) +Typical Responses | 200, 404 + +```bash +DATADIR=$HOME/.lighthouse/mainnet +PUBKEY=0xa9735061c84fc0003657e5bd38160762b7ef2d67d280e00347b1781570088c32c06f15418c144949f5d736b1d3a6c591 + +curl -X GET \ + -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ + -H "Content-Type: application/json" \ + http://localhost:5062/eth/v1/validator/${PUBKEY}/feerecipient | jq +``` + +#### Successful Response (200) +```json +{ + "data": { + "pubkey": "0xa9735061c84fc0003657e5bd38160762b7ef2d67d280e00347b1781570088c32c06f15418c144949f5d736b1d3a6c591", + "ethaddress": "0x1d4e51167dbdc4789a014357f4029ff76381b16c" + } +} +``` + +### Removing the fee recipient + +The same path with a `DELETE` request can be used to remove the fee recipient for a given public key at any time. +This is useful if you want the fee recipient to fall back to the validator client (or beacon node) default. 
+ +| Property | Specification | +| --- | --- | +Path | `/eth/v1/validator/{pubkey}/feerecipient` +Method | DELETE +Required Headers | [`Authorization`](./api-vc-auth-header.md) +Typical Responses | 204, 404 + +```bash +DATADIR=$HOME/.lighthouse/mainnet +PUBKEY=0xa9735061c84fc0003657e5bd38160762b7ef2d67d280e00347b1781570088c32c06f15418c144949f5d736b1d3a6c591 + +curl -X DELETE \ + -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ + -H "Content-Type: application/json" \ + http://localhost:5062/eth/v1/validator/${PUBKEY}/feerecipient | jq +``` + +#### Successful Response (204) +```json +null +``` + + diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 5e02ec0bb2..d678ca34b7 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -303,11 +303,11 @@ impl ValidatorClientHttpClient { } /// Perform a HTTP DELETE request. - async fn delete_with_unsigned_response( + async fn delete_with_raw_response( &self, url: U, body: &T, - ) -> Result { + ) -> Result { let response = self .client .delete(url) @@ -316,7 +316,16 @@ impl ValidatorClientHttpClient { .send() .await .map_err(Error::Reqwest)?; - let response = ok_or_error(response).await?; + ok_or_error(response).await + } + + /// Perform a HTTP DELETE request. + async fn delete_with_unsigned_response( + &self, + url: U, + body: &T, + ) -> Result { + let response = self.delete_with_raw_response(url, body).await?; Ok(response.json().await?) } @@ -486,6 +495,18 @@ impl ValidatorClientHttpClient { Ok(url) } + fn make_fee_recipient_url(&self, pubkey: &PublicKeyBytes) -> Result { + let mut url = self.server.full.clone(); + url.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("eth") + .push("v1") + .push("validator") + .push(&pubkey.to_string()) + .push("feerecipient"); + Ok(url) + } + /// `GET lighthouse/auth` pub async fn get_auth(&self) -> Result { let mut url = self.server.full.clone(); @@ -543,14 +564,44 @@ impl ValidatorClientHttpClient { let url = self.make_remotekeys_url()?; self.delete_with_unsigned_response(url, req).await } + + /// `GET /eth/v1/validator/{pubkey}/feerecipient` + pub async fn get_fee_recipient( + &self, + pubkey: &PublicKeyBytes, + ) -> Result { + let url = self.make_fee_recipient_url(pubkey)?; + self.get(url) + .await + .map(|generic: GenericResponse| generic.data) + } + + /// `POST /eth/v1/validator/{pubkey}/feerecipient` + pub async fn post_fee_recipient( + &self, + pubkey: &PublicKeyBytes, + req: &UpdateFeeRecipientRequest, + ) -> Result { + let url = self.make_fee_recipient_url(pubkey)?; + self.post_with_raw_response(url, req).await + } + + /// `POST /eth/v1/validator/{pubkey}/feerecipient` + pub async fn delete_fee_recipient(&self, pubkey: &PublicKeyBytes) -> Result { + let url = self.make_fee_recipient_url(pubkey)?; + self.delete_with_raw_response(url, &()).await + } } -/// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an -/// appropriate error message. +/// Returns `Ok(response)` if the response is a `200 OK` response or a +/// `202 Accepted` response. Otherwise, creates an appropriate error message. 
async fn ok_or_error(response: Response) -> Result { let status = response.status(); - if status == StatusCode::OK { + if status == StatusCode::OK + || status == StatusCode::ACCEPTED + || status == StatusCode::NO_CONTENT + { Ok(response) } else if let Ok(message) = response.json().await { Err(Error::ServerMessage(message)) diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index d9fe969138..62987c1368 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -2,7 +2,13 @@ use account_utils::ZeroizeString; use eth2_keystore::Keystore; use serde::{Deserialize, Serialize}; use slashing_protection::interchange::Interchange; -use types::PublicKeyBytes; +use types::{Address, PublicKeyBytes}; + +#[derive(Debug, Deserialize, Serialize, PartialEq)] +pub struct GetFeeRecipientResponse { + pub pubkey: PublicKeyBytes, + pub ethaddress: Address, +} #[derive(Debug, Deserialize, Serialize, PartialEq)] pub struct AuthResponse { diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index fe9b6a48c0..3e1c13dcf8 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -97,3 +97,8 @@ pub struct Web3SignerValidatorRequest { #[serde(skip_serializing_if = "Option::is_none")] pub client_identity_password: Option, } + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct UpdateFeeRecipientRequest { + pub ethaddress: Address, +} diff --git a/common/warp_utils/src/reject.rs b/common/warp_utils/src/reject.rs index f5ce1156e5..cf3d11af8d 100644 --- a/common/warp_utils/src/reject.rs +++ b/common/warp_utils/src/reject.rs @@ -205,8 +205,13 @@ pub async fn handle_rejection(err: warp::Rejection) -> Result() { - code = StatusCode::BAD_REQUEST; - message = format!("BAD_REQUEST: missing {} header", e.name()); + if e.name().eq("Authorization") { + code = StatusCode::UNAUTHORIZED; + message = "UNAUTHORIZED: 
missing Authorization header".to_string(); + } else { + code = StatusCode::BAD_REQUEST; + message = format!("BAD_REQUEST: missing {} header", e.name()); + } } else if let Some(e) = err.find::() { code = StatusCode::BAD_REQUEST; message = format!("BAD_REQUEST: invalid {} header", e.name()); diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 61c239f86d..4ff5434687 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -249,66 +249,6 @@ fn fee_recipient_flag() { ) }); } -#[test] -fn fee_recipient_file_flag() { - let dir = TempDir::new().expect("Unable to create temporary directory"); - let mut file = - File::create(dir.path().join("fee_recipient.txt")).expect("Unable to create file"); - let new_key = Keypair::random(); - let pubkeybytes = PublicKeyBytes::from(new_key.pk); - let contents = "default:0x00000000219ab540356cbb839cbe05303d7705fa"; - file.write_all(contents.as_bytes()) - .expect("Unable to write to file"); - CommandLineTest::new() - .flag( - "suggested-fee-recipient-file", - dir.path().join("fee_recipient.txt").as_os_str().to_str(), - ) - .run() - .with_config(|config| { - // Public key not present so load default. 
- assert_eq!( - config - .fee_recipient_file - .clone() - .unwrap() - .load_fee_recipient(&pubkeybytes) - .unwrap(), - Some(Address::from_str("0x00000000219ab540356cbb839cbe05303d7705fa").unwrap()) - ) - }); -} -#[test] -fn fee_recipient_file_with_pk_flag() { - let dir = TempDir::new().expect("Unable to create temporary directory"); - let mut file = - File::create(dir.path().join("fee_recipient.txt")).expect("Unable to create file"); - let new_key = Keypair::random(); - let pubkeybytes = PublicKeyBytes::from(new_key.pk); - let contents = format!( - "{}:0x00000000219ab540356cbb839cbe05303d7705fa", - pubkeybytes.to_string() - ); - file.write_all(contents.as_bytes()) - .expect("Unable to write to file"); - CommandLineTest::new() - .flag( - "suggested-fee-recipient-file", - dir.path().join("fee_recipient.txt").as_os_str().to_str(), - ) - .run() - .with_config(|config| { - assert_eq!( - config - .fee_recipient_file - .clone() - .unwrap() - .load_fee_recipient(&pubkeybytes) - .unwrap(), - Some(Address::from_str("0x00000000219ab540356cbb839cbe05303d7705fa").unwrap()) - ) - }); -} // Tests for HTTP flags. 
#[test] diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index e39e6515fc..5803f360a6 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -310,6 +310,7 @@ mod tests { spec, None, slot_clock, + None, executor, log.clone(), ); diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index d02e26ace0..414be2d90f 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -136,14 +136,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("FEE-RECIPIENT") .takes_value(true) ) - .arg( - Arg::with_name("suggested-fee-recipient-file") - .long("suggested-fee-recipient-file") - .help("The fallback address provided to the BN if nothing suitable is found \ - in the validator definitions.") - .value_name("FEE-RECIPIENT-FILE") - .takes_value(true) - ) /* REST API related arguments */ .arg( Arg::with_name("http") diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index e56e64f5ad..ddbe7f3630 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -1,4 +1,3 @@ -use crate::fee_recipient_file::FeeRecipientFile; use crate::graffiti_file::GraffitiFile; use crate::{http_api, http_metrics}; use clap::ArgMatches; @@ -44,8 +43,6 @@ pub struct Config { pub graffiti_file: Option, /// Fallback fallback address. pub fee_recipient: Option
, - /// Fee recipient file to load per validator suggested-fee-recipients. - pub fee_recipient_file: Option, /// Configuration for the HTTP REST API. pub http_api: http_api::Config, /// Configuration for the HTTP REST API. @@ -86,7 +83,6 @@ impl Default for Config { graffiti: None, graffiti_file: None, fee_recipient: None, - fee_recipient_file: None, http_api: <_>::default(), http_metrics: <_>::default(), monitoring_api: None, @@ -206,19 +202,6 @@ impl Config { } } - if let Some(fee_recipient_file_path) = cli_args.value_of("suggested-fee-recipient-file") { - let mut fee_recipient_file = FeeRecipientFile::new(fee_recipient_file_path.into()); - fee_recipient_file - .read_fee_recipient_file() - .map_err(|e| format!("Error reading suggested-fee-recipient file: {:?}", e))?; - config.fee_recipient_file = Some(fee_recipient_file); - info!( - log, - "Successfully loaded suggested-fee-recipient file"; - "path" => fee_recipient_file_path - ); - } - if let Some(input_fee_recipient) = parse_optional::
(cli_args, "suggested-fee-recipient")? { diff --git a/validator_client/src/fee_recipient_file.rs b/validator_client/src/fee_recipient_file.rs deleted file mode 100644 index 637ca6d3d5..0000000000 --- a/validator_client/src/fee_recipient_file.rs +++ /dev/null @@ -1,184 +0,0 @@ -use serde_derive::{Deserialize, Serialize}; -use std::collections::HashMap; -use std::fs::File; -use std::io::{prelude::*, BufReader}; -use std::path::PathBuf; -use std::str::FromStr; - -use bls::PublicKeyBytes; -use types::Address; - -#[derive(Debug)] -#[allow(clippy::enum_variant_names)] -pub enum Error { - InvalidFile(std::io::Error), - InvalidLine(String), - InvalidPublicKey(String), - InvalidFeeRecipient(String), -} - -/// Struct to load validator fee-recipients from file. -/// The fee-recipient file is expected to have the following structure -/// -/// default: 0x00000000219ab540356cbb839cbe05303d7705fa -/// public_key1: fee-recipient1 -/// public_key2: fee-recipient2 -/// ... -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct FeeRecipientFile { - fee_recipient_path: PathBuf, - fee_recipients: HashMap, - default: Option
, -} - -impl FeeRecipientFile { - pub fn new(fee_recipient_path: PathBuf) -> Self { - Self { - fee_recipient_path, - fee_recipients: HashMap::new(), - default: None, - } - } - - /// Returns the fee-recipient corresponding to the given public key if present, else returns the - /// default fee-recipient. - /// - /// Returns an error if loading from the fee-recipient file fails. - pub fn get_fee_recipient(&self, public_key: &PublicKeyBytes) -> Result, Error> { - Ok(self - .fee_recipients - .get(public_key) - .copied() - .or(self.default)) - } - - /// Loads the fee-recipient file and populates the default fee-recipient and `fee_recipients` hashmap. - /// Returns the fee-recipient corresponding to the given public key if present, else returns the - /// default fee-recipient. - /// - /// Returns an error if loading from the fee-recipient file fails. - pub fn load_fee_recipient( - &mut self, - public_key: &PublicKeyBytes, - ) -> Result, Error> { - self.read_fee_recipient_file()?; - Ok(self - .fee_recipients - .get(public_key) - .copied() - .or(self.default)) - } - - /// Reads from a fee-recipient file with the specified format and populates the default value - /// and the hashmap. - /// - /// Returns an error if the file does not exist, or if the format is invalid. - pub fn read_fee_recipient_file(&mut self) -> Result<(), Error> { - let file = File::open(self.fee_recipient_path.as_path()).map_err(Error::InvalidFile)?; - let reader = BufReader::new(file); - - let lines = reader.lines(); - - self.default = None; - self.fee_recipients.clear(); - - for line in lines { - let line = line.map_err(|e| Error::InvalidLine(e.to_string()))?; - let (pk_opt, fee_recipient) = read_line(&line)?; - match pk_opt { - Some(pk) => { - self.fee_recipients.insert(pk, fee_recipient); - } - None => self.default = Some(fee_recipient), - } - } - Ok(()) - } -} - -/// Parses a line from the fee-recipient file. -/// -/// `Ok((None, fee_recipient))` represents the fee-recipient for the default key. 
-/// `Ok((Some(pk), fee_recipient))` represents fee-recipient for the public key `pk`. -/// Returns an error if the line is in the wrong format or does not contain a valid public key or fee-recipient. -fn read_line(line: &str) -> Result<(Option, Address), Error> { - if let Some(i) = line.find(':') { - let (key, value) = line.split_at(i); - // Note: `value.len() >=1` so `value[1..]` is safe - let fee_recipient = Address::from_str(value[1..].trim()) - .map_err(|e| Error::InvalidFeeRecipient(e.to_string()))?; - if key == "default" { - Ok((None, fee_recipient)) - } else { - let pk = PublicKeyBytes::from_str(key).map_err(Error::InvalidPublicKey)?; - Ok((Some(pk), fee_recipient)) - } - } else { - Err(Error::InvalidLine(format!("Missing delimiter: {}", line))) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use bls::Keypair; - use std::io::LineWriter; - use tempfile::TempDir; - - const DEFAULT_FEE_RECIPIENT: &str = "0x00000000219ab540356cbb839cbe05303d7705fa"; - const CUSTOM_FEE_RECIPIENT1: &str = "0x4242424242424242424242424242424242424242"; - const CUSTOM_FEE_RECIPIENT2: &str = "0x0000000000000000000000000000000000000001"; - const PK1: &str = "0x800012708dc03f611751aad7a43a082142832b5c1aceed07ff9b543cf836381861352aa923c70eeb02018b638aa306aa"; - const PK2: &str = "0x80001866ce324de7d80ec73be15e2d064dcf121adf1b34a0d679f2b9ecbab40ce021e03bb877e1a2fe72eaaf475e6e21"; - - // Create a fee-recipient file in the required format and return a path to the file. 
- fn create_fee_recipient_file() -> PathBuf { - let temp = TempDir::new().unwrap(); - let pk1 = PublicKeyBytes::deserialize(&hex::decode(&PK1[2..]).unwrap()).unwrap(); - let pk2 = PublicKeyBytes::deserialize(&hex::decode(&PK2[2..]).unwrap()).unwrap(); - - let file_name = temp.into_path().join("fee_recipient.txt"); - - let file = File::create(&file_name).unwrap(); - let mut fee_recipient_file = LineWriter::new(file); - fee_recipient_file - .write_all(format!("default: {}\n", DEFAULT_FEE_RECIPIENT).as_bytes()) - .unwrap(); - fee_recipient_file - .write_all(format!("{}: {}\n", pk1.as_hex_string(), CUSTOM_FEE_RECIPIENT1).as_bytes()) - .unwrap(); - fee_recipient_file - .write_all(format!("{}: {}\n", pk2.as_hex_string(), CUSTOM_FEE_RECIPIENT2).as_bytes()) - .unwrap(); - fee_recipient_file.flush().unwrap(); - file_name - } - - #[test] - fn test_load_fee_recipient() { - let fee_recipient_file_path = create_fee_recipient_file(); - let mut gf = FeeRecipientFile::new(fee_recipient_file_path); - - let pk1 = PublicKeyBytes::deserialize(&hex::decode(&PK1[2..]).unwrap()).unwrap(); - let pk2 = PublicKeyBytes::deserialize(&hex::decode(&PK2[2..]).unwrap()).unwrap(); - - // Read once - gf.read_fee_recipient_file().unwrap(); - - assert_eq!( - gf.load_fee_recipient(&pk1).unwrap().unwrap(), - Address::from_str(CUSTOM_FEE_RECIPIENT1).unwrap() - ); - assert_eq!( - gf.load_fee_recipient(&pk2).unwrap().unwrap(), - Address::from_str(CUSTOM_FEE_RECIPIENT2).unwrap() - ); - - // Random pk should return the default fee-recipient - let random_pk = Keypair::random().pk.compress(); - assert_eq!( - gf.load_fee_recipient(&random_pk).unwrap().unwrap(), - Address::from_str(DEFAULT_FEE_RECIPIENT).unwrap() - ); - } -} diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 9ee983a35a..56218cd81b 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -9,10 +9,11 @@ use account_utils::{ mnemonic_from_phrase, 
validator_definitions::{SigningDefinition, ValidatorDefinition}, }; +pub use api_secret::ApiSecret; use create_validator::{create_validators_mnemonic, create_validators_web3signer}; use eth2::lighthouse_vc::{ - std_types::AuthResponse, - types::{self as api_types, PublicKey, PublicKeyBytes}, + std_types::{AuthResponse, GetFeeRecipientResponse}, + types::{self as api_types, GenericResponse, PublicKey, PublicKeyBytes}, }; use lighthouse_version::version_with_platform; use serde::{Deserialize, Serialize}; @@ -35,8 +36,6 @@ use warp::{ Filter, }; -pub use api_secret::ApiSecret; - #[derive(Debug)] pub enum Error { Warp(warp::Error), @@ -562,6 +561,123 @@ pub fn serve( let std_keystores = eth_v1.and(warp::path("keystores")).and(warp::path::end()); let std_remotekeys = eth_v1.and(warp::path("remotekeys")).and(warp::path::end()); + // GET /eth/v1/validator/{pubkey}/feerecipient + let get_fee_recipient = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::()) + .and(warp::path("feerecipient")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |validator_pubkey: PublicKey, validator_store: Arc>, signer| { + blocking_signed_json_task(signer, move || { + if validator_store + .initialized_validators() + .read() + .is_enabled(&validator_pubkey) + .is_none() + { + return Err(warp_utils::reject::custom_not_found(format!( + "no validator found with pubkey {:?}", + validator_pubkey + ))); + } + validator_store + .get_fee_recipient(&PublicKeyBytes::from(&validator_pubkey)) + .map(|fee_recipient| { + GenericResponse::from(GetFeeRecipientResponse { + pubkey: PublicKeyBytes::from(validator_pubkey.clone()), + ethaddress: fee_recipient, + }) + }) + .ok_or_else(|| { + warp_utils::reject::custom_server_error( + "no fee recipient set".to_string(), + ) + }) + }) + }, + ); + + // POST /eth/v1/validator/{pubkey}/feerecipient + let post_fee_recipient = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::()) + 
.and(warp::body::json()) + .and(warp::path("feerecipient")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |validator_pubkey: PublicKey, + request: api_types::UpdateFeeRecipientRequest, + validator_store: Arc>, + signer| { + blocking_signed_json_task(signer, move || { + if validator_store + .initialized_validators() + .read() + .is_enabled(&validator_pubkey) + .is_none() + { + return Err(warp_utils::reject::custom_not_found(format!( + "no validator found with pubkey {:?}", + validator_pubkey + ))); + } + validator_store + .initialized_validators() + .write() + .set_validator_fee_recipient(&validator_pubkey, request.ethaddress) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error persisting fee recipient: {:?}", + e + )) + }) + }) + }, + ) + .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::ACCEPTED)); + + // DELETE /eth/v1/validator/{pubkey}/feerecipient + let delete_fee_recipient = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::()) + .and(warp::path("feerecipient")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |validator_pubkey: PublicKey, validator_store: Arc>, signer| { + blocking_signed_json_task(signer, move || { + if validator_store + .initialized_validators() + .read() + .is_enabled(&validator_pubkey) + .is_none() + { + return Err(warp_utils::reject::custom_not_found(format!( + "no validator found with pubkey {:?}", + validator_pubkey + ))); + } + validator_store + .initialized_validators() + .write() + .delete_validator_fee_recipient(&validator_pubkey) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error persisting fee recipient removal: {:?}", + e + )) + }) + }) + }, + ) + .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::NO_CONTENT)); + // GET /eth/v1/keystores let get_std_keystores = std_keystores .and(signer.clone()) @@ -647,6 +763,7 @@ 
pub fn serve( .or(get_lighthouse_spec) .or(get_lighthouse_validators) .or(get_lighthouse_validators_pubkey) + .or(get_fee_recipient) .or(get_std_keystores) .or(get_std_remotekeys), ) @@ -655,11 +772,16 @@ pub fn serve( .or(post_validators_keystore) .or(post_validators_mnemonic) .or(post_validators_web3signer) + .or(post_fee_recipient) .or(post_std_keystores) .or(post_std_remotekeys), )) .or(warp::patch().and(patch_validators)) - .or(warp::delete().and(delete_std_keystores.or(delete_std_remotekeys))), + .or(warp::delete().and( + delete_fee_recipient + .or(delete_std_keystores) + .or(delete_std_remotekeys), + )), ) // The auth route is the only route that is allowed to be accessed without the API token. .or(warp::get().and(get_auth)) diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 210555d9c0..7ee0563417 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -36,6 +36,7 @@ use tokio::runtime::Runtime; use tokio::sync::oneshot; const PASSWORD_BYTES: &[u8] = &[42, 50, 37]; +pub const TEST_DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42); type E = MainnetEthSpec; @@ -102,6 +103,7 @@ impl ApiTester { spec, Some(Arc::new(DoppelgangerService::new(log.clone()))), slot_clock, + Some(TEST_DEFAULT_FEE_RECIPIENT), executor.clone(), log.clone(), )); @@ -185,7 +187,7 @@ impl ApiTester { missing_token_client.send_authorization_header(false); match func(missing_token_client).await { Err(ApiError::ServerMessage(ApiErrorMessage { - code: 400, message, .. + code: 401, message, .. 
})) if message.contains("missing Authorization header") => (), Err(other) => panic!("expected missing header error, got {:?}", other), Ok(_) => panic!("expected missing header error, got Ok"), diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index a381378ffe..530993ee05 100644 --- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -1,5 +1,7 @@ use super::*; use account_utils::random_password_string; +use bls::PublicKeyBytes; +use eth2::lighthouse_vc::types::UpdateFeeRecipientRequest; use eth2::lighthouse_vc::{ http_client::ValidatorClientHttpClient as HttpClient, std_types::{KeystoreJsonStr as Keystore, *}, @@ -9,6 +11,7 @@ use itertools::Itertools; use rand::{rngs::SmallRng, Rng, SeedableRng}; use slashing_protection::interchange::{Interchange, InterchangeMetadata}; use std::{collections::HashMap, path::Path}; +use types::Address; fn new_keystore(password: ZeroizeString) -> Keystore { let keypair = Keypair::random(); @@ -585,6 +588,185 @@ fn import_invalid_slashing_protection() { }) } +#[test] +fn check_get_set_fee_recipient() { + run_test(|tester: ApiTester| async move { + let _ = &tester; + let password = random_password_string(); + let keystores = (0..3) + .map(|_| new_keystore(password.clone())) + .collect::>(); + let all_pubkeys = keystores.iter().map(keystore_pubkey).collect::>(); + + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }) + .await + .unwrap(); + + // All keystores should be imported. + check_keystore_import_response(&import_res, all_imported(keystores.len())); + + // Check that GET lists all the imported keystores. 
+ let get_res = tester.client.get_keystores().await.unwrap(); + check_keystore_get_response(&get_res, &keystores); + + // Before setting anything, every fee recipient should be set to TEST_DEFAULT_FEE_RECIPIENT + for pubkey in &all_pubkeys { + let get_res = tester + .client + .get_fee_recipient(pubkey) + .await + .expect("should get fee recipient"); + assert_eq!( + get_res, + GetFeeRecipientResponse { + pubkey: pubkey.clone(), + ethaddress: TEST_DEFAULT_FEE_RECIPIENT, + } + ); + } + + use std::str::FromStr; + let fee_recipient_public_key_1 = + Address::from_str("0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b").unwrap(); + let fee_recipient_public_key_2 = + Address::from_str("0x0000000000000000000000000000000000000001").unwrap(); + let fee_recipient_override = + Address::from_str("0x0123456789abcdef0123456789abcdef01234567").unwrap(); + + // set the fee recipient for pubkey[1] using the API + tester + .client + .post_fee_recipient( + &all_pubkeys[1], + &UpdateFeeRecipientRequest { + ethaddress: fee_recipient_public_key_1.clone(), + }, + ) + .await + .expect("should update fee recipient"); + // now everything but pubkey[1] should be TEST_DEFAULT_FEE_RECIPIENT + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_fee_recipient(pubkey) + .await + .expect("should get fee recipient"); + let expected = if i == 1 { + fee_recipient_public_key_1.clone() + } else { + TEST_DEFAULT_FEE_RECIPIENT + }; + assert_eq!( + get_res, + GetFeeRecipientResponse { + pubkey: pubkey.clone(), + ethaddress: expected, + } + ); + } + + // set the fee recipient for pubkey[2] using the API + tester + .client + .post_fee_recipient( + &all_pubkeys[2], + &UpdateFeeRecipientRequest { + ethaddress: fee_recipient_public_key_2.clone(), + }, + ) + .await + .expect("should update fee recipient"); + // now everything but pubkey[1] & pubkey[2] should be fee_recipient_file_default + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + 
.get_fee_recipient(pubkey) + .await + .expect("should get fee recipient"); + let expected = if i == 1 { + fee_recipient_public_key_1.clone() + } else if i == 2 { + fee_recipient_public_key_2.clone() + } else { + TEST_DEFAULT_FEE_RECIPIENT + }; + assert_eq!( + get_res, + GetFeeRecipientResponse { + pubkey: pubkey.clone(), + ethaddress: expected, + } + ); + } + + // should be able to override previous fee_recipient + tester + .client + .post_fee_recipient( + &all_pubkeys[1], + &UpdateFeeRecipientRequest { + ethaddress: fee_recipient_override.clone(), + }, + ) + .await + .expect("should update fee recipient"); + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_fee_recipient(pubkey) + .await + .expect("should get fee recipient"); + let expected = if i == 1 { + fee_recipient_override.clone() + } else if i == 2 { + fee_recipient_public_key_2.clone() + } else { + TEST_DEFAULT_FEE_RECIPIENT + }; + assert_eq!( + get_res, + GetFeeRecipientResponse { + pubkey: pubkey.clone(), + ethaddress: expected, + } + ); + } + + // delete fee recipient for pubkey[1] using the API + tester + .client + .delete_fee_recipient(&all_pubkeys[1]) + .await + .expect("should delete fee recipient"); + // now everything but pubkey[2] should be TEST_DEFAULT_FEE_RECIPIENT + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_fee_recipient(pubkey) + .await + .expect("should get fee recipient"); + let expected = if i == 2 { + fee_recipient_public_key_2.clone() + } else { + TEST_DEFAULT_FEE_RECIPIENT + }; + assert_eq!( + get_res, + GetFeeRecipientResponse { + pubkey: pubkey.clone(), + ethaddress: expected, + } + ); + } + }) +} + fn all_indices(count: usize) -> Vec { (0..count).collect() } diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 0d5d4ad76e..a0fe6dfe2a 100644 --- a/validator_client/src/initialized_validators.rs +++ 
b/validator_client/src/initialized_validators.rs @@ -617,6 +617,78 @@ impl InitializedValidators { Ok(()) } + /// Sets the `InitializedValidator` and `ValidatorDefinition` `suggested_fee_recipient` values. + /// + /// ## Notes + /// + /// Setting a validator `fee_recipient` will cause `self.definitions` to be updated and saved to + /// disk. + /// + /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. + pub fn set_validator_fee_recipient( + &mut self, + voting_public_key: &PublicKey, + fee_recipient: Address, + ) -> Result<(), Error> { + if let Some(def) = self + .definitions + .as_mut_slice() + .iter_mut() + .find(|def| def.voting_public_key == *voting_public_key) + { + def.suggested_fee_recipient = Some(fee_recipient); + } + + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + val.suggested_fee_recipient = Some(fee_recipient); + } + + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Ok(()) + } + + /// Removes the `InitializedValidator` and `ValidatorDefinition` `suggested_fee_recipient` values. + /// + /// ## Notes + /// + /// Removing a validator `fee_recipient` will cause `self.definitions` to be updated and saved to + /// disk. The fee_recipient for the validator will then fall back to the process level default if + /// it is set. + /// + /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. 
+ pub fn delete_validator_fee_recipient( + &mut self, + voting_public_key: &PublicKey, + ) -> Result<(), Error> { + if let Some(def) = self + .definitions + .as_mut_slice() + .iter_mut() + .find(|def| def.voting_public_key == *voting_public_key) + { + def.suggested_fee_recipient = None; + } + + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + val.suggested_fee_recipient = None; + } + + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Ok(()) + } + /// Tries to decrypt the key cache. /// /// Returns the decrypted cache if decryption was successful, or an error if a required password diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 5e45847598..a69d6a9f5e 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -5,7 +5,6 @@ mod check_synced; mod cli; mod config; mod duties_service; -mod fee_recipient_file; mod graffiti_file; mod http_metrics; mod key_cache; @@ -360,6 +359,7 @@ impl ProductionValidatorClient { context.eth2_config.spec.clone(), doppelganger_service.clone(), slot_clock.clone(), + config.fee_recipient, context.executor.clone(), log.clone(), )); @@ -426,8 +426,6 @@ impl ProductionValidatorClient { .validator_store(validator_store.clone()) .beacon_nodes(beacon_nodes.clone()) .runtime_context(context.service_context("preparation".into())) - .fee_recipient(config.fee_recipient) - .fee_recipient_file(config.fee_recipient_file.clone()) .build()?; let sync_committee_service = SyncCommitteeService::new( diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index 34201180c0..01dfc0ca04 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -1,8 +1,5 @@ use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; -use crate::{ - fee_recipient_file::FeeRecipientFile, - validator_store::{DoppelgangerStatus, 
ValidatorStore}, -}; +use crate::validator_store::{DoppelgangerStatus, ValidatorStore}; use bls::PublicKeyBytes; use environment::RuntimeContext; use parking_lot::RwLock; @@ -31,8 +28,6 @@ pub struct PreparationServiceBuilder { slot_clock: Option, beacon_nodes: Option>>, context: Option>, - fee_recipient: Option
, - fee_recipient_file: Option, } impl PreparationServiceBuilder { @@ -42,8 +37,6 @@ impl PreparationServiceBuilder { slot_clock: None, beacon_nodes: None, context: None, - fee_recipient: None, - fee_recipient_file: None, } } @@ -67,16 +60,6 @@ impl PreparationServiceBuilder { self } - pub fn fee_recipient(mut self, fee_recipient: Option
) -> Self { - self.fee_recipient = fee_recipient; - self - } - - pub fn fee_recipient_file(mut self, fee_recipient_file: Option) -> Self { - self.fee_recipient_file = fee_recipient_file; - self - } - pub fn build(self) -> Result, String> { Ok(PreparationService { inner: Arc::new(Inner { @@ -92,8 +75,6 @@ impl PreparationServiceBuilder { context: self .context .ok_or("Cannot build PreparationService without runtime_context")?, - fee_recipient: self.fee_recipient, - fee_recipient_file: self.fee_recipient_file, validator_registration_cache: RwLock::new(HashMap::new()), }), }) @@ -106,8 +87,6 @@ pub struct Inner { slot_clock: T, beacon_nodes: Arc>, context: RuntimeContext, - fee_recipient: Option
, - fee_recipient_file: Option, // Used to track unpublished validator registration changes. validator_registration_cache: RwLock>, @@ -301,23 +280,6 @@ impl PreparationService { { let log = self.context.log(); - let fee_recipient_file = self - .fee_recipient_file - .clone() - .map(|mut fee_recipient_file| { - fee_recipient_file - .read_fee_recipient_file() - .map_err(|e| { - error!( - log, - "Error loading fee-recipient file"; - "error" => ?e - ); - }) - .unwrap_or(()); - fee_recipient_file - }); - let all_pubkeys: Vec<_> = self .validator_store .voting_pubkeys(DoppelgangerStatus::ignored); @@ -327,22 +289,7 @@ impl PreparationService { .filter_map(|pubkey| { // Ignore fee recipients for keys without indices, they are inactive. let validator_index = self.validator_store.validator_index(&pubkey)?; - - // If there is a `suggested_fee_recipient` in the validator definitions yaml - // file, use that value. - let fee_recipient = self - .validator_store - .suggested_fee_recipient(&pubkey) - .or_else(|| { - // If there's nothing in the validator defs file, check the fee - // recipient file. - fee_recipient_file - .as_ref()? - .get_fee_recipient(&pubkey) - .ok()? - }) - // If there's nothing in the file, try the process-level default value. - .or(self.fee_recipient); + let fee_recipient = self.validator_store.get_fee_recipient(&pubkey); if let Some(fee_recipient) = fee_recipient { Some(map_fn(pubkey, validator_index, fee_recipient)) diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 36ec5e8955..de39f91264 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -86,6 +86,7 @@ pub struct ValidatorStore { log: Logger, doppelganger_service: Option>, slot_clock: T, + fee_recipient_process: Option
, task_executor: TaskExecutor, _phantom: PhantomData, } @@ -101,6 +102,7 @@ impl ValidatorStore { spec: ChainSpec, doppelganger_service: Option>, slot_clock: T, + fee_recipient_process: Option
, task_executor: TaskExecutor, log: Logger, ) -> Self { @@ -113,6 +115,7 @@ impl ValidatorStore { log, doppelganger_service, slot_clock, + fee_recipient_process, task_executor, _phantom: PhantomData, } @@ -356,7 +359,21 @@ impl ValidatorStore { self.validators.read().graffiti(validator_pubkey) } - pub fn suggested_fee_recipient(&self, validator_pubkey: &PublicKeyBytes) -> Option
{ + /// Returns the fee recipient for the given public key. The priority order for fetching + /// the fee recipient is: + /// 1. validator_definitions.yml + /// 2. process level fee recipient + pub fn get_fee_recipient(&self, validator_pubkey: &PublicKeyBytes) -> Option
{ + // If there is a `suggested_fee_recipient` in the validator definitions yaml + // file, use that value. + self.suggested_fee_recipient(validator_pubkey) + // If there's nothing in the file, try the process-level default value. + .or(self.fee_recipient_process) + } + + /// Returns the suggested_fee_recipient from `validator_definitions.yml` if any. + /// This has been pulled into a private function so the read lock is dropped easily + fn suggested_fee_recipient(&self, validator_pubkey: &PublicKeyBytes) -> Option
{ self.validators .read() .suggested_fee_recipient(validator_pubkey) From 5dbfb37d742d24d7832a0300bafa19ecf1cfe0a4 Mon Sep 17 00:00:00 2001 From: Kirill Date: Wed, 6 Jul 2022 22:00:58 +0000 Subject: [PATCH 059/184] eth2_hashing: make `cpufeatures` dep optional (#3309) ## Issue Addressed #3308 ## Proposed Changes * add `cpufeatures` feature. * make `cpufeature` default feature to preserve the compatibility; * hide all `cpufeature`-related code with `cpufeatures` feature. Co-authored-by: Kirill --- crypto/eth2_hashing/Cargo.toml | 5 +++-- crypto/eth2_hashing/src/lib.rs | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/crypto/eth2_hashing/Cargo.toml b/crypto/eth2_hashing/Cargo.toml index 7490ab6093..eb92d252d1 100644 --- a/crypto/eth2_hashing/Cargo.toml +++ b/crypto/eth2_hashing/Cargo.toml @@ -8,9 +8,9 @@ description = "Hashing primitives used in Ethereum 2.0" [dependencies] lazy_static = { version = "1.4.0", optional = true } +cpufeatures = { version = "0.2.2", optional = true } ring = "0.16.19" sha2 = "0.10.2" -cpufeatures = "0.2.2" [dev-dependencies] rustc-hex = "2.1.0" @@ -19,5 +19,6 @@ rustc-hex = "2.1.0" wasm-bindgen-test = "0.3.18" [features] -default = ["zero_hash_cache"] +default = ["zero_hash_cache", "detect-cpufeatures"] zero_hash_cache = ["lazy_static"] +detect-cpufeatures = ["cpufeatures"] diff --git a/crypto/eth2_hashing/src/lib.rs b/crypto/eth2_hashing/src/lib.rs index c5c034640b..36a3d14139 100644 --- a/crypto/eth2_hashing/src/lib.rs +++ b/crypto/eth2_hashing/src/lib.rs @@ -127,15 +127,15 @@ pub enum DynamicImpl { // Runtime latch for detecting the availability of SHA extensions on x86_64. // // Inspired by the runtime switch within the `sha2` crate itself. 
-#[cfg(target_arch = "x86_64")] +#[cfg(all(feature = "detect-cpufeatures", target_arch = "x86_64"))] cpufeatures::new!(x86_sha_extensions, "sha", "sse2", "ssse3", "sse4.1"); #[inline(always)] pub fn have_sha_extensions() -> bool { - #[cfg(target_arch = "x86_64")] + #[cfg(all(feature = "detect-cpufeatures", target_arch = "x86_64"))] return x86_sha_extensions::get(); - #[cfg(not(target_arch = "x86_64"))] + #[cfg(not(all(feature = "detect-cpufeatures", target_arch = "x86_64")))] return false; } From 6d42a09ff8b3914384929396755fc2f1153af48d Mon Sep 17 00:00:00 2001 From: Divma Date: Mon, 11 Jul 2022 01:44:41 +0000 Subject: [PATCH 060/184] Merge Engines and Engine struct in one in the `execution_layer` crate (#3284) ## Issue Addressed Part of https://github.com/sigp/lighthouse/issues/3118, continuation of https://github.com/sigp/lighthouse/pull/3257 and https://github.com/sigp/lighthouse/pull/3283 ## Proposed Changes - Merge the [`Engines`](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/engines.rs#L161-L165) struct and [`Engine` ](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/engines.rs#L62-L67) - Remove unnecessary generics ## Additional Info There is more cleanup to do that will come in subsequent PRs --- beacon_node/execution_layer/src/engine_api.rs | 2 - .../execution_layer/src/engine_api/http.rs | 14 ++-- beacon_node/execution_layer/src/engines.rs | 82 ++++++++----------- beacon_node/execution_layer/src/lib.rs | 31 +++---- 4 files changed, 52 insertions(+), 77 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index a1e769e3e3..7e04a3fac3 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -71,8 +71,6 @@ impl From for Error { } } -pub struct EngineApi; - #[derive(Clone, Copy, Debug, PartialEq)] pub enum 
PayloadStatusV1Status { Valid, diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 832771460e..c4811e04c1 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -7,7 +7,6 @@ use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde_json::json; -use std::marker::PhantomData; use std::time::Duration; use types::EthSpec; @@ -169,7 +168,7 @@ pub mod deposit_log { /// state of the deposit contract. pub mod deposit_methods { use super::Log; - use crate::{EngineApi, HttpJsonRpc}; + use crate::HttpJsonRpc; use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; use std::fmt; @@ -298,7 +297,7 @@ pub mod deposit_methods { } } - impl HttpJsonRpc { + impl HttpJsonRpc { /// Get the eth1 chain id of the given endpoint. pub async fn get_chain_id(&self, timeout: Duration) -> Result { let chain_id: String = self @@ -517,20 +516,18 @@ pub mod deposit_methods { } } -pub struct HttpJsonRpc { +pub struct HttpJsonRpc { pub client: Client, pub url: SensitiveUrl, auth: Option, - _phantom: PhantomData, } -impl HttpJsonRpc { +impl HttpJsonRpc { pub fn new(url: SensitiveUrl) -> Result { Ok(Self { client: Client::builder().build()?, url, auth: None, - _phantom: PhantomData, }) } @@ -539,7 +536,6 @@ impl HttpJsonRpc { client: Client::builder().build()?, url, auth: Some(auth), - _phantom: PhantomData, }) } @@ -592,7 +588,7 @@ impl std::fmt::Display for HttpJsonRpc { } } -impl HttpJsonRpc { +impl HttpJsonRpc { pub async fn upcheck(&self) -> Result<(), Error> { let result: serde_json::Value = self .rpc_request(ETH_SYNCING, json!([]), ETH_SYNCING_TIMEOUT) diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 34eef8a3fb..d44d81c674 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -1,7 
+1,7 @@ //! Provides generic behaviour for multiple execution engines, specifically fallback behaviour. use crate::engine_api::{ - EngineApi, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, PayloadId, + Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, PayloadId, }; use crate::HttpJsonRpc; use lru::LruCache; @@ -55,20 +55,32 @@ struct PayloadIdCacheKey { pub suggested_fee_recipient: Address, } -/// An execution engine. -pub struct Engine { - pub api: HttpJsonRpc, - payload_id_cache: Mutex>, - state: RwLock, +#[derive(Debug)] +pub enum EngineError { + Offline, + Api { error: EngineApiError }, + BuilderApi { error: EngineApiError }, + Auth, } -impl Engine { +/// An execution engine. +pub struct Engine { + pub api: HttpJsonRpc, + payload_id_cache: Mutex>, + state: RwLock, + pub latest_forkchoice_state: RwLock>, + pub log: Logger, +} + +impl Engine { /// Creates a new, offline engine. - pub fn new(api: HttpJsonRpc) -> Self { + pub fn new(api: HttpJsonRpc, log: &Logger) -> Self { Self { api, payload_id_cache: Mutex::new(LruCache::new(PAYLOAD_ID_LRU_CACHE_SIZE)), state: RwLock::new(EngineState::Offline), + latest_forkchoice_state: Default::default(), + log: log.clone(), } } @@ -90,9 +102,7 @@ impl Engine { }) .cloned() } -} -impl Engine { pub async fn notify_forkchoice_updated( &self, forkchoice_state: ForkChoiceState, @@ -120,26 +130,7 @@ impl Engine { Ok(response) } -} -// This structure used to hold multiple execution engines managed in a fallback manner. This -// functionality has been removed following https://github.com/sigp/lighthouse/issues/3118 and this -// struct will likely be removed in the future. 
-pub struct Engines { - pub engine: Engine, - pub latest_forkchoice_state: RwLock>, - pub log: Logger, -} - -#[derive(Debug)] -pub enum EngineError { - Offline, - Api { error: EngineApiError }, - BuilderApi { error: EngineApiError }, - Auth, -} - -impl Engines { async fn get_latest_forkchoice_state(&self) -> Option { *self.latest_forkchoice_state.read().await } @@ -169,12 +160,7 @@ impl Engines { // For simplicity, payload attributes are never included in this call. It may be // reasonable to include them in the future. - if let Err(e) = self - .engine - .api - .forkchoice_updated_v1(forkchoice_state, None) - .await - { + if let Err(e) = self.api.forkchoice_updated_v1(forkchoice_state, None).await { debug!( self.log, "Failed to issue latest head to engine"; @@ -191,14 +177,14 @@ impl Engines { /// Returns `true` if the engine has a "synced" status. pub async fn is_synced(&self) -> bool { - *self.engine.state.read().await == EngineState::Synced + *self.state.read().await == EngineState::Synced } /// Run the `EngineApi::upcheck` function if the node's last known state is not synced. This /// might be used to recover the node if offline. pub async fn upcheck_not_synced(&self, logging: Logging) { - let mut state_lock = self.engine.state.write().await; + let mut state_lock = self.state.write().await; if *state_lock != EngineState::Synced { - match self.engine.api.upcheck().await { + match self.api.upcheck().await { Ok(()) => { if logging.is_enabled() { info!( @@ -261,7 +247,7 @@ impl Engines { /// upcheck it and then run the function again. 
pub async fn first_success<'a, F, G, H>(&'a self, func: F) -> Result where - F: Fn(&'a Engine) -> G + Copy, + F: Fn(&'a Engine) -> G + Copy, G: Future>, { match self.first_success_without_retry(func).await { @@ -282,18 +268,18 @@ impl Engines { func: F, ) -> Result where - F: Fn(&'a Engine) -> G, + F: Fn(&'a Engine) -> G, G: Future>, { let (engine_synced, engine_auth_failed) = { - let state = self.engine.state.read().await; + let state = self.state.read().await; ( *state == EngineState::Synced, *state == EngineState::AuthFailed, ) }; if engine_synced { - match func(&self.engine).await { + match func(self).await { Ok(result) => Ok(result), Err(error) => { debug!( @@ -301,7 +287,7 @@ impl Engines { "Execution engine call failed"; "error" => ?error, ); - *self.engine.state.write().await = EngineState::Offline; + *self.state.write().await = EngineState::Offline; Err(EngineError::Api { error }) } } @@ -318,7 +304,7 @@ impl Engines { /// it runs, it will try to upcheck all offline nodes and then run the function again. pub async fn broadcast<'a, F, G, H>(&'a self, func: F) -> Result where - F: Fn(&'a Engine) -> G + Copy, + F: Fn(&'a Engine) -> G + Copy, G: Future>, { match self.broadcast_without_retry(func).await { @@ -333,14 +319,14 @@ impl Engines { /// Runs `func` on the node if it's last state is not offline. 
pub async fn broadcast_without_retry<'a, F, G, H>(&'a self, func: F) -> Result where - F: Fn(&'a Engine) -> G, + F: Fn(&'a Engine) -> G, G: Future>, { let func = &func; - if *self.engine.state.read().await == EngineState::Offline { + if *self.state.read().await == EngineState::Offline { Err(EngineError::Offline) } else { - match func(&self.engine).await { + match func(self).await { Ok(res) => Ok(res), Err(error) => { debug!( @@ -348,7 +334,7 @@ impl Engines { "Execution engine call failed"; "error" => ?error, ); - *self.engine.state.write().await = EngineState::Offline; + *self.state.write().await = EngineState::Offline; Err(EngineError::Api { error }) } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 8897f8f67a..47424ca0f8 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -10,7 +10,7 @@ use engine_api::Error as ApiError; pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; pub use engines::ForkChoiceState; -use engines::{Engine, EngineError, Engines, Logging}; +use engines::{Engine, EngineError, Logging}; use lru::LruCache; use payload_status::process_payload_status; pub use payload_status::PayloadStatus; @@ -64,7 +64,7 @@ const CONFIG_POLL_INTERVAL: Duration = Duration::from_secs(60); #[derive(Debug)] pub enum Error { - NoEngines, + NoEngine, NoPayloadBuilder, ApiError(ApiError), Builder(builder_client::Error), @@ -101,7 +101,7 @@ pub struct Proposer { } struct Inner { - engines: Engines, + engine: Engine, builder: Option, execution_engine_forkchoice_lock: Mutex<()>, suggested_fee_recipient: Option
, @@ -162,7 +162,7 @@ impl ExecutionLayer { if urls.len() > 1 { warn!(log, "Only the first execution engine url will be used"); } - let execution_url = urls.into_iter().next().ok_or(Error::NoEngines)?; + let execution_url = urls.into_iter().next().ok_or(Error::NoEngine)?; // Use the default jwt secret path if not provided via cli. let secret_file = secret_files @@ -198,12 +198,11 @@ impl ExecutionLayer { .map_err(Error::InvalidJWTSecret) }?; - let engine: Engine = { + let engine: Engine = { let auth = Auth::new(jwt_key, jwt_id, jwt_version); debug!(log, "Loaded execution endpoint"; "endpoint" => %execution_url, "jwt_path" => ?secret_file.as_path()); - let api = HttpJsonRpc::::new_with_auth(execution_url, auth) - .map_err(Error::ApiError)?; - Engine::::new(api) + let api = HttpJsonRpc::new_with_auth(execution_url, auth).map_err(Error::ApiError)?; + Engine::new(api, &log) }; let builder = builder_url @@ -211,11 +210,7 @@ impl ExecutionLayer { .transpose()?; let inner = Inner { - engines: Engines { - engine, - latest_forkchoice_state: <_>::default(), - log: log.clone(), - }, + engine, builder, execution_engine_forkchoice_lock: <_>::default(), suggested_fee_recipient, @@ -234,8 +229,8 @@ impl ExecutionLayer { } impl ExecutionLayer { - fn engines(&self) -> &Engines { - &self.inner.engines + fn engines(&self) -> &Engine { + &self.inner.engine } pub fn builder(&self) -> &Option { @@ -1004,7 +999,7 @@ impl ExecutionLayer { /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md async fn get_pow_block_hash_at_total_difficulty( &self, - engine: &Engine, + engine: &Engine, spec: &ChainSpec, ) -> Result, ApiError> { let mut block = engine @@ -1118,7 +1113,7 @@ impl ExecutionLayer { /// https://github.com/ethereum/consensus-specs/issues/2636 async fn get_pow_block( &self, - engine: &Engine, + engine: &Engine, hash: ExecutionBlockHash, ) -> Result, ApiError> { if let Some(cached) = self.execution_blocks().await.get(&hash).copied() { @@ -1153,7 
+1148,7 @@ impl ExecutionLayer { async fn get_payload_by_block_hash_from_engine( &self, - engine: &Engine, + engine: &Engine, hash: ExecutionBlockHash, ) -> Result>, ApiError> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH); From 4212f22ddb4f91b12756efe3a6767f52b7981314 Mon Sep 17 00:00:00 2001 From: sragss Date: Mon, 11 Jul 2022 01:44:42 +0000 Subject: [PATCH 061/184] add sync committee contribution timeout (#3291) ## Issue Addressed Resolves #3276. ## Proposed Changes Add a timeout for the sync committee contributions at 1/4 the slot length such that we may be able to try backup beacon nodes in the case of contribution post failure. ## Additional Info 1/4 slot length seemed standard for the timeouts, but may want to decrease this to 1/2. I did not find any timeout related / sync committee related tests, so there are no tests. Happy to write some with a bit of guidance. --- common/eth2/src/lib.rs | 9 ++++++++- validator_client/src/lib.rs | 3 +++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index d374101308..2ee3618386 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -110,6 +110,7 @@ pub struct Timeouts { pub liveness: Duration, pub proposal: Duration, pub proposer_duties: Duration, + pub sync_committee_contribution: Duration, pub sync_duties: Duration, } @@ -121,6 +122,7 @@ impl Timeouts { liveness: timeout, proposal: timeout, proposer_duties: timeout, + sync_committee_contribution: timeout, sync_duties: timeout, } } @@ -907,7 +909,12 @@ impl BeaconNodeHttpClient { .push("validator") .push("contribution_and_proofs"); - self.post(path, &signed_contributions).await?; + self.post_with_timeout( + path, + &signed_contributions, + self.timeouts.sync_committee_contribution, + ) + .await?; Ok(()) } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index a69d6a9f5e..b78b072cf8 100644 --- a/validator_client/src/lib.rs +++ 
b/validator_client/src/lib.rs @@ -72,6 +72,7 @@ const HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_LIVENESS_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_PROPOSAL_TIMEOUT_QUOTIENT: u32 = 2; const HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const DOPPELGANGER_SERVICE_NAME: &str = "doppelganger"; @@ -280,6 +281,8 @@ impl ProductionValidatorClient { liveness: slot_duration / HTTP_LIVENESS_TIMEOUT_QUOTIENT, proposal: slot_duration / HTTP_PROPOSAL_TIMEOUT_QUOTIENT, proposer_duties: slot_duration / HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT, + sync_committee_contribution: slot_duration + / HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT, sync_duties: slot_duration / HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT, } } else { From a390695e0f5210904777ce2219ae5cc8e26acbff Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 12 Jul 2022 15:54:17 +0000 Subject: [PATCH 062/184] Add `--release` to disallowed-from-async lint (#3325) ## Issue Addressed - #3251 ## Proposed Changes Adds the release tag to the `disallowed_from_async` lint. ## Additional Info ~~I haven't run this locally yet due to (minor) complexity of running the lint, I'm seeing if it will work via Github.~~ --- Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Makefile b/Makefile index a97637bfd1..53fd4143d9 100644 --- a/Makefile +++ b/Makefile @@ -146,10 +146,9 @@ lint: -A clippy::upper-case-acronyms \ -A clippy::vec-init-then-push -# FIXME: fails if --release is added due to broken HTTP API tests nightly-lint: cp .github/custom/clippy.toml . 
- cargo +$(CLIPPY_PINNED_NIGHTLY) clippy --workspace --tests -- \ + cargo +$(CLIPPY_PINNED_NIGHTLY) clippy --workspace --tests --release -- \ -A clippy::all \ -D clippy::disallowed_from_async rm clippy.toml From 7a6e6928a317a8dbea0ebadb6deeefeb54a2f10e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 13 Jul 2022 20:31:39 +0000 Subject: [PATCH 063/184] Further remove EE redundancy (#3324) ## Issue Addressed Resolves #3176 ## Proposed Changes Continues from PRs by @divagant-martian to gradually remove EL redundancy (see #3284, #3257). This PR achieves: - Removes the `broadcast` and `first_success` methods. The functional impact is that every request to the EE will always be tried immediately, regardless of the cached `EngineState` (this resolves #3176). Previously we would check the engine state before issuing requests, this doesn't make sense in a single-EE world; there's only one EE so we might as well try it for every request. - Runs the upcheck/watchdog routine once per slot rather than thrice. When we had multiple EEs frequent polling was useful to try and detect when the primary EE had come back online and we could switch to it. That's not as relevant now. - Always creates logs in the `Engines::upcheck` function. Previously we would mute some logs since they could get really noisy when one EE was down but others were functioning fine. Now we only have one EE and are upcheck-ing it less, it makes sense to always produce logs. This PR purposefully does not achieve: - Updating all occurances of "engines" to "engine". I'm trying to keep the diff small and manageable. We can come back for this. 
## Additional Info NA --- .../tests/payload_invalidation.rs | 5 + beacon_node/execution_layer/src/engines.rs | 255 +++++++----------- beacon_node/execution_layer/src/lib.rs | 132 +++------ 3 files changed, 147 insertions(+), 245 deletions(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index e37ed286bc..5cd0a04c37 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1003,6 +1003,11 @@ async fn payload_preparation_before_transition_block() { let rig = InvalidPayloadRig::new(); let el = rig.execution_layer(); + // Run the watchdog routine so that the status of the execution engine is set. This ensures + // that we don't end up with `eth_syncing` requests later in this function that will impede + // testing. + el.watchdog_task().await; + let head = rig.harness.chain.head_snapshot(); assert_eq!( head.beacon_block diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index d44d81c674..eb188c61f8 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -5,8 +5,10 @@ use crate::engine_api::{ }; use crate::HttpJsonRpc; use lru::LruCache; -use slog::{crit, debug, info, warn, Logger}; +use slog::{debug, error, info, Logger}; use std::future::Future; +use std::sync::Arc; +use task_executor::TaskExecutor; use tokio::sync::{Mutex, RwLock}; use types::{Address, ExecutionBlockHash, Hash256}; @@ -16,7 +18,7 @@ use types::{Address, ExecutionBlockHash, Hash256}; const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512; /// Stores the remembered state of a engine. -#[derive(Copy, Clone, PartialEq)] +#[derive(Copy, Clone, PartialEq, Debug)] enum EngineState { Synced, Offline, @@ -31,22 +33,6 @@ pub struct ForkChoiceState { pub finalized_block_hash: ExecutionBlockHash, } -/// Used to enable/disable logging on some tasks. 
-#[derive(Copy, Clone, PartialEq)] -pub enum Logging { - Enabled, - Disabled, -} - -impl Logging { - pub fn is_enabled(&self) -> bool { - match self { - Logging::Enabled => true, - Logging::Disabled => false, - } - } -} - #[derive(Hash, PartialEq, std::cmp::Eq)] struct PayloadIdCacheKey { pub head_block_hash: ExecutionBlockHash, @@ -69,17 +55,19 @@ pub struct Engine { payload_id_cache: Mutex>, state: RwLock, pub latest_forkchoice_state: RwLock>, + pub executor: TaskExecutor, pub log: Logger, } impl Engine { /// Creates a new, offline engine. - pub fn new(api: HttpJsonRpc, log: &Logger) -> Self { + pub fn new(api: HttpJsonRpc, executor: TaskExecutor, log: &Logger) -> Self { Self { api, payload_id_cache: Mutex::new(LruCache::new(PAYLOAD_ID_LRU_CACHE_SIZE)), state: RwLock::new(EngineState::Offline), latest_forkchoice_state: Default::default(), + executor, log: log.clone(), } } @@ -179,164 +167,117 @@ impl Engine { pub async fn is_synced(&self) -> bool { *self.state.read().await == EngineState::Synced } + /// Run the `EngineApi::upcheck` function if the node's last known state is not synced. This /// might be used to recover the node if offline. - pub async fn upcheck_not_synced(&self, logging: Logging) { - let mut state_lock = self.state.write().await; - if *state_lock != EngineState::Synced { - match self.api.upcheck().await { - Ok(()) => { - if logging.is_enabled() { - info!( - self.log, - "Execution engine online"; - ); - } + pub async fn upcheck(&self) { + let state: EngineState = match self.api.upcheck().await { + Ok(()) => { + let mut state = self.state.write().await; + + if *state != EngineState::Synced { + info!( + self.log, + "Execution engine online"; + ); + // Send the node our latest forkchoice_state. 
self.send_latest_forkchoice_state().await; - - *state_lock = EngineState::Synced + } else { + debug!( + self.log, + "Execution engine online"; + ); } - Err(EngineApiError::IsSyncing) => { - if logging.is_enabled() { - warn!( - self.log, - "Execution engine syncing"; - ) - } - // Send the node our latest forkchoice_state, it may assist with syncing. - self.send_latest_forkchoice_state().await; - - *state_lock = EngineState::Syncing - } - Err(EngineApiError::Auth(err)) => { - if logging.is_enabled() { - warn!( - self.log, - "Failed jwt authorization"; - "error" => ?err, - ); - } - - *state_lock = EngineState::AuthFailed - } - Err(e) => { - if logging.is_enabled() { - warn!( - self.log, - "Execution engine offline"; - "error" => ?e, - ) - } - } + *state = EngineState::Synced; + *state } - } + Err(EngineApiError::IsSyncing) => { + let mut state = self.state.write().await; + *state = EngineState::Syncing; + *state + } + Err(EngineApiError::Auth(err)) => { + error!( + self.log, + "Failed jwt authorization"; + "error" => ?err, + ); - if *state_lock != EngineState::Synced && logging.is_enabled() { - crit!( - self.log, - "No synced execution engines"; - ) - } - } - - /// Run `func` on the node. - /// - /// This function might try to run `func` twice. If the node returns an error it will try to - /// upcheck it and then run the function again. - pub async fn first_success<'a, F, G, H>(&'a self, func: F) -> Result - where - F: Fn(&'a Engine) -> G + Copy, - G: Future>, - { - match self.first_success_without_retry(func).await { - Ok(result) => Ok(result), + let mut state = self.state.write().await; + *state = EngineState::AuthFailed; + *state + } Err(e) => { - debug!(self.log, "First engine call failed. Retrying"; "err" => ?e); - // Try to recover the node. - self.upcheck_not_synced(Logging::Enabled).await; - // Try again. 
- self.first_success_without_retry(func).await - } - } - } + error!( + self.log, + "Error during execution engine upcheck"; + "error" => ?e, + ); - /// Run `func` on the node. - pub async fn first_success_without_retry<'a, F, G, H>( - &'a self, - func: F, - ) -> Result - where - F: Fn(&'a Engine) -> G, - G: Future>, - { - let (engine_synced, engine_auth_failed) = { - let state = self.state.read().await; - ( - *state == EngineState::Synced, - *state == EngineState::AuthFailed, - ) + let mut state = self.state.write().await; + *state = EngineState::Offline; + *state + } }; - if engine_synced { - match func(self).await { - Ok(result) => Ok(result), - Err(error) => { - debug!( - self.log, - "Execution engine call failed"; - "error" => ?error, - ); - *self.state.write().await = EngineState::Offline; - Err(EngineError::Api { error }) - } - } - } else if engine_auth_failed { - Err(EngineError::Auth) - } else { - Err(EngineError::Offline) - } + + debug!( + self.log, + "Execution engine upcheck complete"; + "state" => ?state, + ); } - /// Runs `func` on the node. + /// Run `func` on the node regardless of the node's current state. /// - /// This function might try to run `func` twice. If all nodes return an error on the first time - /// it runs, it will try to upcheck all offline nodes and then run the function again. - pub async fn broadcast<'a, F, G, H>(&'a self, func: F) -> Result - where - F: Fn(&'a Engine) -> G + Copy, - G: Future>, - { - match self.broadcast_without_retry(func).await { - Err(EngineError::Offline { .. }) => { - self.upcheck_not_synced(Logging::Enabled).await; - self.broadcast_without_retry(func).await - } - other => other, - } - } - - /// Runs `func` on the node if it's last state is not offline. - pub async fn broadcast_without_retry<'a, F, G, H>(&'a self, func: F) -> Result + /// ## Note + /// + /// This function takes locks on `self.state`, holding a conflicting lock might cause a + /// deadlock. 
+ pub async fn request<'a, F, G, H>(self: &'a Arc, func: F) -> Result where F: Fn(&'a Engine) -> G, G: Future>, { - let func = &func; - if *self.state.read().await == EngineState::Offline { - Err(EngineError::Offline) - } else { - match func(self).await { - Ok(res) => Ok(res), - Err(error) => { - debug!( - self.log, - "Execution engine call failed"; - "error" => ?error, + match func(self).await { + Ok(result) => { + // Take a clone *without* holding the read-lock since the `upcheck` function will + // take a write-lock. + let state: EngineState = *self.state.read().await; + + // If this request just returned successfully but we don't think this node is + // synced, check to see if it just became synced. This helps to ensure that the + // networking stack can get fast feedback about a synced engine. + if state != EngineState::Synced { + // Spawn the upcheck in another task to avoid slowing down this request. + let inner_self = self.clone(); + self.executor.spawn( + async move { inner_self.upcheck().await }, + "upcheck_after_success", ); - *self.state.write().await = EngineState::Offline; - Err(EngineError::Api { error }) } + + Ok(result) + } + Err(error) => { + error!( + self.log, + "Execution engine call failed"; + "error" => ?error, + ); + + // The node just returned an error, run an upcheck so we can update the endpoint + // state. + // + // Spawn the upcheck in another task to avoid slowing down this request. 
+ let inner_self = self.clone(); + self.executor.spawn( + async move { inner_self.upcheck().await }, + "upcheck_after_error", + ); + + Err(EngineError::Api { error }) } } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 47424ca0f8..9bb4ead350 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -10,7 +10,7 @@ use engine_api::Error as ApiError; pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; pub use engines::ForkChoiceState; -use engines::{Engine, EngineError, Logging}; +use engines::{Engine, EngineError}; use lru::LruCache; use payload_status::process_payload_status; pub use payload_status::PayloadStatus; @@ -27,7 +27,7 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; use tokio::{ sync::{Mutex, MutexGuard, RwLock}, - time::{sleep, sleep_until, Instant}, + time::sleep, }; use types::{ BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, @@ -101,7 +101,7 @@ pub struct Proposer { } struct Inner { - engine: Engine, + engine: Arc, builder: Option, execution_engine_forkchoice_lock: Mutex<()>, suggested_fee_recipient: Option
, @@ -132,22 +132,15 @@ pub struct Config { pub default_datadir: PathBuf, } -/// Provides access to one or more execution engines and provides a neat interface for consumption -/// by the `BeaconChain`. -/// -/// When there is more than one execution node specified, the others will be used in a "fallback" -/// fashion. Some requests may be broadcast to all nodes and others might only be sent to the first -/// node that returns a valid response. Ultimately, the purpose of fallback nodes is to provide -/// redundancy in the case where one node is offline. -/// -/// The fallback nodes have an ordering. The first supplied will be the first contacted, and so on. +/// Provides access to one execution engine and provides a neat interface for consumption by the +/// `BeaconChain`. #[derive(Clone)] pub struct ExecutionLayer { inner: Arc>, } impl ExecutionLayer { - /// Instantiate `Self` with Execution engines specified using `Config`, all using the JSON-RPC via HTTP. + /// Instantiate `Self` with an Execution engine specified in `Config`, using JSON-RPC via HTTP. 
pub fn from_config(config: Config, executor: TaskExecutor, log: Logger) -> Result { let Config { execution_endpoints: urls, @@ -202,7 +195,7 @@ impl ExecutionLayer { let auth = Auth::new(jwt_key, jwt_id, jwt_version); debug!(log, "Loaded execution endpoint"; "endpoint" => %execution_url, "jwt_path" => ?secret_file.as_path()); let api = HttpJsonRpc::new_with_auth(execution_url, auth).map_err(Error::ApiError)?; - Engine::new(api, &log) + Engine::new(api, executor.clone(), &log) }; let builder = builder_url @@ -210,7 +203,7 @@ impl ExecutionLayer { .transpose()?; let inner = Inner { - engine, + engine: Arc::new(engine), builder, execution_engine_forkchoice_lock: <_>::default(), suggested_fee_recipient, @@ -229,7 +222,7 @@ impl ExecutionLayer { } impl ExecutionLayer { - fn engines(&self) -> &Engine { + fn engine(&self) -> &Arc { &self.inner.engine } @@ -276,54 +269,18 @@ impl ExecutionLayer { self.executor().spawn(generate_future(self.clone()), name); } - /// Spawns a routine which attempts to keep the execution engines online. + /// Spawns a routine which attempts to keep the execution engine online. pub fn spawn_watchdog_routine(&self, slot_clock: S) { let watchdog = |el: ExecutionLayer| async move { // Run one task immediately. el.watchdog_task().await; - let recurring_task = - |el: ExecutionLayer, now: Instant, duration_to_next_slot: Duration| async move { - // We run the task three times per slot. - // - // The interval between each task is 1/3rd of the slot duration. This matches nicely - // with the attestation production times (unagg. at 1/3rd, agg at 2/3rd). - // - // Each task is offset by 3/4ths of the interval. - // - // On mainnet, this means we will run tasks at: - // - // - 3s after slot start: 1s before publishing unaggregated attestations. - // - 7s after slot start: 1s before publishing aggregated attestations. - // - 11s after slot start: 1s before the next slot starts. 
- let interval = duration_to_next_slot / 3; - let offset = (interval / 4) * 3; - - let first_execution = duration_to_next_slot + offset; - let second_execution = first_execution + interval; - let third_execution = second_execution + interval; - - sleep_until(now + first_execution).await; - el.engines().upcheck_not_synced(Logging::Disabled).await; - - sleep_until(now + second_execution).await; - el.engines().upcheck_not_synced(Logging::Disabled).await; - - sleep_until(now + third_execution).await; - el.engines().upcheck_not_synced(Logging::Disabled).await; - }; - // Start the loop to periodically update. loop { - if let Some(duration) = slot_clock.duration_to_next_slot() { - let now = Instant::now(); - - // Spawn a new task rather than waiting for this to finish. This ensure that a - // slow run doesn't prevent the next run from starting. - el.spawn(|el| recurring_task(el, now, duration), "exec_watchdog_task"); - } else { - error!(el.log(), "Failed to spawn watchdog task"); - } + el.spawn( + |el| async move { el.watchdog_task().await }, + "exec_watchdog_task", + ); sleep(slot_clock.slot_duration()).await; } }; @@ -333,8 +290,7 @@ impl ExecutionLayer { /// Performs a single execution of the watchdog routine. pub async fn watchdog_task(&self) { - // Disable logging since this runs frequently and may get annoying. - self.engines().upcheck_not_synced(Logging::Disabled).await; + self.engine().upcheck().await; } /// Spawns a routine which cleans the cached proposer data periodically. @@ -394,9 +350,9 @@ impl ExecutionLayer { self.spawn(routine, "exec_config_poll"); } - /// Returns `true` if there is at least one synced and reachable engine. + /// Returns `true` if the execution engine is synced and reachable. 
pub async fn is_synced(&self) -> bool { - self.engines().is_synced().await + self.engine().is_synced().await } /// Updates the proposer preparation data provided by validators @@ -632,8 +588,8 @@ impl ExecutionLayer { "timestamp" => timestamp, "parent_hash" => ?parent_hash, ); - self.engines() - .first_success(|engine| async move { + self.engine() + .request(|engine| async move { let payload_id = if let Some(id) = engine .get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient) .await @@ -736,12 +692,12 @@ impl ExecutionLayer { "block_number" => execution_payload.block_number, ); - let broadcast_result = self - .engines() - .broadcast(|engine| engine.api.new_payload_v1(execution_payload.clone())) + let result = self + .engine() + .request(|engine| engine.api.new_payload_v1(execution_payload.clone())) .await; - process_payload_status(execution_payload.block_hash, broadcast_result, self.log()) + process_payload_status(execution_payload.block_hash, result, self.log()) .map_err(Box::new) .map_err(Error::EngineError) } @@ -867,13 +823,13 @@ impl ExecutionLayer { finalized_block_hash, }; - self.engines() + self.engine() .set_latest_forkchoice_state(forkchoice_state) .await; - let broadcast_result = self - .engines() - .broadcast(|engine| async move { + let result = self + .engine() + .request(|engine| async move { engine .notify_forkchoice_updated(forkchoice_state, payload_attributes, self.log()) .await @@ -882,7 +838,7 @@ impl ExecutionLayer { process_payload_status( head_block_hash, - broadcast_result.map(|response| response.payload_status), + result.map(|response| response.payload_status), self.log(), ) .map_err(Box::new) @@ -896,12 +852,12 @@ impl ExecutionLayer { terminal_block_number: 0, }; - let broadcast_result = self - .engines() - .broadcast(|engine| engine.api.exchange_transition_configuration_v1(local)) + let result = self + .engine() + .request(|engine| engine.api.exchange_transition_configuration_v1(local)) .await; - match 
broadcast_result { + match result { Ok(remote) => { if local.terminal_total_difficulty != remote.terminal_total_difficulty || local.terminal_block_hash != remote.terminal_block_hash @@ -953,8 +909,8 @@ impl ExecutionLayer { ); let hash_opt = self - .engines() - .first_success(|engine| async move { + .engine() + .request(|engine| async move { let terminal_block_hash = spec.terminal_block_hash; if terminal_block_hash != ExecutionBlockHash::zero() { if self @@ -1040,8 +996,8 @@ impl ExecutionLayer { /// - `Some(true)` if the given `block_hash` is the terminal proof-of-work block. /// - `Some(false)` if the given `block_hash` is certainly *not* the terminal proof-of-work /// block. - /// - `None` if the `block_hash` or its parent were not present on the execution engines. - /// - `Err(_)` if there was an error connecting to the execution engines. + /// - `None` if the `block_hash` or its parent were not present on the execution engine. + /// - `Err(_)` if there was an error connecting to the execution engine. /// /// ## Fallback Behaviour /// @@ -1069,8 +1025,8 @@ impl ExecutionLayer { &[metrics::IS_VALID_TERMINAL_POW_BLOCK_HASH], ); - self.engines() - .broadcast(|engine| async move { + self.engine() + .request(|engine| async move { if let Some(pow_block) = self.get_pow_block(engine, block_hash).await? { if let Some(pow_parent) = self.get_pow_block(engine, pow_block.parent_hash).await? 
@@ -1136,8 +1092,8 @@ impl ExecutionLayer { &self, hash: ExecutionBlockHash, ) -> Result>, Error> { - self.engines() - .first_success(|engine| async move { + self.engine() + .request(|engine| async move { self.get_payload_by_block_hash_from_engine(engine, hash) .await }) @@ -1240,7 +1196,7 @@ mod test { MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_block_prior_to_terminal_block() .with_terminal_block(|spec, el, _| async move { - el.engines().upcheck_not_synced(Logging::Disabled).await; + el.engine().upcheck().await; assert_eq!(el.get_terminal_pow_block_hash(&spec).await.unwrap(), None) }) .await @@ -1260,7 +1216,7 @@ mod test { MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, terminal_block| async move { - el.engines().upcheck_not_synced(Logging::Disabled).await; + el.engine().upcheck().await; assert_eq!( el.is_valid_terminal_pow_block_hash(terminal_block.unwrap().block_hash, &spec) .await @@ -1277,7 +1233,7 @@ mod test { MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, terminal_block| async move { - el.engines().upcheck_not_synced(Logging::Disabled).await; + el.engine().upcheck().await; let invalid_terminal_block = terminal_block.unwrap().parent_hash; assert_eq!( @@ -1296,7 +1252,7 @@ mod test { MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, _| async move { - el.engines().upcheck_not_synced(Logging::Disabled).await; + el.engine().upcheck().await; let missing_terminal_block = ExecutionBlockHash::repeat_byte(42); assert_eq!( From 1f54e10b7b84c31df9ae6f65b92f7b5827c0d345 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 13 Jul 2022 23:07:49 +0000 Subject: [PATCH 064/184] Do not interpret "latest valid hash" as identifying a valid hash (#3327) ## Issue Addressed NA ## Proposed Changes After some discussion in 
Discord with @mkalinin it was raised that it was not the intention of the engine API to have CLs validate the `latest_valid_hash` (LVH) and all ancestors. Whilst I believe the engine API is being updated such that the LVH *must* identify a valid hash or be set to some junk value, I'm not confident that we can rely upon the LVH as being valid (at least for now) due to the confusion surrounding it. Being able to validate blocks via the LVH is a relatively minor optimisation; if the LVH value ends up becoming our head we'll send an fcU and get the VALID status there. Falsely marking a block as valid has serious consequences and since it's a minor optimisation to use LVH I think that we don't take the risk. For clarity, we will still *invalidate* the *descendants* of the LVH, we just won't *validate* the *ancestors*. ## Additional Info NA --- .../tests/payload_invalidation.rs | 30 ++++++++++++++----- consensus/proto_array/src/proto_array.rs | 3 -- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 5cd0a04c37..a4e62cf969 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -390,7 +390,7 @@ async fn invalid_payload_invalidates_parent() { }) .await; - assert!(rig.execution_status(roots[0]).is_valid_and_post_bellatrix()); + assert!(rig.execution_status(roots[0]).is_optimistic()); assert!(rig.execution_status(roots[1]).is_invalid()); assert!(rig.execution_status(roots[2]).is_invalid()); @@ -532,9 +532,9 @@ async fn pre_finalized_latest_valid_hash() { /// Ensure that a `latest_valid_hash` will: /// /// - Invalidate descendants of `latest_valid_root`. -/// - Validate `latest_valid_root` and its ancestors. +/// - Will not validate `latest_valid_root` and its ancestors. 
#[tokio::test] -async fn latest_valid_hash_will_validate() { +async fn latest_valid_hash_will_not_validate() { const LATEST_VALID_SLOT: u64 = 3; let mut rig = InvalidPayloadRig::new().enable_attestations(); @@ -571,8 +571,10 @@ async fn latest_valid_hash_will_validate() { assert!(execution_status.is_invalid()) } else if slot == 0 { assert!(execution_status.is_irrelevant()) - } else { + } else if slot == 1 { assert!(execution_status.is_valid_and_post_bellatrix()) + } else { + assert!(execution_status.is_optimistic()) } } } @@ -693,9 +695,15 @@ async fn invalidates_all_descendants() { } let execution_status = rig.execution_status(root); - if slot <= latest_valid_slot { - // Blocks prior to the latest valid hash are valid. + if slot == 0 { + // Genesis block is pre-bellatrix. + assert!(execution_status.is_irrelevant()); + } else if slot == 1 { + // First slot was imported as valid. assert!(execution_status.is_valid_and_post_bellatrix()); + } else if slot <= latest_valid_slot { + // Blocks prior to and included the latest valid hash are not marked as valid. + assert!(execution_status.is_optimistic()); } else { // Blocks after the latest valid hash are invalid. assert!(execution_status.is_invalid()); @@ -769,9 +777,15 @@ async fn switches_heads() { } let execution_status = rig.execution_status(root); - if slot <= latest_valid_slot { - // Blocks prior to the latest valid hash are valid. + if slot == 0 { + // Genesis block is pre-bellatrix. + assert!(execution_status.is_irrelevant()); + } else if slot == 1 { + // First slot was imported as valid. assert!(execution_status.is_valid_and_post_bellatrix()); + } else if slot <= latest_valid_slot { + // Blocks prior to and included the latest valid hash are not marked as valid. + assert!(execution_status.is_optimistic()); } else { // Blocks after the latest valid hash are invalid. 
assert!(execution_status.is_invalid()); diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 22d457ca3e..f3ee4ca48f 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -491,9 +491,6 @@ impl ProtoArray { node.best_descendant = None } - // It might be new knowledge that this block is valid, ensure that it and all - // ancestors are marked as valid. - self.propagate_execution_payload_validation_by_index(index)?; break; } } From 98a9626ef50d87b9943e284aa2a2467e63ca8df2 Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Fri, 15 Jul 2022 07:31:19 +0000 Subject: [PATCH 065/184] Bump the MSRV to 1.62 and using `#[derive(Default)]` on enums (#3304) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Issue Addressed N/A ## Proposed Changes Since Rust 1.62, we can use `#[derive(Default)]` on enums. ✨ https://blog.rust-lang.org/2022/06/30/Rust-1.62.0.html#default-enum-variants There are no changes to functionality in this PR, just replaced the `Default` trait implementation with `#[derive(Default)]`. 
--- .github/workflows/local-testnet.yml | 3 +++ Dockerfile | 2 +- beacon_node/client/src/config.rs | 9 ++------- .../src/peer_manager/peerdb/peer_info.rs | 9 ++------- beacon_node/lighthouse_network/src/types/topics.rs | 9 ++------- crypto/eth2_keystore/src/json_keystore/kdf_module.rs | 9 ++------- lighthouse/Cargo.toml | 2 +- testing/ef_tests/src/bls_setting.rs | 9 ++------- 8 files changed, 15 insertions(+), 37 deletions(-) diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 13c1af7ab6..b68135e4d8 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -18,6 +18,9 @@ jobs: steps: - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install ganache run: npm install ganache@latest --global diff --git a/Dockerfile b/Dockerfile index aa2853ce4f..6732c7eaf8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.58.1-bullseye AS builder +FROM rust:1.62.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG FEATURES diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index b13ca8f489..a5d5b37c7a 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -10,7 +10,7 @@ use types::{Graffiti, PublicKeyBytes}; const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db"; /// Defines how the client should initialize the `BeaconChain` and other components. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Default)] pub enum ClientGenesis { /// Creates a genesis state as per the 2019 Canada interop specifications. Interop { @@ -21,6 +21,7 @@ pub enum ClientGenesis { FromStore, /// Connects to an eth1 node and waits until it can create the genesis state from the deposit /// contract. 
+ #[default] DepositContract, /// Loads the genesis state from SSZ-encoded `BeaconState` bytes. /// @@ -38,12 +39,6 @@ pub enum ClientGenesis { }, } -impl Default for ClientGenesis { - fn default() -> Self { - Self::DepositContract - } -} - /// The core configuration of a Lighthouse beacon node. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index 6273356b8f..555266d0e2 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -477,7 +477,7 @@ pub enum ConnectionDirection { } /// Connection Status of the peer. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub enum PeerConnectionStatus { /// The peer is connected. Connected { @@ -507,6 +507,7 @@ pub enum PeerConnectionStatus { since: Instant, }, /// The connection status has not been specified. + #[default] Unknown, } @@ -561,9 +562,3 @@ impl Serialize for PeerConnectionStatus { } } } - -impl Default for PeerConnectionStatus { - fn default() -> Self { - PeerConnectionStatus::Unknown - } -} diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 3dd7ad8470..825b1088b2 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -78,18 +78,13 @@ impl std::fmt::Display for GossipKind { } /// The known encoding types for gossipsub messages. -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash, Default)] pub enum GossipEncoding { /// Messages are encoded with SSZSnappy. 
+ #[default] SSZSnappy, } -impl Default for GossipEncoding { - fn default() -> Self { - GossipEncoding::SSZSnappy - } -} - impl GossipTopic { pub fn new(kind: GossipKind, encoding: GossipEncoding, fork_digest: [u8; 4]) -> Self { GossipTopic { diff --git a/crypto/eth2_keystore/src/json_keystore/kdf_module.rs b/crypto/eth2_keystore/src/json_keystore/kdf_module.rs index a1295e859c..94aeab0682 100644 --- a/crypto/eth2_keystore/src/json_keystore/kdf_module.rs +++ b/crypto/eth2_keystore/src/json_keystore/kdf_module.rs @@ -58,9 +58,10 @@ impl Kdf { } /// PRF for use in `pbkdf2`. -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Default)] pub enum Prf { #[serde(rename = "hmac-sha256")] + #[default] HmacSha256, } @@ -73,12 +74,6 @@ impl Prf { } } -impl Default for Prf { - fn default() -> Self { - Prf::HmacSha256 - } -} - /// Parameters for `pbkdf2` key derivation. #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(deny_unknown_fields)] diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index f7742ef0b9..920cfa49c1 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -4,7 +4,7 @@ version = "2.3.2-rc.0" authors = ["Sigma Prime "] edition = "2021" autotests = false -rust-version = "1.58" +rust-version = "1.62" [features] # Writes debugging .ssz files to /tmp during block processing. 
diff --git a/testing/ef_tests/src/bls_setting.rs b/testing/ef_tests/src/bls_setting.rs index add7d8b7bd..24aaf60080 100644 --- a/testing/ef_tests/src/bls_setting.rs +++ b/testing/ef_tests/src/bls_setting.rs @@ -2,20 +2,15 @@ use self::BlsSetting::*; use crate::error::Error; use serde_repr::Deserialize_repr; -#[derive(Deserialize_repr, Debug, Clone, Copy)] +#[derive(Deserialize_repr, Debug, Clone, Copy, Default)] #[repr(u8)] pub enum BlsSetting { + #[default] Flexible = 0, Required = 1, Ignored = 2, } -impl Default for BlsSetting { - fn default() -> Self { - Flexible - } -} - impl BlsSetting { /// Check the BLS setting and skip the test if it isn't compatible with the crypto config. pub fn check(self) -> Result<(), Error> { From 28b0ff27ff81b2348418dfecc33749a7e0f58fc7 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Fri, 15 Jul 2022 07:31:20 +0000 Subject: [PATCH 066/184] Ignored sync jobs 2 (#3317) ## Issue Addressed Duplicate of #3269. Making this since @divagant-martian opened the previous PR and she can't approve her own PR :smile: Co-authored-by: Diva M --- .../network/src/beacon_processor/mod.rs | 24 ++- .../network/src/beacon_processor/tests.rs | 48 +++++- .../work_reprocessing_queue.rs | 144 ++++++++++++++--- .../beacon_processor/worker/gossip_methods.rs | 4 +- .../beacon_processor/worker/sync_methods.rs | 30 +++- .../network/src/sync/block_lookups/mod.rs | 150 +++++++++++------- .../network/src/sync/block_lookups/tests.rs | 71 ++++++++- beacon_node/network/src/sync/manager.rs | 24 ++- 8 files changed, 396 insertions(+), 99 deletions(-) diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 6f75e1fb23..a08f34f707 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -66,7 +66,7 @@ use types::{ SyncCommitteeMessage, SyncSubnetId, }; use work_reprocessing_queue::{ - spawn_reprocess_scheduler, QueuedAggregate, QueuedUnaggregate, 
ReadyWork, + spawn_reprocess_scheduler, QueuedAggregate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, }; use worker::{Toolbox, Worker}; @@ -75,7 +75,7 @@ mod tests; mod work_reprocessing_queue; mod worker; -use crate::beacon_processor::work_reprocessing_queue::QueuedBlock; +use crate::beacon_processor::work_reprocessing_queue::QueuedGossipBlock; pub use worker::{ ChainSegmentProcessId, FailureMode, GossipAggregatePackage, GossipAttestationPackage, }; @@ -501,6 +501,7 @@ impl WorkEvent { block, seen_timestamp, process_type, + should_process: true, }, } } @@ -565,7 +566,7 @@ impl WorkEvent { impl std::convert::From> for WorkEvent { fn from(ready_work: ReadyWork) -> Self { match ready_work { - ReadyWork::Block(QueuedBlock { + ReadyWork::Block(QueuedGossipBlock { peer_id, block, seen_timestamp, @@ -577,6 +578,20 @@ impl std::convert::From> for WorkEvent { seen_timestamp, }, }, + ReadyWork::RpcBlock(QueuedRpcBlock { + block, + seen_timestamp, + process_type, + should_process, + }) => Self { + drop_during_sync: false, + work: Work::RpcBlock { + block, + seen_timestamp, + process_type, + should_process, + }, + }, ReadyWork::Unaggregate(QueuedUnaggregate { peer_id, message_id, @@ -695,6 +710,7 @@ pub enum Work { block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, + should_process: bool, }, ChainSegment { process_id: ChainSegmentProcessId, @@ -1521,12 +1537,14 @@ impl BeaconProcessor { block, seen_timestamp, process_type, + should_process, } => task_spawner.spawn_async(worker.process_rpc_block( block, seen_timestamp, process_type, work_reprocessing_tx, duplicate_cache, + should_process, )), /* * Verification for a chain segment (multiple blocks). 
diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index a39ca2ec33..d437cf0bed 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -1,7 +1,9 @@ #![cfg(not(debug_assertions))] // Tests are too slow in debug. #![cfg(test)] -use crate::beacon_processor::work_reprocessing_queue::QUEUED_ATTESTATION_DELAY; +use crate::beacon_processor::work_reprocessing_queue::{ + QUEUED_ATTESTATION_DELAY, QUEUED_RPC_BLOCK_DELAY, +}; use crate::beacon_processor::*; use crate::{service::NetworkMessage, sync::SyncMessage}; use beacon_chain::test_utils::{ @@ -54,6 +56,7 @@ struct TestRig { work_journal_rx: mpsc::Receiver<&'static str>, _network_rx: mpsc::UnboundedReceiver>, _sync_rx: mpsc::UnboundedReceiver>, + duplicate_cache: DuplicateCache, _harness: BeaconChainHarness, } @@ -185,6 +188,7 @@ impl TestRig { let (work_journal_tx, work_journal_rx) = mpsc::channel(16_364); + let duplicate_cache = DuplicateCache::default(); BeaconProcessor { beacon_chain: Arc::downgrade(&chain), network_tx, @@ -193,7 +197,7 @@ impl TestRig { executor, max_workers: cmp::max(1, num_cpus::get()), current_workers: 0, - importing_blocks: Default::default(), + importing_blocks: duplicate_cache.clone(), log: log.clone(), } .spawn_manager(beacon_processor_rx, Some(work_journal_tx)); @@ -211,6 +215,7 @@ impl TestRig { work_journal_rx, _network_rx, _sync_rx, + duplicate_cache, _harness: harness, } } @@ -246,6 +251,15 @@ impl TestRig { self.beacon_processor_tx.try_send(event).unwrap(); } + pub fn enqueue_single_lookup_rpc_block(&self) { + let event = WorkEvent::rpc_beacon_block( + self.next_block.clone(), + std::time::Duration::default(), + BlockProcessType::SingleBlock { id: 1 }, + ); + self.beacon_processor_tx.try_send(event).unwrap(); + } + pub fn enqueue_unaggregated_attestation(&self) { let (attestation, subnet_id) = self.attestations.first().unwrap().clone(); 
self.beacon_processor_tx @@ -828,3 +842,33 @@ async fn import_misc_gossip_ops() { "op pool should have one more exit" ); } + +/// Ensure that rpc block going to the reprocessing queue flow +/// works when the duplicate cache handle is held by another task. +#[tokio::test] +async fn test_rpc_block_reprocessing() { + let mut rig = TestRig::new(SMALL_CHAIN).await; + let next_block_root = rig.next_block.canonical_root(); + // Insert the next block into the duplicate cache manually + let handle = rig.duplicate_cache.check_and_insert(next_block_root); + rig.enqueue_single_lookup_rpc_block(); + + rig.assert_event_journal(&[RPC_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; + // next_block shouldn't be processed since it couldn't get the + // duplicate cache handle + assert_ne!(next_block_root, rig.head_root()); + + drop(handle); + + // The block should arrive at the beacon processor again after + // the specified delay. + tokio::time::sleep(QUEUED_RPC_BLOCK_DELAY).await; + + rig.assert_event_journal(&[RPC_BLOCK]).await; + // Add an extra delay for block processing + tokio::time::sleep(Duration::from_millis(10)).await; + // head should update to next block now since the duplicate + // cache handle was dropped. + assert_eq!(next_block_root, rig.head_root()); +} diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index 33c15cf06b..efe8d3bf12 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -12,6 +12,7 @@ //! block will be re-queued until their block is imported, or until they expire. 
use super::MAX_SCHEDULED_WORK_QUEUE_LEN; use crate::metrics; +use crate::sync::manager::BlockProcessType; use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use fnv::FnvHashMap; use futures::task::Poll; @@ -22,16 +23,18 @@ use slog::{crit, debug, error, warn, Logger}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; use std::pin::Pin; +use std::sync::Arc; use std::task::Context; use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::error::Error as TimeError; use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; -use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SubnetId}; +use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, SubnetId}; const TASK_NAME: &str = "beacon_processor_reprocess_queue"; -const BLOCKS: &str = "blocks"; +const GOSSIP_BLOCKS: &str = "gossip_blocks"; +const RPC_BLOCKS: &str = "rpc_blocks"; const ATTESTATIONS: &str = "attestations"; /// Queue blocks for re-processing with an `ADDITIONAL_QUEUED_BLOCK_DELAY` after the slot starts. @@ -41,6 +44,9 @@ const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); /// For how long to queue aggregated and unaggregated attestations for re-processing. pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); +/// For how long to queue rpc blocks before sending them back for reprocessing. +pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(3); + /// Set an arbitrary upper-bound on the number of queued blocks to avoid DoS attacks. The fact that /// we signature-verify blocks before putting them in the queue *should* protect against this, but /// it's nice to have extra protection. @@ -52,7 +58,10 @@ const MAXIMUM_QUEUED_ATTESTATIONS: usize = 16_384; /// Messages that the scheduler can receive. 
pub enum ReprocessQueueMessage { /// A block that has been received early and we should queue for later processing. - EarlyBlock(QueuedBlock), + EarlyBlock(QueuedGossipBlock), + /// A gossip block for hash `X` is being imported, we should queue the rpc block for the same + /// hash until the gossip block is imported. + RpcBlock(QueuedRpcBlock), /// A block that was successfully processed. We use this to handle attestations for unknown /// blocks. BlockImported(Hash256), @@ -64,7 +73,8 @@ pub enum ReprocessQueueMessage { /// Events sent by the scheduler once they are ready for re-processing. pub enum ReadyWork { - Block(QueuedBlock), + Block(QueuedGossipBlock), + RpcBlock(QueuedRpcBlock), Unaggregate(QueuedUnaggregate), Aggregate(QueuedAggregate), } @@ -90,16 +100,30 @@ pub struct QueuedAggregate { } /// A block that arrived early and has been queued for later import. -pub struct QueuedBlock { +pub struct QueuedGossipBlock { pub peer_id: PeerId, pub block: Box>, pub seen_timestamp: Duration, } +/// A block that arrived for processing when the same block was being imported over gossip. +/// It is queued for later import. +pub struct QueuedRpcBlock { + pub block: Arc>, + pub process_type: BlockProcessType, + pub seen_timestamp: Duration, + /// Indicates if the beacon chain should process this block or not. + /// We use this to ignore block processing when rpc block queues are full. + pub should_process: bool, +} + /// Unifies the different messages processed by the block delay queue. enum InboundEvent { - /// A block that was queued for later processing and is ready for import. - ReadyBlock(QueuedBlock), + /// A gossip block that was queued for later processing and is ready for import. + ReadyGossipBlock(QueuedGossipBlock), + /// A rpc block that was queued because the same gossip block was being imported + /// will now be retried for import. + ReadyRpcBlock(QueuedRpcBlock), /// An aggregated or unaggregated attestation is ready for re-processing. 
ReadyAttestation(QueuedAttestationId), /// A `DelayQueue` returned an error. @@ -117,13 +141,15 @@ struct ReprocessQueue { /* Queues */ /// Queue to manage scheduled early blocks. - block_delay_queue: DelayQueue>, + gossip_block_delay_queue: DelayQueue>, + /// Queue to manage scheduled early blocks. + rpc_block_delay_queue: DelayQueue>, /// Queue to manage scheduled attestations. attestations_delay_queue: DelayQueue, /* Queued items */ /// Queued blocks. - queued_block_roots: HashSet, + queued_gossip_block_roots: HashSet, /// Queued aggregated attestations. queued_aggregates: FnvHashMap, DelayKey)>, /// Queued attestations. @@ -135,6 +161,7 @@ struct ReprocessQueue { /// Next attestation id, used for both aggregated and unaggregated attestations next_attestation: usize, early_block_debounce: TimeLatch, + rpc_block_debounce: TimeLatch, attestation_delay_debounce: TimeLatch, } @@ -167,12 +194,26 @@ impl Stream for ReprocessQueue { // // The sequential nature of blockchains means it is generally better to try and import all // existing blocks before new ones. - match self.block_delay_queue.poll_expired(cx) { + match self.gossip_block_delay_queue.poll_expired(cx) { Poll::Ready(Some(Ok(queued_block))) => { - return Poll::Ready(Some(InboundEvent::ReadyBlock(queued_block.into_inner()))); + return Poll::Ready(Some(InboundEvent::ReadyGossipBlock( + queued_block.into_inner(), + ))); } Poll::Ready(Some(Err(e))) => { - return Poll::Ready(Some(InboundEvent::DelayQueueError(e, "block_queue"))); + return Poll::Ready(Some(InboundEvent::DelayQueueError(e, "gossip_block_queue"))); + } + // `Poll::Ready(None)` means that there are no more entries in the delay queue and we + // will continue to get this result until something else is added into the queue. 
+ Poll::Ready(None) | Poll::Pending => (), + } + + match self.rpc_block_delay_queue.poll_expired(cx) { + Poll::Ready(Some(Ok(queued_block))) => { + return Poll::Ready(Some(InboundEvent::ReadyRpcBlock(queued_block.into_inner()))); + } + Poll::Ready(Some(Err(e))) => { + return Poll::Ready(Some(InboundEvent::DelayQueueError(e, "rpc_block_queue"))); } // `Poll::Ready(None)` means that there are no more entries in the delay queue and we // will continue to get this result until something else is added into the queue. @@ -219,14 +260,16 @@ pub fn spawn_reprocess_scheduler( let mut queue = ReprocessQueue { work_reprocessing_rx, ready_work_tx, - block_delay_queue: DelayQueue::new(), + gossip_block_delay_queue: DelayQueue::new(), + rpc_block_delay_queue: DelayQueue::new(), attestations_delay_queue: DelayQueue::new(), - queued_block_roots: HashSet::new(), + queued_gossip_block_roots: HashSet::new(), queued_aggregates: FnvHashMap::default(), queued_unaggregates: FnvHashMap::default(), awaiting_attestations_per_root: HashMap::new(), next_attestation: 0, early_block_debounce: TimeLatch::default(), + rpc_block_debounce: TimeLatch::default(), attestation_delay_debounce: TimeLatch::default(), }; @@ -259,13 +302,13 @@ impl ReprocessQueue { let block_root = early_block.block.block_root; // Don't add the same block to the queue twice. This prevents DoS attacks. - if self.queued_block_roots.contains(&block_root) { + if self.queued_gossip_block_roots.contains(&block_root) { return; } if let Some(duration_till_slot) = slot_clock.duration_to_slot(block_slot) { // Check to ensure this won't over-fill the queue. 
- if self.queued_block_roots.len() >= MAXIMUM_QUEUED_BLOCKS { + if self.queued_gossip_block_roots.len() >= MAXIMUM_QUEUED_BLOCKS { if self.early_block_debounce.elapsed() { warn!( log, @@ -278,10 +321,10 @@ impl ReprocessQueue { return; } - self.queued_block_roots.insert(block_root); + self.queued_gossip_block_roots.insert(block_root); // Queue the block until the start of the appropriate slot, plus // `ADDITIONAL_QUEUED_BLOCK_DELAY`. - self.block_delay_queue.insert( + self.gossip_block_delay_queue.insert( early_block, duration_till_slot + ADDITIONAL_QUEUED_BLOCK_DELAY, ); @@ -311,6 +354,58 @@ impl ReprocessQueue { } } } + // A rpc block arrived for processing at the same time when a gossip block + // for the same block hash is being imported. We wait for `QUEUED_RPC_BLOCK_DELAY` + // and then send the rpc block back for processing assuming the gossip import + // has completed by then. + InboundEvent::Msg(RpcBlock(mut rpc_block)) => { + // Check to ensure this won't over-fill the queue. + if self.rpc_block_delay_queue.len() >= MAXIMUM_QUEUED_BLOCKS { + if self.rpc_block_debounce.elapsed() { + warn!( + log, + "RPC blocks queue is full"; + "queue_size" => MAXIMUM_QUEUED_BLOCKS, + "msg" => "check system clock" + ); + } + // Return the block to the beacon processor signalling to + // ignore processing for this block + rpc_block.should_process = false; + if self + .ready_work_tx + .try_send(ReadyWork::RpcBlock(rpc_block)) + .is_err() + { + error!( + log, + "Failed to send rpc block to beacon processor"; + ); + } + return; + } + + // Queue the block for 1/4th of a slot + self.rpc_block_delay_queue + .insert(rpc_block, QUEUED_RPC_BLOCK_DELAY); + } + InboundEvent::ReadyRpcBlock(queued_rpc_block) => { + debug!( + log, + "Sending rpc block for reprocessing"; + "block_root" => %queued_rpc_block.block.canonical_root() + ); + if self + .ready_work_tx + .try_send(ReadyWork::RpcBlock(queued_rpc_block)) + .is_err() + { + error!( + log, + "Failed to send rpc block to beacon 
processor"; + ); + } + } InboundEvent::Msg(UnknownBlockAggregate(queued_aggregate)) => { if self.attestations_delay_queue.len() >= MAXIMUM_QUEUED_ATTESTATIONS { if self.attestation_delay_debounce.elapsed() { @@ -423,10 +518,10 @@ impl ReprocessQueue { } } // A block that was queued for later processing is now ready to be processed. - InboundEvent::ReadyBlock(ready_block) => { + InboundEvent::ReadyGossipBlock(ready_block) => { let block_root = ready_block.block.block_root; - if !self.queued_block_roots.remove(&block_root) { + if !self.queued_gossip_block_roots.remove(&block_root) { // Log an error to alert that we've made a bad assumption about how this // program works, but still process the block anyway. error!( @@ -499,8 +594,13 @@ impl ReprocessQueue { metrics::set_gauge_vec( &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, - &[BLOCKS], - self.block_delay_queue.len() as i64, + &[GOSSIP_BLOCKS], + self.gossip_block_delay_queue.len() as i64, + ); + metrics::set_gauge_vec( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, + &[RPC_BLOCKS], + self.rpc_block_delay_queue.len() as i64, ); metrics::set_gauge_vec( &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 56f38c7f22..2dc02a31b3 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -25,7 +25,7 @@ use types::{ use super::{ super::work_reprocessing_queue::{ - QueuedAggregate, QueuedBlock, QueuedUnaggregate, ReprocessQueueMessage, + QueuedAggregate, QueuedGossipBlock, QueuedUnaggregate, ReprocessQueueMessage, }, Worker, }; @@ -857,7 +857,7 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_REQUEUED_TOTAL); if reprocess_tx - .try_send(ReprocessQueueMessage::EarlyBlock(QueuedBlock { + 
.try_send(ReprocessQueueMessage::EarlyBlock(QueuedGossipBlock { peer_id, block: Box::new(verified_block), seen_timestamp: seen_duration, diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 804cfbe463..84e3c95c69 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -1,6 +1,7 @@ use std::time::Duration; use super::{super::work_reprocessing_queue::ReprocessQueueMessage, Worker}; +use crate::beacon_processor::work_reprocessing_queue::QueuedRpcBlock; use crate::beacon_processor::worker::FUTURE_SLOT_TOLERANCE; use crate::beacon_processor::DuplicateCache; use crate::metrics; @@ -53,16 +54,37 @@ impl Worker { process_type: BlockProcessType, reprocess_tx: mpsc::Sender>, duplicate_cache: DuplicateCache, + should_process: bool, ) { + if !should_process { + // Sync handles these results + self.send_sync_message(SyncMessage::BlockProcessed { + process_type, + result: crate::sync::manager::BlockProcessResult::Ignored, + }); + return; + } // Check if the block is already being imported through another source let handle = match duplicate_cache.check_and_insert(block.canonical_root()) { Some(handle) => handle, None => { - // Sync handles these results - self.send_sync_message(SyncMessage::BlockProcessed { + debug!( + self.log, + "Gossip block is being processed"; + "action" => "sending rpc block to reprocessing queue", + "block_root" => %block.canonical_root(), + ); + // Send message to work reprocess queue to retry the block + let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { + block: block.clone(), process_type, - result: Err(BlockError::BlockIsAlreadyKnown), + seen_timestamp, + should_process: true, }); + + if reprocess_tx.try_send(reprocess_msg).is_err() { + error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block.canonical_root()) + 
}; return; } }; @@ -95,7 +117,7 @@ impl Worker { // Sync handles these results self.send_sync_message(SyncMessage::BlockProcessed { process_type, - result: result.map(|_| ()), + result: result.into(), }); // Drop the handle to remove the entry from the cache diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 99df8e4a66..e32770c592 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -19,6 +19,7 @@ use self::{ single_block_lookup::SingleBlockRequest, }; +use super::manager::BlockProcessResult; use super::BatchProcessResult; use super::{ manager::{BlockProcessType, Id}, @@ -247,7 +248,7 @@ impl BlockLookups { | VerifyError::ExtraBlocksReturned => { let e = e.into(); warn!(self.log, "Peer sent invalid response to parent request."; - "peer_id" => %peer_id, "reason" => e); + "peer_id" => %peer_id, "reason" => %e); // We do not tolerate these kinds of errors. We will accept a few but these are signs // of a faulty peer. 
@@ -381,7 +382,7 @@ impl BlockLookups { pub fn single_block_processed( &mut self, id: Id, - result: Result<(), BlockError>, + result: BlockProcessResult, cx: &mut SyncNetworkContext, ) { let mut req = match self.single_block_lookups.remove(&id) { @@ -403,52 +404,62 @@ impl BlockLookups { Err(_) => return, }; - if let Err(e) = &result { - trace!(self.log, "Single block processing failed"; "block" => %root, "error" => %e); - } else { - trace!(self.log, "Single block processing succeeded"; "block" => %root); - } - - if let Err(e) = result { - match e { - BlockError::BlockIsAlreadyKnown => { - // No error here - } - BlockError::BeaconChainError(e) => { - // Internal error - error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e); - } - BlockError::ParentUnknown(block) => { - self.search_parent(block, peer_id, cx); - } - - e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed(_)) - | e @ BlockError::ExecutionPayloadError( - ExecutionPayloadError::NoExecutionConnection, - ) => { - // These errors indicate that the execution layer is offline - // and failed to validate the execution payload. Do not downscore peer. - debug!( - self.log, - "Single block lookup failed. Execution layer is offline"; - "root" => %root, - "error" => ?e - ); - } - other => { - warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?other, "peer_id" => %peer_id); - cx.report_peer( - peer_id, - PeerAction::MidToleranceError, - "single_block_failure", - ); - - // Try it again if possible. 
- req.register_failure(); - if let Ok((peer_id, request)) = req.request_block() { - if let Ok(request_id) = cx.single_block_lookup_request(peer_id, request) { - // insert with the new id - self.single_block_lookups.insert(request_id, req); + match result { + BlockProcessResult::Ok => { + trace!(self.log, "Single block processing succeeded"; "block" => %root); + } + BlockProcessResult::Ignored => { + // Beacon processor signalled to ignore the block processing result. + // This implies that the cpu is overloaded. Drop the request. + warn!( + self.log, + "Single block processing was ignored, cpu might be overloaded"; + "action" => "dropping single block request" + ); + } + BlockProcessResult::Err(e) => { + trace!(self.log, "Single block processing failed"; "block" => %root, "error" => %e); + match e { + BlockError::BlockIsAlreadyKnown => { + // No error here + } + BlockError::BeaconChainError(e) => { + // Internal error + error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e); + } + BlockError::ParentUnknown(block) => { + self.search_parent(block, peer_id, cx); + } + e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed( + _, + )) + | e @ BlockError::ExecutionPayloadError( + ExecutionPayloadError::NoExecutionConnection, + ) => { + // These errors indicate that the execution layer is offline + // and failed to validate the execution payload. Do not downscore peer. + debug!( + self.log, + "Single block lookup failed. Execution layer is offline"; + "root" => %root, + "error" => ?e + ); + } + other => { + warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?other, "peer_id" => %peer_id); + cx.report_peer( + peer_id, + PeerAction::MidToleranceError, + "single_block_failure", + ); + // Try it again if possible. 
+ req.register_failure(); + if let Ok((peer_id, request)) = req.request_block() { + if let Ok(request_id) = cx.single_block_lookup_request(peer_id, request) + { + // insert with the new id + self.single_block_lookups.insert(request_id, req); + } } } } @@ -464,7 +475,7 @@ impl BlockLookups { pub fn parent_block_processed( &mut self, chain_hash: Hash256, - result: Result<(), BlockError>, + result: BlockProcessResult, cx: &mut SyncNetworkContext, ) { let (mut parent_lookup, peer_id) = if let Some((pos, peer)) = self @@ -487,20 +498,32 @@ impl BlockLookups { return crit!(self.log, "Process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); }; - if let Err(e) = &result { - trace!(self.log, "Parent block processing failed"; &parent_lookup, "error" => %e); - } else { - trace!(self.log, "Parent block processing succeeded"; &parent_lookup); + match &result { + BlockProcessResult::Ok => { + trace!(self.log, "Parent block processing succeeded"; &parent_lookup) + } + BlockProcessResult::Err(e) => { + trace!(self.log, "Parent block processing failed"; &parent_lookup, "error" => %e) + } + BlockProcessResult::Ignored => { + trace!( + self.log, + "Parent block processing job was ignored"; + "action" => "re-requesting block", + &parent_lookup + ); + } } match result { - Err(BlockError::ParentUnknown(block)) => { + BlockProcessResult::Err(BlockError::ParentUnknown(block)) => { // need to keep looking for parents // add the block back to the queue and continue the search parent_lookup.add_block(block); self.request_parent(parent_lookup, cx); } - Ok(_) | Err(BlockError::BlockIsAlreadyKnown { .. }) => { + BlockProcessResult::Ok + | BlockProcessResult::Err(BlockError::BlockIsAlreadyKnown { .. 
}) => { let chain_hash = parent_lookup.chain_hash(); let blocks = parent_lookup.chain_blocks(); let process_id = ChainSegmentProcessId::ParentLookup(chain_hash); @@ -521,8 +544,10 @@ impl BlockLookups { } } } - Err(e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed(_))) - | Err( + BlockProcessResult::Err( + e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed(_)), + ) + | BlockProcessResult::Err( e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::NoExecutionConnection), ) => { // These errors indicate that the execution layer is offline @@ -534,7 +559,7 @@ impl BlockLookups { "error" => ?e ); } - Err(outcome) => { + BlockProcessResult::Err(outcome) => { // all else we consider the chain a failure and downvote the peer that sent // us the last block warn!( @@ -551,6 +576,15 @@ impl BlockLookups { // ambiguity. cx.report_peer(peer_id, PeerAction::MidToleranceError, "parent_request_err"); } + BlockProcessResult::Ignored => { + // Beacon processor signalled to ignore the block processing result. + // This implies that the cpu is overloaded. Drop the request. + warn!( + self.log, + "Parent block processing was ignored, cpu might be overloaded"; + "action" => "dropping parent request" + ); + } } metrics::set_gauge( diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index e9c8ac8ca7..352de4e09b 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -168,7 +168,7 @@ fn test_single_block_lookup_happy_path() { // Send the stream termination. Peer should have not been penalized, and the request removed // after processing. 
bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); - bl.single_block_processed(id, Ok(()), &mut cx); + bl.single_block_processed(id, Ok(()).into(), &mut cx); rig.expect_empty_network(); assert_eq!(bl.single_block_lookups.len(), 0); } @@ -252,7 +252,11 @@ fn test_single_block_lookup_becomes_parent_request() { // Send the stream termination. Peer should have not been penalized, and the request moved to a // parent request after processing. - bl.single_block_processed(id, Err(BlockError::ParentUnknown(Arc::new(block))), &mut cx); + bl.single_block_processed( + id, + BlockError::ParentUnknown(Arc::new(block)).into(), + &mut cx, + ); assert_eq!(bl.single_block_lookups.len(), 0); rig.expect_parent_request(); rig.expect_empty_network(); @@ -278,7 +282,7 @@ fn test_parent_lookup_happy_path() { rig.expect_empty_network(); // Processing succeeds, now the rest of the chain should be sent for processing. - bl.parent_block_processed(chain_hash, Err(BlockError::BlockIsAlreadyKnown), &mut cx); + bl.parent_block_processed(chain_hash, BlockError::BlockIsAlreadyKnown.into(), &mut cx); rig.expect_parent_chain_process(); bl.parent_chain_processed(chain_hash, BatchProcessResult::Success(true), &mut cx); assert_eq!(bl.parent_queue.len(), 0); @@ -312,7 +316,7 @@ fn test_parent_lookup_wrong_response() { rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. - bl.parent_block_processed(chain_hash, Ok(()), &mut cx); + bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); rig.expect_parent_chain_process(); bl.parent_chain_processed(chain_hash, BatchProcessResult::Success(true), &mut cx); assert_eq!(bl.parent_queue.len(), 0); @@ -341,7 +345,7 @@ fn test_parent_lookup_empty_response() { rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. 
- bl.parent_block_processed(chain_hash, Ok(()), &mut cx); + bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); rig.expect_parent_chain_process(); bl.parent_chain_processed(chain_hash, BatchProcessResult::Success(true), &mut cx); assert_eq!(bl.parent_queue.len(), 0); @@ -369,7 +373,7 @@ fn test_parent_lookup_rpc_failure() { rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. - bl.parent_block_processed(chain_hash, Ok(()), &mut cx); + bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); rig.expect_parent_chain_process(); bl.parent_chain_processed(chain_hash, BatchProcessResult::Success(true), &mut cx); assert_eq!(bl.parent_queue.len(), 0); @@ -440,7 +444,7 @@ fn test_parent_lookup_too_deep() { // the processing result bl.parent_block_processed( chain_hash, - Err(BlockError::ParentUnknown(Arc::new(block))), + BlockError::ParentUnknown(Arc::new(block)).into(), &mut cx, ) } @@ -458,3 +462,56 @@ fn test_parent_lookup_disconnection() { bl.peer_disconnected(&peer_id, &mut cx); assert!(bl.parent_queue.is_empty()); } + +#[test] +fn test_single_block_lookup_ignored_response() { + let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + + let block = rig.rand_block(); + let peer_id = PeerId::random(); + + // Trigger the request + bl.search_block(block.canonical_root(), peer_id, &mut cx); + let id = rig.expect_block_request(); + + // The peer provides the correct block, should not be penalized. Now the block should be sent + // for processing. + bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block)), D, &mut cx); + rig.expect_empty_network(); + rig.expect_block_process(); + + // The request should still be active. + assert_eq!(bl.single_block_lookups.len(), 1); + + // Send the stream termination. Peer should have not been penalized, and the request removed + // after processing. 
+ bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); + // Send an Ignored response, the request should be dropped + bl.single_block_processed(id, BlockProcessResult::Ignored, &mut cx); + rig.expect_empty_network(); + assert_eq!(bl.single_block_lookups.len(), 0); +} + +#[test] +fn test_parent_lookup_ignored_response() { + let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + + let parent = rig.rand_block(); + let block = rig.block_with_parent(parent.canonical_root()); + let chain_hash = block.canonical_root(); + let peer_id = PeerId::random(); + + // Trigger the request + bl.search_parent(Arc::new(block), peer_id, &mut cx); + let id = rig.expect_parent_request(); + + // Peer sends the right block, it should be sent for processing. Peer should not be penalized. + bl.parent_lookup_response(id, peer_id, Some(Arc::new(parent)), D, &mut cx); + rig.expect_block_process(); + rig.expect_empty_network(); + + // Return an Ignored result. The request should be dropped + bl.parent_block_processed(chain_hash, BlockProcessResult::Ignored, &mut cx); + rig.expect_empty_network(); + assert_eq!(bl.parent_queue.len(), 0); +} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 3e44256655..d0919406b2 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -117,7 +117,7 @@ pub enum SyncMessage { /// Block processed BlockProcessed { process_type: BlockProcessType, - result: Result<(), BlockError>, + result: BlockProcessResult, }, } @@ -128,6 +128,13 @@ pub enum BlockProcessType { ParentLookup { chain_hash: Hash256 }, } +#[derive(Debug)] +pub enum BlockProcessResult { + Ok, + Err(BlockError), + Ignored, +} + /// The result of processing multiple blocks (a chain segment). 
#[derive(Debug)] pub enum BatchProcessResult { @@ -620,3 +627,18 @@ impl SyncManager { } } } + +impl From>> for BlockProcessResult { + fn from(result: Result>) -> Self { + match result { + Ok(_) => BlockProcessResult::Ok, + Err(e) => e.into(), + } + } +} + +impl From> for BlockProcessResult { + fn from(e: BlockError) -> Self { + BlockProcessResult::Err(e) + } +} From 5243cc6c30e684110b6f99d3af115f05b705aaa8 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Fri, 15 Jul 2022 07:31:21 +0000 Subject: [PATCH 067/184] Add a u256_hex_be module to encode/decode U256 types (#3321) ## Issue Addressed Resolves #3314 ## Proposed Changes Add a module to encode/decode u256 types according to the execution layer encoding/decoding standards https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#structures Updates `JsonExecutionPayloadV1.base_fee_per_gas`, `JsonExecutionPayloadHeaderV1.base_fee_per_gas` and `TransitionConfigurationV1.terminal_total_difficulty` to encode/decode according to standards Co-authored-by: Michael Sproul --- .../src/engine_api/json_structures.rs | 3 + consensus/serde_utils/src/lib.rs | 1 + consensus/serde_utils/src/u256_hex_be.rs | 144 ++++++++++++++++++ 3 files changed, 148 insertions(+) create mode 100644 consensus/serde_utils/src/u256_hex_be.rs diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 5414c52623..0316cf3993 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -78,6 +78,7 @@ pub struct JsonExecutionPayloadHeaderV1 { pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, + #[serde(with = "eth2_serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, pub block_hash: ExecutionBlockHash, pub transactions_root: Hash256, @@ -142,6 +143,7 @@ pub struct JsonExecutionPayloadV1 { pub 
timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, + #[serde(with = "eth2_serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] @@ -486,6 +488,7 @@ impl From for JsonProposeBlindedBlockResponse #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransitionConfigurationV1 { + #[serde(with = "eth2_serde_utils::u256_hex_be")] pub terminal_total_difficulty: Uint256, pub terminal_block_hash: ExecutionBlockHash, #[serde(with = "eth2_serde_utils::u64_hex_be")] diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs index 81e2bbe963..92b5966c9a 100644 --- a/consensus/serde_utils/src/lib.rs +++ b/consensus/serde_utils/src/lib.rs @@ -6,6 +6,7 @@ pub mod hex_vec; pub mod json_str; pub mod list_of_bytes_lists; pub mod quoted_u64_vec; +pub mod u256_hex_be; pub mod u32_hex; pub mod u64_hex_be; pub mod u8_hex; diff --git a/consensus/serde_utils/src/u256_hex_be.rs b/consensus/serde_utils/src/u256_hex_be.rs new file mode 100644 index 0000000000..8007e5792c --- /dev/null +++ b/consensus/serde_utils/src/u256_hex_be.rs @@ -0,0 +1,144 @@ +use ethereum_types::U256; + +use serde::de::Visitor; +use serde::{de, Deserializer, Serialize, Serializer}; +use std::fmt; +use std::str::FromStr; + +pub fn serialize(num: &U256, serializer: S) -> Result +where + S: Serializer, +{ + num.serialize(serializer) +} + +pub struct U256Visitor; + +impl<'de> Visitor<'de> for U256Visitor { + type Value = String; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a well formatted hex string") + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + if !value.starts_with("0x") { + return Err(de::Error::custom("must start with 0x")); + } + let stripped = &value[2..]; + if stripped.is_empty() { + 
Err(de::Error::custom(format!( + "quantity cannot be {:?}", + stripped + ))) + } else if stripped == "0" { + Ok(value.to_string()) + } else if stripped.starts_with('0') { + Err(de::Error::custom("cannot have leading zero")) + } else { + Ok(value.to_string()) + } + } +} + +pub fn deserialize<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let decoded = deserializer.deserialize_string(U256Visitor)?; + + U256::from_str(&decoded).map_err(|e| de::Error::custom(format!("Invalid U256 string: {}", e))) +} + +#[cfg(test)] +mod test { + use ethereum_types::U256; + use serde::{Deserialize, Serialize}; + use serde_json; + + #[derive(Debug, PartialEq, Serialize, Deserialize)] + #[serde(transparent)] + struct Wrapper { + #[serde(with = "super")] + val: U256, + } + + #[test] + fn encoding() { + assert_eq!( + &serde_json::to_string(&Wrapper { val: 0.into() }).unwrap(), + "\"0x0\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { val: 1.into() }).unwrap(), + "\"0x1\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { val: 256.into() }).unwrap(), + "\"0x100\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { val: 65.into() }).unwrap(), + "\"0x41\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { val: 1024.into() }).unwrap(), + "\"0x400\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { + val: U256::max_value() - 1 + }) + .unwrap(), + "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { + val: U256::max_value() + }) + .unwrap(), + "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" + ); + } + + #[test] + fn decoding() { + assert_eq!( + serde_json::from_str::("\"0x0\"").unwrap(), + Wrapper { val: 0.into() }, + ); + assert_eq!( + serde_json::from_str::("\"0x41\"").unwrap(), + Wrapper { val: 65.into() }, + ); + assert_eq!( + serde_json::from_str::("\"0x400\"").unwrap(), + Wrapper { val: 1024.into() }, + ); + assert_eq!( + 
serde_json::from_str::( + "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" + ) + .unwrap(), + Wrapper { + val: U256::max_value() - 1 + }, + ); + assert_eq!( + serde_json::from_str::( + "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" + ) + .unwrap(), + Wrapper { + val: U256::max_value() + }, + ); + serde_json::from_str::("\"0x\"").unwrap_err(); + serde_json::from_str::("\"0x0400\"").unwrap_err(); + serde_json::from_str::("\"400\"").unwrap_err(); + serde_json::from_str::("\"ff\"").unwrap_err(); + } +} From 2940783a9c61f0ae13f2781db708e7144f13b58d Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 15 Jul 2022 07:31:22 +0000 Subject: [PATCH 068/184] Upstream local testnet improvements (#3336) ## Proposed Changes Adds some improvements I found when playing around with local testnets in #3335: - When trying to kill processes, do not exit on a failure. (If a node fails to start due to a bug, the PID associated with it no longer exists. When trying to tear down the testnets, an error will be raised when it tries that PID and then will not try any PIDs following it. This change means it will continue and tear down the rest of the network. - When starting the testnet, set `ulimit` to a high number. This allows the VCs to import 1000s of validators without running into limitations. --- scripts/local_testnet/kill_processes.sh | 2 +- scripts/local_testnet/start_local_testnet.sh | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/local_testnet/kill_processes.sh b/scripts/local_testnet/kill_processes.sh index be6b7f3d66..d63725ac14 100755 --- a/scripts/local_testnet/kill_processes.sh +++ b/scripts/local_testnet/kill_processes.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # Kill processes -set -Eeuo pipefail +set -Euo pipefail # First parameter is the file with # one pid per line. 
diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index a5c6c0b5eb..33c1d642e7 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -5,6 +5,9 @@ set -Eeuo pipefail source ./vars.env +# Set a higher ulimit in case we want to import 1000s of validators. +ulimit -n 65536 + # VC_COUNT is defaulted in vars.env DEBUG_LEVEL=${DEBUG_LEVEL:-info} From 4f58c555a92ce38208a103c4b66c175ff98183a0 Mon Sep 17 00:00:00 2001 From: Peter Davies Date: Fri, 15 Jul 2022 14:16:00 +0000 Subject: [PATCH 069/184] Add Merge support to web3signer validators (#3318) ## Issue Addressed Web3signer validators can't produce post-Bellatrix blocks. ## Proposed Changes Add support for Bellatrix to web3signer validators. ## Additional Info I am running validators with this code on Ropsten, but it may be a while for them to get a proposal. --- testing/web3signer_tests/src/lib.rs | 37 +++++++++++++++++++ .../src/signing_method/web3signer.rs | 30 +++++++++++---- 2 files changed, 59 insertions(+), 8 deletions(-) diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 5803f360a6..eb307290c2 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -612,6 +612,28 @@ mod tests { .await; } + /// Test all the Merge types. 
+ async fn test_merge_types(network: &str, listen_port: u16) { + let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); + let spec = &network_config.chain_spec::().unwrap(); + let merge_fork_slot = spec + .bellatrix_fork_epoch + .unwrap() + .start_slot(E::slots_per_epoch()); + + TestingRig::new(network, spec.clone(), listen_port) + .await + .assert_signatures_match("beacon_block_merge", |pubkey, validator_store| async move { + let mut merge_block = BeaconBlockMerge::empty(spec); + merge_block.slot = merge_fork_slot; + validator_store + .sign_block(pubkey, BeaconBlock::Merge(merge_block), merge_fork_slot) + .await + .unwrap() + }) + .await; + } + #[tokio::test] async fn mainnet_base_types() { test_base_types("mainnet", 4242).await @@ -631,4 +653,19 @@ mod tests { async fn prater_altair_types() { test_altair_types("prater", 4247).await } + + #[tokio::test] + async fn ropsten_base_types() { + test_base_types("ropsten", 4250).await + } + + #[tokio::test] + async fn ropsten_altair_types() { + test_altair_types("ropsten", 4251).await + } + + #[tokio::test] + async fn ropsten_merge_types() { + test_merge_types("ropsten", 4252).await + } } diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 0ab37484ba..cf02ae0c32 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -25,6 +25,7 @@ pub enum MessageType { pub enum ForkName { Phase0, Altair, + Bellatrix, } #[derive(Debug, PartialEq, Serialize)] @@ -43,7 +44,10 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: ExecPayload> { Attestation(&'a AttestationData), BeaconBlock { version: ForkName, - block: &'a BeaconBlock, + #[serde(skip_serializing_if = "Option::is_none")] + block: Option<&'a BeaconBlock>, + #[serde(skip_serializing_if = "Option::is_none")] + block_header: Option, }, #[allow(dead_code)] Deposit { @@ -70,13 +74,23 @@ pub enum Web3SignerObject<'a, T: 
EthSpec, Payload: ExecPayload> { impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { pub fn beacon_block(block: &'a BeaconBlock) -> Result { - let version = match block { - BeaconBlock::Base(_) => ForkName::Phase0, - BeaconBlock::Altair(_) => ForkName::Altair, - BeaconBlock::Merge(_) => return Err(Error::MergeForkNotSupported), - }; - - Ok(Web3SignerObject::BeaconBlock { version, block }) + match block { + BeaconBlock::Base(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Phase0, + block: Some(block), + block_header: None, + }), + BeaconBlock::Altair(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Altair, + block: Some(block), + block_header: None, + }), + BeaconBlock::Merge(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Bellatrix, + block: None, + block_header: Some(block.block_header()), + }), + } } pub fn message_type(&self) -> MessageType { From 2ed51c364dab97b3a5ab020911997d1a8a8a2bee Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 17 Jul 2022 23:26:58 +0000 Subject: [PATCH 070/184] Improve block-lookup functionality (#3287) Improves some of the functionality around single and parent block lookup. Gives extra information about whether failures for lookups are related to processing or downloading. This is entirely untested. 
Co-authored-by: Diva M --- .../network/src/sync/block_lookups/mod.rs | 34 +++++-- .../src/sync/block_lookups/parent_lookup.rs | 29 ++++-- .../sync/block_lookups/single_block_lookup.rs | 93 +++++++++++++------ .../network/src/sync/block_lookups/tests.rs | 74 ++++++++++++++- 4 files changed, 186 insertions(+), 44 deletions(-) diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index e32770c592..49e1eb290f 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -69,6 +69,8 @@ impl BlockLookups { /* Lookup requests */ + /// Searches for a single block hash. If the blocks parent is unknown, a chain of blocks is + /// constructed. pub fn search_block( &mut self, hash: Hash256, @@ -105,6 +107,8 @@ impl BlockLookups { } } + /// If a block is attempted to be processed but we do not know its parent, this function is + /// called in order to find the block's parent. pub fn search_parent( &mut self, block: Arc>, @@ -201,6 +205,7 @@ impl BlockLookups { ); } + /// Process a response received from a parent lookup request. pub fn parent_lookup_response( &mut self, id: Id, @@ -258,7 +263,6 @@ impl BlockLookups { self.request_parent(parent_lookup, cx); } VerifyError::PreviousFailure { parent_root } => { - self.failed_chains.insert(parent_lookup.chain_hash()); debug!( self.log, "Parent chain ignored due to past failure"; @@ -336,6 +340,7 @@ impl BlockLookups { } } + /// An RPC error has occurred during a parent lookup. This function handles this case. 
pub fn parent_lookup_failed( &mut self, id: Id, @@ -362,7 +367,7 @@ impl BlockLookups { pub fn single_block_lookup_failed(&mut self, id: Id, cx: &mut SyncNetworkContext) { if let Some(mut request) = self.single_block_lookups.remove(&id) { - request.register_failure(); + request.register_failure_downloading(); trace!(self.log, "Single block lookup failed"; "block" => %request.hash); if let Ok((peer_id, block_request)) = request.request_block() { if let Ok(request_id) = cx.single_block_lookup_request(peer_id, block_request) { @@ -453,7 +458,7 @@ impl BlockLookups { "single_block_failure", ); // Try it again if possible. - req.register_failure(); + req.register_failure_processing(); if let Ok((peer_id, request)) = req.request_block() { if let Ok(request_id) = cx.single_block_lookup_request(peer_id, request) { @@ -569,12 +574,13 @@ impl BlockLookups { "last_peer" => %peer_id, ); - // Add this chain to cache of failed chains - self.failed_chains.insert(chain_hash); - // This currently can be a host of errors. We permit this due to the partial // ambiguity. cx.report_peer(peer_id, PeerAction::MidToleranceError, "parent_request_err"); + + // Try again if possible + parent_lookup.processing_failed(); + self.request_parent(parent_lookup, cx); } BlockProcessResult::Ignored => { // Beacon processor signalled to ignore the block processing result. @@ -683,14 +689,26 @@ impl BlockLookups { parent_lookup::RequestError::SendFailed(_) => { // Probably shutting down, nothing to do here. Drop the request } - parent_lookup::RequestError::ChainTooLong - | parent_lookup::RequestError::TooManyAttempts => { + parent_lookup::RequestError::ChainTooLong => { self.failed_chains.insert(parent_lookup.chain_hash()); // This indicates faulty peers. 
for &peer_id in parent_lookup.used_peers() { cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) } } + parent_lookup::RequestError::TooManyAttempts { cannot_process } => { + // We only consider the chain failed if we were unable to process it. + // We could have failed because one peer continually failed to send us + // bad blocks. We still allow other peers to send us this chain. Note + // that peers that do this, still get penalised. + if cannot_process { + self.failed_chains.insert(parent_lookup.chain_hash()); + } + // This indicates faulty peers. + for &peer_id in parent_lookup.used_peers() { + cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) + } + } parent_lookup::RequestError::NoPeers => { // This happens if the peer disconnects while the block is being // processed. Drop the request without extra penalty diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index 62503353ad..bf5a1b259b 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -10,7 +10,7 @@ use crate::sync::{ use super::single_block_lookup::{self, SingleBlockRequest}; -/// How many attempts we try to find a parent of a block before we give up trying . +/// How many attempts we try to find a parent of a block before we give up trying. pub(crate) const PARENT_FAIL_TOLERANCE: u8 = 5; /// The maximum depth we will search for a parent block. In principle we should have sync'd any /// canonical chain to its head once the peer connects. A chain should not appear where it's depth @@ -41,7 +41,12 @@ pub enum VerifyError { pub enum RequestError { SendFailed(&'static str), ChainTooLong, - TooManyAttempts, + /// We witnessed too many failures trying to complete this parent lookup. + TooManyAttempts { + /// We received more failures trying to process the blocks than downloading them + /// from peers. 
+ cannot_process: bool, + }, NoPeers, } @@ -105,7 +110,12 @@ impl ParentLookup { } pub fn download_failed(&mut self) { - self.current_parent_request.register_failure(); + self.current_parent_request.register_failure_downloading(); + self.current_parent_request_id = None; + } + + pub fn processing_failed(&mut self) { + self.current_parent_request.register_failure_processing(); self.current_parent_request_id = None; } @@ -126,7 +136,7 @@ impl ParentLookup { // be dropped and the peer downscored. if let Some(parent_root) = block.as_ref().map(|block| block.parent_root()) { if failed_chains.contains(&parent_root) { - self.current_parent_request.register_failure(); + self.current_parent_request.register_failure_downloading(); self.current_parent_request_id = None; return Err(VerifyError::PreviousFailure { parent_root }); } @@ -144,7 +154,7 @@ impl ParentLookup { #[cfg(test)] pub fn failed_attempts(&self) -> u8 { - self.current_parent_request.failed_attempts + self.current_parent_request.failed_attempts() } pub fn add_peer(&mut self, block_root: &Hash256, peer_id: &PeerId) -> bool { @@ -171,7 +181,9 @@ impl From for RequestError { fn from(e: super::single_block_lookup::LookupRequestError) -> Self { use super::single_block_lookup::LookupRequestError as E; match e { - E::TooManyAttempts => RequestError::TooManyAttempts, + E::TooManyAttempts { cannot_process } => { + RequestError::TooManyAttempts { cannot_process } + } E::NoPeers => RequestError::NoPeers, } } @@ -195,7 +207,10 @@ impl RequestError { match self { RequestError::SendFailed(e) => e, RequestError::ChainTooLong => "chain_too_long", - RequestError::TooManyAttempts => "too_many_attempts", + RequestError::TooManyAttempts { cannot_process } if *cannot_process => { + "too_many_processing_attempts" + } + RequestError::TooManyAttempts { cannot_process: _ } => "too_many_downloading_attempts", RequestError::NoPeers => "no_peers", } } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs 
b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index debf3de8db..8ba5b17bfa 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -18,8 +18,10 @@ pub struct SingleBlockRequest { pub available_peers: HashSet, /// Peers from which we have requested this block. pub used_peers: HashSet, - /// How many times have we attempted this block. - pub failed_attempts: u8, + /// How many times have we attempted to process this block. + failed_processing: u8, + /// How many times have we attempted to download this block. + failed_downloading: u8, } #[derive(Debug, PartialEq, Eq)] @@ -38,7 +40,11 @@ pub enum VerifyError { #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupRequestError { - TooManyAttempts, + /// Too many failed attempts + TooManyAttempts { + /// The failed attempts were primarily due to processing failures. + cannot_process: bool, + }, NoPeers, } @@ -49,15 +55,29 @@ impl SingleBlockRequest { state: State::AwaitingDownload, available_peers: HashSet::from([peer_id]), used_peers: HashSet::default(), - failed_attempts: 0, + failed_processing: 0, + failed_downloading: 0, } } - pub fn register_failure(&mut self) { - self.failed_attempts += 1; + /// Registers a failure in processing a block. + pub fn register_failure_processing(&mut self) { + self.failed_processing = self.failed_processing.saturating_add(1); self.state = State::AwaitingDownload; } + /// Registers a failure in downloading a block. This might be a peer disconnection or a wrong + /// block. + pub fn register_failure_downloading(&mut self) { + self.failed_downloading = self.failed_downloading.saturating_add(1); + self.state = State::AwaitingDownload; + } + + /// The total number of failures, whether it be processing or downloading. 
+ pub fn failed_attempts(&self) -> u8 { + self.failed_processing + self.failed_downloading + } + pub fn add_peer(&mut self, hash: &Hash256, peer_id: &PeerId) -> bool { let is_useful = &self.hash == hash; if is_useful { @@ -72,7 +92,7 @@ impl SingleBlockRequest { if let State::Downloading { peer_id } = &self.state { if peer_id == dc_peer_id { // Peer disconnected before providing a block - self.register_failure(); + self.register_failure_downloading(); return Err(()); } } @@ -87,14 +107,16 @@ impl SingleBlockRequest { ) -> Result>>, VerifyError> { match self.state { State::AwaitingDownload => { - self.register_failure(); + self.register_failure_downloading(); Err(VerifyError::ExtraBlocksReturned) } State::Downloading { peer_id } => match block { Some(block) => { if block.canonical_root() != self.hash { // return an error and drop the block - self.register_failure(); + // NOTE: we take this is as a download failure to prevent counting the + // attempt as a chain failure, but simply a peer failure. + self.register_failure_downloading(); Err(VerifyError::RootMismatch) } else { // Return the block for processing. @@ -103,14 +125,14 @@ impl SingleBlockRequest { } } None => { - self.register_failure(); + self.register_failure_downloading(); Err(VerifyError::NoBlockReturned) } }, State::Processing { peer_id: _ } => match block { Some(_) => { // We sent the block for processing and received an extra block. 
- self.register_failure(); + self.register_failure_downloading(); Err(VerifyError::ExtraBlocksReturned) } None => { @@ -124,19 +146,19 @@ impl SingleBlockRequest { pub fn request_block(&mut self) -> Result<(PeerId, BlocksByRootRequest), LookupRequestError> { debug_assert!(matches!(self.state, State::AwaitingDownload)); - if self.failed_attempts <= MAX_ATTEMPTS { - if let Some(&peer_id) = self.available_peers.iter().choose(&mut rand::thread_rng()) { - let request = BlocksByRootRequest { - block_roots: VariableList::from(vec![self.hash]), - }; - self.state = State::Downloading { peer_id }; - self.used_peers.insert(peer_id); - Ok((peer_id, request)) - } else { - Err(LookupRequestError::NoPeers) - } + if self.failed_attempts() >= MAX_ATTEMPTS { + Err(LookupRequestError::TooManyAttempts { + cannot_process: self.failed_processing >= self.failed_downloading, + }) + } else if let Some(&peer_id) = self.available_peers.iter().choose(&mut rand::thread_rng()) { + let request = BlocksByRootRequest { + block_roots: VariableList::from(vec![self.hash]), + }; + self.state = State::Downloading { peer_id }; + self.used_peers.insert(peer_id); + Ok((peer_id, request)) } else { - Err(LookupRequestError::TooManyAttempts) + Err(LookupRequestError::NoPeers) } } @@ -169,6 +191,8 @@ impl slog::Value for SingleBlockRequest { serializer.emit_arguments("processing_peer", &format_args!("{}", peer_id))? 
} } + serializer.emit_u8("failed_downloads", self.failed_downloading)?; + serializer.emit_u8("failed_processing", self.failed_processing)?; slog::Result::Ok(()) } } @@ -200,11 +224,28 @@ mod tests { } #[test] - fn test_max_attempts() { + fn test_block_lookup_failures() { + const FAILURES: u8 = 3; let peer_id = PeerId::random(); let block = rand_block(); - let mut sl = SingleBlockRequest::<4>::new(block.canonical_root(), peer_id); - sl.register_failure(); + let mut sl = SingleBlockRequest::::new(block.canonical_root(), peer_id); + for _ in 1..FAILURES { + sl.request_block().unwrap(); + sl.register_failure_downloading(); + } + + // Now we receive the block and send it for processing + sl.request_block().unwrap(); + sl.verify_block(Some(Arc::new(block))).unwrap().unwrap(); + + // One processing failure maxes the available attempts + sl.register_failure_processing(); + assert_eq!( + sl.request_block(), + Err(LookupRequestError::TooManyAttempts { + cannot_process: false + }) + ) } } diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 352de4e09b..b3afadda2c 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -385,12 +385,11 @@ fn test_parent_lookup_too_many_attempts() { let parent = rig.rand_block(); let block = rig.block_with_parent(parent.canonical_root()); - let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); // Trigger the request bl.search_parent(Arc::new(block), peer_id, &mut cx); - for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE + 1 { + for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { let id = rig.expect_parent_request(); match i % 2 { // make sure every error is accounted for @@ -402,6 +401,8 @@ fn test_parent_lookup_too_many_attempts() { // Send a bad block this time. It should be tried again. 
let bad_block = rig.rand_block(); bl.parent_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); + // Send the stream termination + bl.parent_lookup_response(id, peer_id, None, D, &mut cx); rig.expect_penalty(); } } @@ -411,7 +412,74 @@ fn test_parent_lookup_too_many_attempts() { } assert_eq!(bl.parent_queue.len(), 0); - assert!(bl.failed_chains.contains(&chain_hash)); +} + +#[test] +fn test_parent_lookup_too_many_download_attempts_no_blacklist() { + let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + + let parent = rig.rand_block(); + let block = rig.block_with_parent(parent.canonical_root()); + let block_hash = block.canonical_root(); + let peer_id = PeerId::random(); + + // Trigger the request + bl.search_parent(Arc::new(block), peer_id, &mut cx); + for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { + assert!(!bl.failed_chains.contains(&block_hash)); + let id = rig.expect_parent_request(); + if i % 2 != 0 { + // The request fails. It should be tried again. + bl.parent_lookup_failed(id, peer_id, &mut cx); + } else { + // Send a bad block this time. It should be tried again. 
+ let bad_block = rig.rand_block(); + bl.parent_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); + rig.expect_penalty(); + } + if i < parent_lookup::PARENT_FAIL_TOLERANCE { + assert_eq!(bl.parent_queue[0].failed_attempts(), dbg!(i)); + } + } + + assert_eq!(bl.parent_queue.len(), 0); + assert!(!bl.failed_chains.contains(&block_hash)); + assert!(!bl.failed_chains.contains(&parent.canonical_root())); +} + +#[test] +fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { + const PROCESSING_FAILURES: u8 = parent_lookup::PARENT_FAIL_TOLERANCE / 2 + 1; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + + let parent = Arc::new(rig.rand_block()); + let block = rig.block_with_parent(parent.canonical_root()); + let block_hash = block.canonical_root(); + let peer_id = PeerId::random(); + + // Trigger the request + bl.search_parent(Arc::new(block), peer_id, &mut cx); + + // Fail downloading the block + for _ in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { + let id = rig.expect_parent_request(); + // The request fails. It should be tried again. 
+ bl.parent_lookup_failed(id, peer_id, &mut cx); + } + + // Now fail processing a block in the parent request + for _ in 0..PROCESSING_FAILURES { + let id = dbg!(rig.expect_parent_request()); + assert!(!bl.failed_chains.contains(&block_hash)); + // send the right parent but fail processing + bl.parent_lookup_response(id, peer_id, Some(parent.clone()), D, &mut cx); + bl.parent_block_processed(block_hash, BlockError::InvalidSignature.into(), &mut cx); + bl.parent_lookup_response(id, peer_id, None, D, &mut cx); + rig.expect_penalty(); + } + + assert!(bl.failed_chains.contains(&block_hash)); + assert_eq!(bl.parent_queue.len(), 0); } #[test] From da7b7a0f60dbc341346247e96f3a49b27ead1de2 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 18 Jul 2022 01:51:36 +0000 Subject: [PATCH 071/184] Make transactions in execution layer integration tests (#3320) ## Issue Addressed Resolves #3159 ## Proposed Changes Sends transactions to the EE before requesting for a payload in the `execution_integration_tests`. Made some changes to the integration tests in order to be able to sign and publish transactions to the EE: 1. `genesis.json` for both geth and nethermind was modified to include pre-funded accounts that we know private keys for 2. Using the unauthenticated port again in order to make `eth_sendTransaction` and calls from the `personal` namespace to import keys Also added a `fcu` call with `PayloadAttributes` before calling `getPayload` in order to give EEs sufficient time to pack transactions into the payload. 
--- Cargo.lock | 190 +++++++++++++++++- .../execution_engine_integration/Cargo.toml | 5 + .../src/execution_engine.rs | 19 ++ .../src/genesis_json.rs | 91 ++++++++- .../execution_engine_integration/src/geth.rs | 3 +- .../execution_engine_integration/src/main.rs | 2 + .../src/nethermind.rs | 24 ++- .../src/test_rig.rs | 133 +++++++++++- .../src/transactions.rs | 87 ++++++++ 9 files changed, 541 insertions(+), 13 deletions(-) create mode 100644 testing/execution_engine_integration/src/transactions.rs diff --git a/Cargo.lock b/Cargo.lock index a31a6b382c..dfeac97cf4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -189,6 +189,17 @@ dependencies = [ "syn", ] +[[package]] +name = "async_io_stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" +dependencies = [ + "futures", + "pharos", + "rustc_version 0.4.0", +] + [[package]] name = "asynchronous-codec" version = "0.6.0" @@ -224,6 +235,18 @@ dependencies = [ "winapi", ] +[[package]] +name = "auto_impl" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7862e21c893d65a1650125d157eaeec691439379a1cee17ee49031b79236ada4" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "autocfg" version = "0.1.8" @@ -1865,14 +1888,53 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "ethers-providers" +version = "0.6.0" +source = "git+https://github.com/gakonst/ethers-rs?rev=02ad93a1cfb7b62eb051c77c61dc4c0218428e4a#02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" +dependencies = [ + "async-trait", + "auto_impl", + "base64", + "ethers-core", + "futures-channel", + "futures-core", + "futures-timer", + "futures-util", + "hex", + "http", + "once_cell", + "parking_lot 0.11.2", + "pin-project 1.0.11", + "reqwest", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-tungstenite 0.17.1", + "tracing", + "tracing-futures", + "url", 
+ "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-timer", + "web-sys", + "ws_stream_wasm", +] + [[package]] name = "execution_engine_integration" version = "0.1.0" dependencies = [ + "deposit_contract", "environment", + "ethers-core", + "ethers-providers", "execution_layer", "exit-future", "futures", + "hex", + "reqwest", "sensitive_url", "serde_json", "task_executor", @@ -2620,6 +2682,19 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" +dependencies = [ + "http", + "hyper", + "rustls 0.20.6", + "tokio", + "tokio-rustls 0.23.4", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -2758,6 +2833,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", ] [[package]] @@ -4351,6 +4429,16 @@ dependencies = [ "indexmap", ] +[[package]] +name = "pharos" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" +dependencies = [ + "futures", + "rustc_version 0.4.0", +] + [[package]] name = "pin-project" version = "0.4.30" @@ -5038,6 +5126,7 @@ dependencies = [ "http", "http-body", "hyper", + "hyper-rustls", "hyper-tls", "ipnet", "js-sys", @@ -5047,17 +5136,21 @@ dependencies = [ "native-tls", "percent-encoding", "pin-project-lite 0.2.9", + "rustls 0.20.6", + "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", + "tokio-rustls 0.23.4", "tokio-util 0.7.3", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", + "webpki-roots", "winreg 0.10.1", ] @@ -5219,6 +5312,15 @@ dependencies = [ "webpki 0.22.0", ] +[[package]] +name = "rustls-pemfile" +version = "1.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9" +dependencies = [ + "base64", +] + [[package]] name = "rustversion" version = "1.0.7" @@ -5437,6 +5539,12 @@ dependencies = [ "pest", ] +[[package]] +name = "send_wrapper" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "930c0acf610d3fdb5e2ab6213019aaa04e227ebe9547b0649ba599b16d788bd7" + [[package]] name = "sensitive_url" version = "0.1.0" @@ -6398,6 +6506,17 @@ dependencies = [ "webpki 0.21.4", ] +[[package]] +name = "tokio-rustls" +version = "0.23.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +dependencies = [ + "rustls 0.20.6", + "tokio", + "webpki 0.22.0", +] + [[package]] name = "tokio-stream" version = "0.1.9" @@ -6420,7 +6539,23 @@ dependencies = [ "log", "pin-project 1.0.11", "tokio", - "tungstenite", + "tungstenite 0.14.0", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06cda1232a49558c46f8a504d5b93101d42c0bf7f911f12a105ba48168f821ae" +dependencies = [ + "futures-util", + "log", + "rustls 0.20.6", + "tokio", + "tokio-rustls 0.23.4", + "tungstenite 0.17.2", + "webpki 0.22.0", + "webpki-roots", ] [[package]] @@ -6502,6 +6637,16 @@ dependencies = [ "valuable", ] +[[package]] +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "pin-project 1.0.11", + "tracing", +] + [[package]] name = "tracing-log" version = "0.1.3" @@ -6644,6 +6789,27 @@ dependencies = [ "utf-8", ] +[[package]] +name = "tungstenite" +version = "0.17.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d96a2dea40e7570482f28eb57afbe42d97551905da6a9400acc5c328d24004f5" +dependencies = [ + "base64", + "byteorder", + "bytes", + "http", + "httparse", + "log", + "rand 0.8.5", + "rustls 0.20.6", + "sha-1 0.10.0", + "thiserror", + "url", + "utf-8", + "webpki 0.22.0", +] + [[package]] name = "twoway" version = "0.1.8" @@ -6991,9 +7157,9 @@ dependencies = [ "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls", + "tokio-rustls 0.22.0", "tokio-stream", - "tokio-tungstenite", + "tokio-tungstenite 0.15.0", "tokio-util 0.6.10", "tower-service", "tracing", @@ -7378,6 +7544,24 @@ dependencies = [ "winapi", ] +[[package]] +name = "ws_stream_wasm" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47ca1ab42f5afed7fc332b22b6e932ca5414b209465412c8cdf0ad23bc0de645" +dependencies = [ + "async_io_stream", + "futures", + "js-sys", + "pharos", + "rustc_version 0.4.0", + "send_wrapper", + "thiserror", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "wyz" version = "0.2.0" diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index fc8230c7a2..f42a7f6abc 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -15,3 +15,8 @@ execution_layer = { path = "../../beacon_node/execution_layer" } sensitive_url = { path = "../../common/sensitive_url" } types = { path = "../../consensus/types" } unused_port = { path = "../../common/unused_port" } +ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } +ethers-providers = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } +deposit_contract = { path = "../../common/deposit_contract" } +reqwest = { version = "0.11.0", features = ["json"] } +hex = "0.4.2" diff --git a/testing/execution_engine_integration/src/execution_engine.rs 
b/testing/execution_engine_integration/src/execution_engine.rs index 7df88aa0d7..ad5af53158 100644 --- a/testing/execution_engine_integration/src/execution_engine.rs +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -1,3 +1,4 @@ +use ethers_providers::{Http, Provider}; use execution_layer::DEFAULT_JWT_FILE; use sensitive_url::SensitiveUrl; use std::path::PathBuf; @@ -5,6 +6,14 @@ use std::process::Child; use tempfile::TempDir; use unused_port::unused_tcp_port; +pub const KEYSTORE_PASSWORD: &str = "testpwd"; +pub const ACCOUNT1: &str = "7b8C3a386C0eea54693fFB0DA17373ffC9228139"; +pub const ACCOUNT2: &str = "dA2DD7560DB7e212B945fC72cEB54B7D8C886D77"; +pub const PRIVATE_KEYS: [&str; 2] = [ + "115fe42a60e5ef45f5490e599add1f03c73aeaca129c2c41451eca6cf8ff9e04", + "6a692e710077d9000be1326acbe32f777b403902ac8779b19eb1398b849c99c3", +]; + /// Defined for each EE type (e.g., Geth, Nethermind, etc). pub trait GenericExecutionEngine: Clone { fn init_datadir() -> TempDir; @@ -22,8 +31,10 @@ pub struct ExecutionEngine { engine: E, #[allow(dead_code)] datadir: TempDir, + http_port: u16, http_auth_port: u16, child: Child, + pub provider: Provider, } impl Drop for ExecutionEngine { @@ -42,11 +53,15 @@ impl ExecutionEngine { let http_port = unused_tcp_port().unwrap(); let http_auth_port = unused_tcp_port().unwrap(); let child = E::start_client(&datadir, http_port, http_auth_port, jwt_secret_path); + let provider = Provider::::try_from(format!("http://localhost:{}", http_port)) + .expect("failed to instantiate ethers provider"); Self { engine, datadir, + http_port, http_auth_port, child, + provider, } } @@ -54,6 +69,10 @@ impl ExecutionEngine { SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_auth_port)).unwrap() } + pub fn http_url(&self) -> SensitiveUrl { + SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_port)).unwrap() + } + pub fn datadir(&self) -> PathBuf { self.datadir.path().to_path_buf() } diff --git 
a/testing/execution_engine_integration/src/genesis_json.rs b/testing/execution_engine_integration/src/genesis_json.rs index 87fdaec14a..17654b292a 100644 --- a/testing/execution_engine_integration/src/genesis_json.rs +++ b/testing/execution_engine_integration/src/genesis_json.rs @@ -32,7 +32,12 @@ pub fn geth_genesis_json() -> Value { "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000", "coinbase":"0x0000000000000000000000000000000000000000", "alloc":{ - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b":{"balance":"0x6d6172697573766477000000"} + "0x7b8C3a386C0eea54693fFB0DA17373ffC9228139": { + "balance": "10000000000000000000000000" + }, + "0xdA2DD7560DB7e212B945fC72cEB54B7D8C886D77": { + "balance": "10000000000000000000000000" + }, }, "number":"0x0", "gasUsed":"0x0", @@ -40,3 +45,87 @@ pub fn geth_genesis_json() -> Value { "baseFeePerGas":"0x7" }) } + +/// Modified kiln config +pub fn nethermind_genesis_json() -> Value { + json!( + { + "name": "lighthouse_test_network", + "engine": { + "Ethash": { + "params": { + "minimumDifficulty": "0x20000", + "difficultyBoundDivisor": "0x800", + "durationLimit": "0xd", + "blockReward": { + "0x0": "0x1BC16D674EC80000" + }, + "homesteadTransition": "0x0", + "eip100bTransition": "0x0", + "difficultyBombDelays": {} + } + } + }, + "params": { + "gasLimitBoundDivisor": "0x400", + "registrar": "0x0000000000000000000000000000000000000000", + "accountStartNonce": "0x0", + "maximumExtraDataSize": "0x20", + "minGasLimit": "0x1388", + "networkID": "0x1469ca", + "MergeForkIdTransition": "0x3e8", + "eip150Transition": "0x0", + "eip158Transition": "0x0", + "eip160Transition": "0x0", + "eip161abcTransition": "0x0", + "eip161dTransition": "0x0", + "eip155Transition": "0x0", + "eip140Transition": "0x0", + "eip211Transition": "0x0", + "eip214Transition": "0x0", + "eip658Transition": "0x0", + "eip145Transition": "0x0", + "eip1014Transition": "0x0", + "eip1052Transition": "0x0", + "eip1283Transition": "0x0", + 
"eip1283DisableTransition": "0x0", + "eip152Transition": "0x0", + "eip1108Transition": "0x0", + "eip1344Transition": "0x0", + "eip1884Transition": "0x0", + "eip2028Transition": "0x0", + "eip2200Transition": "0x0", + "eip2565Transition": "0x0", + "eip2929Transition": "0x0", + "eip2930Transition": "0x0", + "eip1559Transition": "0x0", + "eip3198Transition": "0x0", + "eip3529Transition": "0x0", + "eip3541Transition": "0x0" + }, + "genesis": { + "seal": { + "ethereum": { + "nonce": "0x1234", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "difficulty": "0x01", + "author": "0x0000000000000000000000000000000000000000", + "timestamp": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData": "", + "gasLimit": "0x1C9C380" + }, + "accounts": { + "0x7b8C3a386C0eea54693fFB0DA17373ffC9228139": { + "balance": "10000000000000000000000000" + }, + "0xdA2DD7560DB7e212B945fC72cEB54B7D8C886D77": { + "balance": "10000000000000000000000000" + }, + }, + "nodes": [] + } + ) +} diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 129faea907..8c751ed651 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -90,13 +90,14 @@ impl GenericExecutionEngine for GethEngine { .arg(datadir.path().to_str().unwrap()) .arg("--http") .arg("--http.api") - .arg("engine,eth") + .arg("engine,eth,personal") .arg("--http.port") .arg(http_port.to_string()) .arg("--authrpc.port") .arg(http_auth_port.to_string()) .arg("--port") .arg(network_port.to_string()) + .arg("--allow-insecure-unlock") .arg("--authrpc.jwtsecret") .arg(jwt_secret_path.as_path().to_str().unwrap()) .stdout(build_utils::build_stdio()) diff --git a/testing/execution_engine_integration/src/main.rs b/testing/execution_engine_integration/src/main.rs index a4ec0f9215..bd3436602c 100644 --- 
a/testing/execution_engine_integration/src/main.rs +++ b/testing/execution_engine_integration/src/main.rs @@ -1,3 +1,4 @@ +#![recursion_limit = "1024"] /// This binary runs integration tests between Lighthouse and execution engines. /// /// It will first attempt to build any supported integration clients, then it will run tests. @@ -9,6 +10,7 @@ mod genesis_json; mod geth; mod nethermind; mod test_rig; +mod transactions; use geth::GethEngine; use nethermind::NethermindEngine; diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index df345f36be..1fe7bf0f05 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -1,6 +1,8 @@ use crate::build_utils; use crate::execution_engine::GenericExecutionEngine; +use crate::genesis_json::nethermind_genesis_json; use std::env; +use std::fs::File; use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output}; use tempfile::TempDir; @@ -69,33 +71,43 @@ impl NethermindEngine { impl GenericExecutionEngine for NethermindEngine { fn init_datadir() -> TempDir { - TempDir::new().unwrap() + let datadir = TempDir::new().unwrap(); + let genesis_json_path = datadir.path().join("genesis.json"); + let mut file = File::create(&genesis_json_path).unwrap(); + let json = nethermind_genesis_json(); + serde_json::to_writer(&mut file, &json).unwrap(); + datadir } fn start_client( datadir: &TempDir, - _http_port: u16, + http_port: u16, http_auth_port: u16, jwt_secret_path: PathBuf, ) -> Child { let network_port = unused_tcp_port().unwrap(); + let genesis_json_path = datadir.path().join("genesis.json"); Command::new(Self::binary_path()) .arg("--datadir") .arg(datadir.path().to_str().unwrap()) .arg("--config") .arg("kiln") + .arg("--Init.ChainSpecPath") + .arg(genesis_json_path.to_str().unwrap()) .arg("--Merge.TerminalTotalDifficulty") .arg("0") + .arg("--JsonRpc.Enabled") + .arg("true") + 
.arg("--JsonRpc.EnabledModules") + .arg("net,eth,subscribe,web3,admin,personal") + .arg("--JsonRpc.Port") + .arg(http_port.to_string()) .arg("--JsonRpc.AdditionalRpcUrls") .arg(format!( "http://localhost:{}|http;ws|net;eth;subscribe;engine;web3;client", http_auth_port )) - .arg("--JsonRpc.EnabledModules") - .arg("net,eth,subscribe,web3,admin,engine") - .arg("--JsonRpc.Port") - .arg(http_auth_port.to_string()) .arg("--Network.DiscoveryPort") .arg(network_port.to_string()) .arg("--Network.P2PPort") diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 5b23af4fa1..ee5e9cf2cc 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -1,5 +1,12 @@ -use crate::execution_engine::{ExecutionEngine, GenericExecutionEngine}; +use crate::execution_engine::{ + ExecutionEngine, GenericExecutionEngine, ACCOUNT1, ACCOUNT2, KEYSTORE_PASSWORD, PRIVATE_KEYS, +}; +use crate::transactions::transactions; +use ethers_providers::Middleware; use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatus}; +use reqwest::{header::CONTENT_TYPE, Client}; +use sensitive_url::SensitiveUrl; +use serde_json::{json, Value}; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; @@ -8,7 +15,6 @@ use types::{ Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, MainnetEthSpec, Slot, Uint256, }; - const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(20); struct ExecutionPair { @@ -32,6 +38,63 @@ pub struct TestRig { _runtime_shutdown: exit_future::Signal, } +/// Import a private key into the execution engine and unlock it so that we can +/// make transactions with the corresponding account. 
+async fn import_and_unlock(http_url: SensitiveUrl, priv_keys: &[&str], password: &str) { + for priv_key in priv_keys { + let body = json!( + { + "jsonrpc":"2.0", + "method":"personal_importRawKey", + "params":[priv_key, password], + "id":1 + } + ); + + let client = Client::builder().build().unwrap(); + let request = client + .post(http_url.full.clone()) + .header(CONTENT_TYPE, "application/json") + .json(&body); + + let response: Value = request + .send() + .await + .unwrap() + .error_for_status() + .unwrap() + .json() + .await + .unwrap(); + + let account = response.get("result").unwrap().as_str().unwrap(); + + let body = json!( + { + "jsonrpc":"2.0", + "method":"personal_unlockAccount", + "params":[account, password], + "id":1 + } + ); + + let request = client + .post(http_url.full.clone()) + .header(CONTENT_TYPE, "application/json") + .json(&body); + + let _response: Value = request + .send() + .await + .unwrap() + .error_for_status() + .unwrap() + .json() + .await + .unwrap(); + } +} + impl TestRig { pub fn new(generic_engine: E) -> Self { let log = environment::null_logger().unwrap(); @@ -125,6 +188,20 @@ impl TestRig { pub async fn perform_tests(&self) { self.wait_until_synced().await; + // Import and unlock all private keys to sign transactions + let _ = futures::future::join_all([&self.ee_a, &self.ee_b].iter().map(|ee| { + import_and_unlock( + ee.execution_engine.http_url(), + &PRIVATE_KEYS, + KEYSTORE_PASSWORD, + ) + })) + .await; + + // We hardcode the accounts here since some EEs start with a default unlocked account + let account1 = ethers_core::types::Address::from_slice(&hex::decode(&ACCOUNT1).unwrap()); + let account2 = ethers_core::types::Address::from_slice(&hex::decode(&ACCOUNT2).unwrap()); + /* * Check the transition config endpoint. 
*/ @@ -157,6 +234,17 @@ impl TestRig { .unwrap() ); + // Submit transactions before getting payload + let txs = transactions::(account1, account2); + for tx in txs.clone().into_iter() { + self.ee_a + .execution_engine + .provider + .send_transaction(tx, None) + .await + .unwrap(); + } + /* * Execution Engine A: * @@ -168,6 +256,45 @@ impl TestRig { let prev_randao = Hash256::zero(); let finalized_block_hash = ExecutionBlockHash::zero(); let proposer_index = 0; + + let prepared = self + .ee_a + .execution_layer + .insert_proposer( + Slot::new(1), // Insert proposer for the next slot + Hash256::zero(), + proposer_index, + PayloadAttributes { + timestamp, + prev_randao, + suggested_fee_recipient: Address::zero(), + }, + ) + .await; + + assert!(!prepared, "Inserting proposer for the first time"); + + // Make a fcu call with the PayloadAttributes that we inserted previously + let prepare = self + .ee_a + .execution_layer + .notify_forkchoice_updated( + parent_hash, + finalized_block_hash, + Slot::new(0), + Hash256::zero(), + ) + .await + .unwrap(); + + assert_eq!(prepare, PayloadStatus::Valid); + + // Add a delay to give the EE sufficient time to pack the + // submitted transactions into a payload. + // This is required when running on under resourced nodes and + // in CI. 
+ sleep(Duration::from_secs(3)).await; + let valid_payload = self .ee_a .execution_layer @@ -184,6 +311,8 @@ impl TestRig { .unwrap() .execution_payload; + assert_eq!(valid_payload.transactions.len(), txs.len()); + /* * Execution Engine A: * diff --git a/testing/execution_engine_integration/src/transactions.rs b/testing/execution_engine_integration/src/transactions.rs new file mode 100644 index 0000000000..144946682b --- /dev/null +++ b/testing/execution_engine_integration/src/transactions.rs @@ -0,0 +1,87 @@ +use deposit_contract::{encode_eth1_tx_data, BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS}; +use ethers_core::types::{ + transaction::{eip2718::TypedTransaction, eip2930::AccessList}, + Address, Bytes, Eip1559TransactionRequest, TransactionRequest, +}; +use types::{DepositData, EthSpec, Hash256, Keypair, Signature}; + +/// Hardcoded deposit contract address based on sender address and nonce +pub const DEPOSIT_CONTRACT_ADDRESS: &str = "64f43BEc7F86526686C931d65362bB8698872F90"; + +#[derive(Debug)] +pub enum Transaction { + Transfer(Address, Address), + TransferLegacy(Address, Address), + TransferAccessList(Address, Address), + DeployDepositContract(Address), + DepositDepositContract { + sender: Address, + deposit_contract_address: Address, + }, +} + +/// Get a list of transactions to publish to the execution layer. 
+pub fn transactions(account1: Address, account2: Address) -> Vec { + vec![ + Transaction::Transfer(account1, account2).transaction::(), + Transaction::TransferLegacy(account1, account2).transaction::(), + Transaction::TransferAccessList(account1, account2).transaction::(), + Transaction::DeployDepositContract(account1).transaction::(), + Transaction::DepositDepositContract { + sender: account1, + deposit_contract_address: ethers_core::types::Address::from_slice( + &hex::decode(&DEPOSIT_CONTRACT_ADDRESS).unwrap(), + ), + } + .transaction::(), + ] +} + +impl Transaction { + pub fn transaction(&self) -> TypedTransaction { + match &self { + Self::TransferLegacy(from, to) => TransactionRequest::new() + .from(*from) + .to(*to) + .value(1) + .into(), + Self::Transfer(from, to) => Eip1559TransactionRequest::new() + .from(*from) + .to(*to) + .value(1) + .into(), + Self::TransferAccessList(from, to) => TransactionRequest::new() + .from(*from) + .to(*to) + .value(1) + .with_access_list(AccessList::default()) + .into(), + Self::DeployDepositContract(addr) => TransactionRequest::new() + .from(*addr) + .data(Bytes::from(BYTECODE.to_vec())) + .gas(CONTRACT_DEPLOY_GAS) + .into(), + Self::DepositDepositContract { + sender, + deposit_contract_address, + } => { + let keypair = Keypair::random(); + + let mut deposit = DepositData { + pubkey: keypair.pk.into(), + withdrawal_credentials: Hash256::zero(), + amount: 32_000_000_000, + signature: Signature::empty().into(), + }; + + deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); + TransactionRequest::new() + .from(*sender) + .to(*deposit_contract_address) + .data(Bytes::from(encode_eth1_tx_data(&deposit).unwrap())) + .gas(DEPOSIT_GAS) + .into() + } + } + } +} From f9b9658711698269f9b9cd302364f318a29ba509 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 18 Jul 2022 23:15:40 +0000 Subject: [PATCH 072/184] Add merge support to simulator (#3292) ## Issue Addressed N/A ## Proposed Changes Make simulator 
merge compatible. Adds a `--post_merge` flag to the eth1 simulator that enables a ttd and simulates the merge transition. Uses the `MockServer` in the execution layer test utils to simulate a dummy execution node. Adds the merge transition simulation to CI. --- .github/custom/clippy.toml | 1 + .github/workflows/test-suite.yml | 12 ++ Cargo.lock | 1 + beacon_node/beacon_chain/src/test_utils.rs | 3 + .../execution_layer/src/engine_api/auth.rs | 8 +- .../execution_layer/src/engine_api/http.rs | 8 +- .../test_utils/execution_block_generator.rs | 45 +++++++- .../src/test_utils/handle_rpc.rs | 26 +++-- .../src/test_utils/mock_execution_layer.rs | 10 +- .../execution_layer/src/test_utils/mod.rs | 103 +++++++++++++----- bors.toml | 1 + testing/node_test_rig/Cargo.toml | 1 + testing/node_test_rig/src/lib.rs | 29 +++++ testing/simulator/src/checks.rs | 46 +++++++- testing/simulator/src/cli.rs | 5 + testing/simulator/src/eth1_sim.rs | 88 +++++++++++++-- testing/simulator/src/local_network.rs | 71 +++++++++++- 17 files changed, 389 insertions(+), 69 deletions(-) diff --git a/.github/custom/clippy.toml b/.github/custom/clippy.toml index df09502307..f50e35bcdf 100644 --- a/.github/custom/clippy.toml +++ b/.github/custom/clippy.toml @@ -18,4 +18,5 @@ async-wrapper-methods = [ "warp_utils::task::blocking_json_task", "validator_client::http_api::blocking_signed_json_task", "execution_layer::test_utils::MockServer::new", + "execution_layer::test_utils::MockServer::new_with_config", ] diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index a58491d04f..6458af6e79 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -158,6 +158,18 @@ jobs: run: sudo npm install -g ganache - name: Run the beacon chain sim that starts from an eth1 contract run: cargo run --release --bin simulator eth1-sim + merge-transition-ubuntu: + name: merge-transition-ubuntu + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: 
actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install ganache + run: sudo npm install -g ganache + - name: Run the beacon chain sim and go through the merge transition + run: cargo run --release --bin simulator eth1-sim --post-merge no-eth1-simulator-ubuntu: name: no-eth1-simulator-ubuntu runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index dfeac97cf4..c1277ed1d0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4045,6 +4045,7 @@ dependencies = [ "beacon_node", "environment", "eth2", + "execution_layer", "sensitive_url", "tempfile", "types", diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 62765c2222..1dc6f4b83b 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -11,7 +11,9 @@ use crate::{ StateSkipConfig, }; use bls::get_withdrawal_credentials; +use execution_layer::test_utils::DEFAULT_JWT_SECRET; use execution_layer::{ + auth::JwtKey, test_utils::{ExecutionBlockGenerator, MockExecutionLayer, DEFAULT_TERMINAL_BLOCK}, ExecutionLayer, }; @@ -361,6 +363,7 @@ where DEFAULT_TERMINAL_BLOCK, spec.terminal_block_hash, spec.terminal_block_hash_activation_epoch, + Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), None, ); self.execution_layer = Some(mock.el.clone()); diff --git a/beacon_node/execution_layer/src/engine_api/auth.rs b/beacon_node/execution_layer/src/engine_api/auth.rs index 560e43585b..8fcdb2543d 100644 --- a/beacon_node/execution_layer/src/engine_api/auth.rs +++ b/beacon_node/execution_layer/src/engine_api/auth.rs @@ -25,7 +25,7 @@ impl From for Error { } /// Provides wrapper around `[u8; JWT_SECRET_LENGTH]` that implements `Zeroize`. 
-#[derive(Zeroize)] +#[derive(Zeroize, Clone)] #[zeroize(drop)] pub struct JwtKey([u8; JWT_SECRET_LENGTH as usize]); @@ -159,12 +159,12 @@ pub struct Claims { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::JWT_SECRET; + use crate::test_utils::DEFAULT_JWT_SECRET; #[test] fn test_roundtrip() { let auth = Auth::new( - JwtKey::from_slice(&JWT_SECRET).unwrap(), + JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), Some("42".into()), Some("Lighthouse".into()), ); @@ -172,7 +172,7 @@ mod tests { let token = auth.generate_token_with_claims(&claims).unwrap(); assert_eq!( - Auth::validate_token(&token, &JwtKey::from_slice(&JWT_SECRET).unwrap()) + Auth::validate_token(&token, &JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()) .unwrap() .claims, claims diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index c4811e04c1..a8eb42971e 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -708,7 +708,7 @@ impl HttpJsonRpc { mod test { use super::auth::JwtKey; use super::*; - use crate::test_utils::{MockServer, JWT_SECRET}; + use crate::test_utils::{MockServer, DEFAULT_JWT_SECRET}; use std::future::Future; use std::str::FromStr; use std::sync::Arc; @@ -728,8 +728,10 @@ mod test { let echo_url = SensitiveUrl::parse(&format!("{}/echo", server.url())).unwrap(); // Create rpc clients that include JWT auth headers if `with_auth` is true. 
let (rpc_client, echo_client) = if with_auth { - let rpc_auth = Auth::new(JwtKey::from_slice(&JWT_SECRET).unwrap(), None, None); - let echo_auth = Auth::new(JwtKey::from_slice(&JWT_SECRET).unwrap(), None, None); + let rpc_auth = + Auth::new(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), None, None); + let echo_auth = + Auth::new(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), None, None); ( Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth).unwrap()), Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth).unwrap()), diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index b61092cf0e..7d8cdb299d 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,10 +1,13 @@ -use crate::engine_api::{ - json_structures::{ - JsonForkchoiceUpdatedV1Response, JsonPayloadStatusV1, JsonPayloadStatusV1Status, - }, - ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status, -}; use crate::engines::ForkChoiceState; +use crate::{ + engine_api::{ + json_structures::{ + JsonForkchoiceUpdatedV1Response, JsonPayloadStatusV1, JsonPayloadStatusV1Status, + }, + ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status, + }, + ExecutionBlockWithTransactions, +}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use tree_hash::TreeHash; @@ -66,6 +69,28 @@ impl Block { }, } } + + pub fn as_execution_block_with_tx(&self) -> Option> { + match self { + Block::PoS(payload) => Some(ExecutionBlockWithTransactions { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom.clone(), + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: 
payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data.clone(), + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: vec![], + }), + Block::PoW(_) => None, + } + } } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize, TreeHash)] @@ -153,6 +178,14 @@ impl ExecutionBlockGenerator { .map(|block| block.as_execution_block(self.terminal_total_difficulty)) } + pub fn execution_block_with_txs_by_hash( + &self, + hash: ExecutionBlockHash, + ) -> Option> { + self.block_by_hash(hash) + .and_then(|block| block.as_execution_block_with_tx()) + } + pub fn move_to_block_prior_to_terminal_block(&mut self) -> Result<(), String> { let target_block = self .terminal_block_number diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 772ac3c866..5e0e0591cd 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -48,13 +48,25 @@ pub async fn handle_rpc( s.parse() .map_err(|e| format!("unable to parse hash: {:?}", e)) })?; - - Ok(serde_json::to_value( - ctx.execution_block_generator - .read() - .execution_block_by_hash(hash), - ) - .unwrap()) + let full_tx = params + .get(1) + .and_then(JsonValue::as_bool) + .ok_or_else(|| "missing/invalid params[1] value".to_string())?; + if full_tx { + Ok(serde_json::to_value( + ctx.execution_block_generator + .read() + .execution_block_with_txs_by_hash(hash), + ) + .unwrap()) + } else { + Ok(serde_json::to_value( + ctx.execution_block_generator + .read() + .execution_block_by_hash(hash), + ) + .unwrap()) + } } ENGINE_NEW_PAYLOAD_V1 => { let request: JsonExecutionPayloadV1 = get_param(params, 0)?; diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 707a7c0c3e..517772a695 100644 --- 
a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -1,5 +1,7 @@ use crate::{ - test_utils::{MockServer, DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY, JWT_SECRET}, + test_utils::{ + MockServer, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY, + }, Config, *, }; use sensitive_url::SensitiveUrl; @@ -22,6 +24,7 @@ impl MockExecutionLayer { DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), Epoch::new(0), + Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), None, ) } @@ -32,6 +35,7 @@ impl MockExecutionLayer { terminal_block: u64, terminal_block_hash: ExecutionBlockHash, terminal_block_hash_activation_epoch: Epoch, + jwt_key: Option, builder_url: Option, ) -> Self { let handle = executor.handle().unwrap(); @@ -41,8 +45,10 @@ impl MockExecutionLayer { spec.terminal_block_hash = terminal_block_hash; spec.terminal_block_hash_activation_epoch = terminal_block_hash_activation_epoch; + let jwt_key = jwt_key.unwrap_or_else(JwtKey::random); let server = MockServer::new( &handle, + jwt_key, terminal_total_difficulty, terminal_block, terminal_block_hash, @@ -52,7 +58,7 @@ impl MockExecutionLayer { let file = NamedTempFile::new().unwrap(); let path = file.path().into(); - std::fs::write(&path, hex::encode(JWT_SECRET)).unwrap(); + std::fs::write(&path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); let config = Config { execution_endpoints: vec![url], diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 805f6716fb..723da25ff1 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -26,12 +26,33 @@ pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; -pub const JWT_SECRET: [u8; 32] = [42; 32]; +pub const DEFAULT_JWT_SECRET: [u8; 32] = 
[42; 32]; mod execution_block_generator; mod handle_rpc; mod mock_execution_layer; +/// Configuration for the MockExecutionLayer. +pub struct MockExecutionConfig { + pub server_config: Config, + pub jwt_key: JwtKey, + pub terminal_difficulty: Uint256, + pub terminal_block: u64, + pub terminal_block_hash: ExecutionBlockHash, +} + +impl Default for MockExecutionConfig { + fn default() -> Self { + Self { + jwt_key: JwtKey::random(), + terminal_difficulty: DEFAULT_TERMINAL_DIFFICULTY.into(), + terminal_block: DEFAULT_TERMINAL_BLOCK, + terminal_block_hash: ExecutionBlockHash::zero(), + server_config: Config::default(), + } + } +} + pub struct MockServer { _shutdown_tx: oneshot::Sender<()>, listen_socket_addr: SocketAddr, @@ -43,25 +64,29 @@ impl MockServer { pub fn unit_testing() -> Self { Self::new( &runtime::Handle::current(), + JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), ) } - pub fn new( - handle: &runtime::Handle, - terminal_difficulty: Uint256, - terminal_block: u64, - terminal_block_hash: ExecutionBlockHash, - ) -> Self { + pub fn new_with_config(handle: &runtime::Handle, config: MockExecutionConfig) -> Self { + let MockExecutionConfig { + jwt_key, + terminal_difficulty, + terminal_block, + terminal_block_hash, + server_config, + } = config; let last_echo_request = Arc::new(RwLock::new(None)); let preloaded_responses = Arc::new(Mutex::new(vec![])); let execution_block_generator = ExecutionBlockGenerator::new(terminal_difficulty, terminal_block, terminal_block_hash); let ctx: Arc> = Arc::new(Context { - config: <_>::default(), + config: server_config, + jwt_key, log: null_logger().unwrap(), last_echo_request: last_echo_request.clone(), execution_block_generator: RwLock::new(execution_block_generator), @@ -99,6 +124,25 @@ impl MockServer { } } + pub fn new( + handle: &runtime::Handle, + jwt_key: JwtKey, + terminal_difficulty: Uint256, + terminal_block: u64, + 
terminal_block_hash: ExecutionBlockHash, + ) -> Self { + Self::new_with_config( + handle, + MockExecutionConfig { + server_config: Config::default(), + jwt_key, + terminal_difficulty, + terminal_block, + terminal_block_hash, + }, + ) + } + pub fn execution_block_generator(&self) -> RwLockWriteGuard<'_, ExecutionBlockGenerator> { self.ctx.execution_block_generator.write() } @@ -351,6 +395,7 @@ impl warp::reject::Reject for AuthError {} /// The server will gracefully handle the case where any fields are `None`. pub struct Context { pub config: Config, + pub jwt_key: JwtKey, pub log: Logger, pub last_echo_request: Arc>>, pub execution_block_generator: RwLock>, @@ -386,28 +431,30 @@ struct ErrorMessage { /// Returns a `warp` header which filters out request that has a missing or incorrectly /// signed JWT token. -fn auth_header_filter() -> warp::filters::BoxedFilter<()> { +fn auth_header_filter(jwt_key: JwtKey) -> warp::filters::BoxedFilter<()> { warp::any() .and(warp::filters::header::optional("Authorization")) - .and_then(move |authorization: Option| async move { - match authorization { - None => Err(warp::reject::custom(AuthError( - "auth absent from request".to_string(), - ))), - Some(auth) => { - if let Some(token) = auth.strip_prefix("Bearer ") { - let secret = JwtKey::from_slice(&JWT_SECRET).unwrap(); - match Auth::validate_token(token, &secret) { - Ok(_) => Ok(()), - Err(e) => Err(warp::reject::custom(AuthError(format!( - "Auth failure: {:?}", - e - )))), + .and_then(move |authorization: Option| { + let secret = jwt_key.clone(); + async move { + match authorization { + None => Err(warp::reject::custom(AuthError( + "auth absent from request".to_string(), + ))), + Some(auth) => { + if let Some(token) = auth.strip_prefix("Bearer ") { + match Auth::validate_token(token, &secret) { + Ok(_) => Ok(()), + Err(e) => Err(warp::reject::custom(AuthError(format!( + "Auth failure: {:?}", + e + )))), + } + } else { + Err(warp::reject::custom(AuthError( + "Bearer token not 
present in auth header".to_string(), + ))) } - } else { - Err(warp::reject::custom(AuthError( - "Bearer token not present in auth header".to_string(), - ))) } } } @@ -523,7 +570,7 @@ pub fn serve( }); let routes = warp::post() - .and(auth_header_filter()) + .and(auth_header_filter(ctx.jwt_key.clone())) .and(root.or(echo)) .recover(handle_rejection) // Add a `Server` header. diff --git a/bors.toml b/bors.toml index d7d1e98762..0ff5d6231b 100644 --- a/bors.toml +++ b/bors.toml @@ -7,6 +7,7 @@ status = [ "ef-tests-ubuntu", "dockerfile-ubuntu", "eth1-simulator-ubuntu", + "merge-transition-ubuntu", "no-eth1-simulator-ubuntu", "check-benchmarks", "check-consensus", diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index 8e4b8595df..2c9bd5939f 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -13,3 +13,4 @@ eth2 = { path = "../../common/eth2" } validator_client = { path = "../../validator_client" } validator_dir = { path = "../../common/validator_dir", features = ["insecure_keys"] } sensitive_url = { path = "../../common/sensitive_url" } +execution_layer = { path = "../../beacon_node/execution_layer" } \ No newline at end of file diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index acf9bb9e68..0933bff4c6 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -17,6 +17,9 @@ use validator_dir::insecure_keys::build_deterministic_validator_dirs; pub use beacon_node::{ClientConfig, ClientGenesis, ProductionClient}; pub use environment; pub use eth2; +pub use execution_layer::test_utils::{ + Config as MockServerConfig, MockExecutionConfig, MockServer, +}; pub use validator_client::Config as ValidatorConfig; /// The global timeout for HTTP requests to the beacon node. 
@@ -211,3 +214,29 @@ impl LocalValidatorClient { }) } } + +/// Provides an execution engine api server that is running in the current process on a given tokio executor (it +/// is _local_ to this process). +/// +/// Intended for use in testing and simulation. Not for production. +pub struct LocalExecutionNode { + pub server: MockServer, + pub datadir: TempDir, +} + +impl LocalExecutionNode { + pub fn new(context: RuntimeContext, config: MockExecutionConfig) -> Self { + let datadir = TempBuilder::new() + .prefix("lighthouse_node_test_rig_el") + .tempdir() + .expect("should create temp directory for client datadir"); + let jwt_file_path = datadir.path().join("jwt.hex"); + if let Err(e) = std::fs::write(&jwt_file_path, config.jwt_key.hex_string()) { + panic!("Failed to write jwt file {}", e); + } + Self { + server: MockServer::new_with_config(&context.executor.handle().unwrap(), config), + datadir, + } + } +} diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 7ff387b9c6..02f4f76d51 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -1,7 +1,7 @@ use crate::local_network::LocalNetwork; use node_test_rig::eth2::types::{BlockId, StateId}; use std::time::Duration; -use types::{Epoch, EthSpec, Slot, Unsigned}; +use types::{Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, Unsigned}; /// Checks that all of the validators have on-boarded by the start of the second eth1 voting /// period. @@ -149,19 +149,19 @@ pub async fn verify_fork_version( network: LocalNetwork, fork_epoch: Epoch, slot_duration: Duration, - altair_fork_version: [u8; 4], + fork_version: [u8; 4], ) -> Result<(), String> { epoch_delay(fork_epoch, slot_duration, E::slots_per_epoch()).await; for remote_node in network.remote_nodes()? 
{ - let fork_version = remote_node + let remote_fork_version = remote_node .get_beacon_states_fork(StateId::Head) .await .map(|resp| resp.unwrap().data.current_version) .map_err(|e| format!("Failed to get fork from beacon node: {:?}", e))?; - if fork_version != altair_fork_version { + if fork_version != remote_fork_version { return Err(format!( "Fork version after FORK_EPOCH is incorrect, got: {:?}, expected: {:?}", - fork_version, altair_fork_version, + remote_fork_version, fork_version, )); } } @@ -207,3 +207,39 @@ pub async fn verify_full_sync_aggregates_up_to( Ok(()) } + +/// Verify that the first merged PoS block got finalized. +pub async fn verify_transition_block_finalized( + network: LocalNetwork, + transition_epoch: Epoch, + slot_duration: Duration, + should_verify: bool, +) -> Result<(), String> { + if !should_verify { + return Ok(()); + } + epoch_delay(transition_epoch + 2, slot_duration, E::slots_per_epoch()).await; + let mut block_hashes = Vec::new(); + for remote_node in network.remote_nodes()?.iter() { + let execution_block_hash: ExecutionBlockHash = remote_node + .get_beacon_blocks::(BlockId::Finalized) + .await + .map(|body| body.unwrap().data) + .map_err(|e| format!("Get state root via http failed: {:?}", e))? 
+ .message() + .execution_payload() + .map(|payload| payload.execution_payload.block_hash) + .map_err(|e| format!("Execution payload does not exist: {:?}", e))?; + block_hashes.push(execution_block_hash); + } + + let first = block_hashes[0]; + if first.into_root() != Hash256::zero() && block_hashes.iter().all(|&item| item == first) { + Ok(()) + } else { + Err(format!( + "Terminal block not finalized on all nodes Finalized block hashes:{:?}", + block_hashes + )) + } +} diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index 28f1a25627..f1196502fb 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -36,6 +36,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("3") .help("Speed up factor. Please use a divisor of 12.")) + .arg(Arg::with_name("post-merge") + .short("m") + .long("post-merge") + .takes_value(false) + .help("Simulate the merge transition")) .arg(Arg::with_name("continue_after_checks") .short("c") .long("continue_after_checks") diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 4c773c70bf..c54944c2e1 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -1,4 +1,4 @@ -use crate::local_network::INVALID_ADDRESS; +use crate::local_network::{EXECUTION_PORT, INVALID_ADDRESS, TERMINAL_BLOCK, TERMINAL_DIFFICULTY}; use crate::{checks, LocalNetwork, E}; use clap::ArgMatches; use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; @@ -18,8 +18,12 @@ use std::time::Duration; use tokio::time::sleep; use types::{Epoch, EthSpec, MinimalEthSpec}; -const FORK_EPOCH: u64 = 2; const END_EPOCH: u64 = 16; +const ALTAIR_FORK_EPOCH: u64 = 1; +const BELLATRIX_FORK_EPOCH: u64 = 2; + +const SUGGESTED_FEE_RECIPIENT: [u8; 20] = + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let node_count = value_t!(matches, "nodes", usize).expect("missing nodes default"); @@ 
-28,10 +32,12 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let speed_up_factor = value_t!(matches, "speed_up_factor", u64).expect("missing speed_up_factor default"); let continue_after_checks = matches.is_present("continue_after_checks"); + let post_merge_sim = matches.is_present("post-merge"); println!("Beacon Chain Simulator:"); println!(" nodes:{}", node_count); println!(" validators_per_node:{}", validators_per_node); + println!(" post merge simulation:{}", post_merge_sim); println!(" continue_after_checks:{}", continue_after_checks); // Generate the directories and keystores required for the validator clients. @@ -72,6 +78,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let total_validator_count = validators_per_node * node_count; let altair_fork_version = spec.altair_fork_version; + let bellatrix_fork_version = spec.bellatrix_fork_version; spec.seconds_per_slot /= speed_up_factor; spec.seconds_per_slot = max(1, spec.seconds_per_slot); @@ -80,8 +87,14 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { spec.min_genesis_time = 0; spec.min_genesis_active_validator_count = total_validator_count as u64; spec.seconds_per_eth1_block = eth1_block_time.as_secs(); - spec.altair_fork_epoch = Some(Epoch::new(FORK_EPOCH)); + spec.altair_fork_epoch = Some(Epoch::new(ALTAIR_FORK_EPOCH)); + // Set these parameters only if we are doing a merge simulation + if post_merge_sim { + spec.terminal_total_difficulty = TERMINAL_DIFFICULTY.into(); + spec.bellatrix_fork_epoch = Some(Epoch::new(BELLATRIX_FORK_EPOCH)); + } + let seconds_per_slot = spec.seconds_per_slot; let slot_duration = Duration::from_secs(spec.seconds_per_slot); let initial_validator_count = spec.min_genesis_active_validator_count as usize; let deposit_amount = env.eth2_config.spec.max_effective_balance; @@ -137,6 +150,19 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { beacon_config.network.enr_address = 
Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); + if post_merge_sim { + let el_config = execution_layer::Config { + execution_endpoints: vec![SensitiveUrl::parse(&format!( + "http://localhost:{}", + EXECUTION_PORT + )) + .unwrap()], + ..Default::default() + }; + + beacon_config.execution_layer = Some(el_config); + } + /* * Create a new `LocalNetwork` with one beacon node. */ @@ -168,9 +194,13 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let network_1 = network.clone(); executor.spawn( async move { + let mut validator_config = testing_validator_config(); + if post_merge_sim { + validator_config.fee_recipient = Some(SUGGESTED_FEE_RECIPIENT.into()); + } println!("Adding validator client {}", i); network_1 - .add_validator_client(testing_validator_config(), i, files, i % 2 == 0) + .add_validator_client(validator_config, i, files, i % 2 == 0) .await .expect("should add validator"); }, @@ -182,6 +212,21 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { println!("Duration to genesis: {}", duration_to_genesis.as_secs()); sleep(duration_to_genesis).await; + if post_merge_sim { + let executor = executor.clone(); + let network_2 = network.clone(); + executor.spawn( + async move { + println!("Mining pow blocks"); + let mut interval = tokio::time::interval(Duration::from_secs(seconds_per_slot)); + for i in 1..=TERMINAL_BLOCK + 1 { + interval.tick().await; + let _ = network_2.mine_pow_blocks(i); + } + }, + "pow_mining", + ); + } /* * Start the checks that ensure the network performs as expected. * @@ -190,7 +235,16 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { * tests start at the right time. Whilst this is works well for now, it's subject to * breakage by changes to the VC. 
*/ - let (finalization, block_prod, validator_count, onboarding, fork, sync_aggregate) = futures::join!( + + let ( + finalization, + block_prod, + validator_count, + onboarding, + fork, + sync_aggregate, + transition, + ) = futures::join!( // Check that the chain finalizes at the first given opportunity. checks::verify_first_finalization(network.clone(), slot_duration), // Check that a block is produced at every slot. @@ -212,21 +266,36 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { slot_duration, total_validator_count, ), - // Check that all nodes have transitioned to the new fork. + // Check that all nodes have transitioned to the required fork. checks::verify_fork_version( network.clone(), - Epoch::new(FORK_EPOCH), + if post_merge_sim { + Epoch::new(BELLATRIX_FORK_EPOCH) + } else { + Epoch::new(ALTAIR_FORK_EPOCH) + }, slot_duration, - altair_fork_version + if post_merge_sim { + bellatrix_fork_version + } else { + altair_fork_version + } ), // Check that all sync aggregates are full. checks::verify_full_sync_aggregates_up_to( network.clone(), // Start checking for sync_aggregates at `FORK_EPOCH + 1` to account for // inefficiencies in finding subnet peers at the `fork_slot`. - Epoch::new(FORK_EPOCH + 1).start_slot(MinimalEthSpec::slots_per_epoch()), + Epoch::new(ALTAIR_FORK_EPOCH + 1).start_slot(MinimalEthSpec::slots_per_epoch()), Epoch::new(END_EPOCH).start_slot(MinimalEthSpec::slots_per_epoch()), slot_duration, + ), + // Check that the transition block is finalized. + checks::verify_transition_block_finalized( + network.clone(), + Epoch::new(TERMINAL_BLOCK / MinimalEthSpec::slots_per_epoch()), + slot_duration, + post_merge_sim ) ); @@ -236,6 +305,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { onboarding?; fork?; sync_aggregate?; + transition?; // The `final_future` either completes immediately or never completes, depending on the value // of `continue_after_checks`. 
diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 6cfc3e6db7..8df912ed16 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -1,7 +1,8 @@ use node_test_rig::{ environment::RuntimeContext, eth2::{types::StateId, BeaconNodeHttpClient}, - ClientConfig, LocalBeaconNode, LocalValidatorClient, ValidatorConfig, ValidatorFiles, + ClientConfig, LocalBeaconNode, LocalExecutionNode, LocalValidatorClient, MockExecutionConfig, + MockServerConfig, ValidatorConfig, ValidatorFiles, }; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; @@ -15,11 +16,17 @@ use types::{Epoch, EthSpec}; const BOOTNODE_PORT: u16 = 42424; pub const INVALID_ADDRESS: &str = "http://127.0.0.1:42423"; +pub const EXECUTION_PORT: u16 = 4000; + +pub const TERMINAL_DIFFICULTY: u64 = 6400; +pub const TERMINAL_BLOCK: u64 = 64; + /// Helper struct to reduce `Arc` usage. pub struct Inner { pub context: RuntimeContext, pub beacon_nodes: RwLock>>, pub validator_clients: RwLock>>, + pub execution_nodes: RwLock>>, } /// Represents a set of interconnected `LocalBeaconNode` and `LocalValidatorClient`. @@ -46,7 +53,7 @@ impl Deref for LocalNetwork { } impl LocalNetwork { - /// Creates a new network with a single `BeaconNode`. + /// Creates a new network with a single `BeaconNode` and a connected `ExecutionNode`. 
pub async fn new( context: RuntimeContext, mut beacon_config: ClientConfig, @@ -56,6 +63,30 @@ impl LocalNetwork { beacon_config.network.enr_udp_port = Some(BOOTNODE_PORT); beacon_config.network.enr_tcp_port = Some(BOOTNODE_PORT); beacon_config.network.discv5_config.table_filter = |_| true; + + let execution_node = if let Some(el_config) = &mut beacon_config.execution_layer { + let mock_execution_config = MockExecutionConfig { + server_config: MockServerConfig { + listen_port: EXECUTION_PORT, + ..Default::default() + }, + terminal_block: TERMINAL_BLOCK, + terminal_difficulty: TERMINAL_DIFFICULTY.into(), + ..Default::default() + }; + let execution_node = LocalExecutionNode::new( + context.service_context("boot_node_el".into()), + mock_execution_config, + ); + el_config.default_datadir = execution_node.datadir.path().to_path_buf(); + el_config.secret_files = vec![execution_node.datadir.path().join("jwt.hex")]; + el_config.execution_endpoints = + vec![SensitiveUrl::parse(&execution_node.server.url()).unwrap()]; + vec![execution_node] + } else { + vec![] + }; + let beacon_node = LocalBeaconNode::production(context.service_context("boot_node".into()), beacon_config) .await?; @@ -63,6 +94,7 @@ impl LocalNetwork { inner: Arc::new(Inner { context, beacon_nodes: RwLock::new(vec![beacon_node]), + execution_nodes: RwLock::new(execution_node), validator_clients: RwLock::new(vec![]), }), }) @@ -87,6 +119,7 @@ impl LocalNetwork { /// Adds a beacon node to the network, connecting to the 0'th beacon node via ENR. 
pub async fn add_beacon_node(&self, mut beacon_config: ClientConfig) -> Result<(), String> { let self_1 = self.clone(); + let count = self.beacon_node_count() as u16; println!("Adding beacon node.."); { let read_lock = self.beacon_nodes.read(); @@ -99,20 +132,38 @@ impl LocalNetwork { .enr() .expect("bootnode must have a network"), ); - let count = self.beacon_node_count() as u16; beacon_config.network.discovery_port = BOOTNODE_PORT + count; beacon_config.network.libp2p_port = BOOTNODE_PORT + count; beacon_config.network.enr_udp_port = Some(BOOTNODE_PORT + count); beacon_config.network.enr_tcp_port = Some(BOOTNODE_PORT + count); beacon_config.network.discv5_config.table_filter = |_| true; } + if let Some(el_config) = &mut beacon_config.execution_layer { + let config = MockExecutionConfig { + server_config: MockServerConfig { + listen_port: EXECUTION_PORT + count, + ..Default::default() + }, + terminal_block: TERMINAL_BLOCK, + terminal_difficulty: TERMINAL_DIFFICULTY.into(), + ..Default::default() + }; + let execution_node = LocalExecutionNode::new( + self.context.service_context(format!("node_{}_el", count)), + config, + ); + el_config.default_datadir = execution_node.datadir.path().to_path_buf(); + el_config.secret_files = vec![execution_node.datadir.path().join("jwt.hex")]; + el_config.execution_endpoints = + vec![SensitiveUrl::parse(&execution_node.server.url()).unwrap()]; + self.execution_nodes.write().push(execution_node); + } // We create the beacon node without holding the lock, so that the lock isn't held // across the await. This is only correct if this function never runs in parallel // with itself (which at the time of writing, it does not). 
- let index = self_1.beacon_nodes.read().len(); let beacon_node = LocalBeaconNode::production( - self.context.service_context(format!("node_{}", index)), + self.context.service_context(format!("node_{}", count)), beacon_config, ) .await?; @@ -184,6 +235,16 @@ impl LocalNetwork { .map(|body| body.unwrap().data.finalized.epoch) } + pub fn mine_pow_blocks(&self, block_number: u64) -> Result<(), String> { + let execution_nodes = self.execution_nodes.read(); + for execution_node in execution_nodes.iter() { + let mut block_gen = execution_node.server.ctx.execution_block_generator.write(); + block_gen.insert_pow_block(block_number)?; + println!("Mined pow block {}", block_number); + } + Ok(()) + } + pub async fn duration_to_genesis(&self) -> Duration { let nodes = self.remote_nodes().expect("Failed to get remote nodes"); let bootnode = nodes.first().expect("Should contain bootnode"); From e5e4e6275822533014fdb7b90c7bd4c4e5ede7d9 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 18 Jul 2022 23:15:41 +0000 Subject: [PATCH 073/184] Don't create a execution payload with same timestamp as terminal block (#3331) ## Issue Addressed Resolves #3316 ## Proposed Changes This PR fixes an issue where lighthouse created a transition block with `block.execution_payload().timestamp == terminal_block.timestamp` if the terminal block was created at the slot boundary. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 5 +- .../beacon_chain/src/execution_payload.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 6 ++ beacon_node/beacon_chain/tests/merge.rs | 27 ++++++- beacon_node/execution_layer/src/engine_api.rs | 2 + beacon_node/execution_layer/src/lib.rs | 72 ++++++++++++++++--- .../test_utils/execution_block_generator.rs | 24 +++++++ .../execution_layer/src/test_utils/mod.rs | 5 +- .../src/test_rig.rs | 4 +- 9 files changed, 130 insertions(+), 17 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9fb895f78f..aa719b1a6f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3908,14 +3908,13 @@ impl BeaconChain { ForkName::Base | ForkName::Altair => return Ok(()), _ => { // We are post-bellatrix - if execution_layer + if let Some(payload_attributes) = execution_layer .payload_attributes(next_slot, params.head_root) .await - .is_some() { // We are a proposer, check for terminal_pow_block_hash if let Some(terminal_pow_block_hash) = execution_layer - .get_terminal_pow_block_hash(&self.spec) + .get_terminal_pow_block_hash(&self.spec, payload_attributes.timestamp) .await .map_err(Error::ForkchoiceUpdate)? 
{ diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 747b8a468d..5c7c3c05d8 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -393,7 +393,7 @@ where } let terminal_pow_block_hash = execution_layer - .get_terminal_pow_block_hash(spec) + .get_terminal_pow_block_hash(spec, timestamp) .await .map_err(BlockProductionError::TerminalPoWBlockLookupFailed)?; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 1dc6f4b83b..2adae6c166 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -30,6 +30,7 @@ use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slog::Logger; use slot_clock::TestingSlotClock; +use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::{ state_advance::{complete_state_advance, partial_state_advance}, StateRootStrategy, @@ -521,6 +522,11 @@ where self.chain.head_beacon_state_cloned() } + pub fn get_timestamp_at_slot(&self) -> u64 { + let state = self.get_current_state(); + compute_timestamp_at_slot(&state, &self.spec).unwrap() + } + pub fn get_current_state_and_root(&self) -> (BeaconState, Hash256) { let head = self.chain.head_snapshot(); let state_root = head.beacon_state_root(); diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index 91d5eb21ca..19e8902a3e 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -1,7 +1,7 @@ #![cfg(not(debug_assertions))] // Tests run too slow in debug. 
use beacon_chain::test_utils::BeaconChainHarness; -use execution_layer::test_utils::{generate_pow_block, DEFAULT_TERMINAL_BLOCK}; +use execution_layer::test_utils::{generate_pow_block, Block, DEFAULT_TERMINAL_BLOCK}; use types::*; const VALIDATOR_COUNT: usize = 32; @@ -22,6 +22,7 @@ fn verify_execution_payload_chain(chain: &[FullPayload]) { prev_ep.execution_payload.block_number + 1, ep.execution_payload.block_number ); + assert!(ep.execution_payload.timestamp > prev_ep.execution_payload.timestamp); } prev_ep = Some(ep.clone()); } @@ -169,6 +170,30 @@ async fn base_altair_merge_with_terminal_block_after_fork() { .move_to_terminal_block() .unwrap(); + // Add a slot duration to get to the next slot + let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot; + + harness + .execution_block_generator() + .modify_last_block(|block| { + if let Block::PoW(terminal_block) = block { + terminal_block.timestamp = timestamp; + } + }); + + harness.extend_slots(1).await; + + let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; + assert_eq!( + *one_after_merge_head + .message() + .body() + .execution_payload() + .unwrap(), + FullPayload::default() + ); + assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 2); + /* * Next merge block should include an exec payload. */ diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 7e04a3fac3..4f957d6387 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -106,6 +106,8 @@ pub struct ExecutionBlock { pub block_number: u64, pub parent_hash: ExecutionBlockHash, pub total_difficulty: Uint256, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub timestamp: u64, } /// Representation of an exection block with enough detail to reconstruct a payload. 
diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 9bb4ead350..d85f9eb811 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -902,6 +902,7 @@ impl ExecutionLayer { pub async fn get_terminal_pow_block_hash( &self, spec: &ChainSpec, + timestamp: u64, ) -> Result, Error> { let _timer = metrics::start_timer_vec( &metrics::EXECUTION_LAYER_REQUEST_TIMES, @@ -924,8 +925,19 @@ impl ExecutionLayer { } } - self.get_pow_block_hash_at_total_difficulty(engine, spec) - .await + let block = self.get_pow_block_at_total_difficulty(engine, spec).await?; + if let Some(pow_block) = block { + // If `terminal_block.timestamp == transition_block.timestamp`, + // we violate the invariant that a block's timestamp must be + // strictly greater than its parent's timestamp. + // The execution layer will reject a fcu call with such payload + // attributes leading to a missed block. + // Hence, we return `None` in such a case. 
+ if pow_block.timestamp >= timestamp { + return Ok(None); + } + } + Ok(block.map(|b| b.block_hash)) }) .await .map_err(Box::new) @@ -953,11 +965,11 @@ impl ExecutionLayer { /// `get_pow_block_at_terminal_total_difficulty` /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md - async fn get_pow_block_hash_at_total_difficulty( + async fn get_pow_block_at_total_difficulty( &self, engine: &Engine, spec: &ChainSpec, - ) -> Result, ApiError> { + ) -> Result, ApiError> { let mut block = engine .api .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) @@ -970,7 +982,7 @@ impl ExecutionLayer { let block_reached_ttd = block.total_difficulty >= spec.terminal_total_difficulty; if block_reached_ttd { if block.parent_hash == ExecutionBlockHash::zero() { - return Ok(Some(block.block_hash)); + return Ok(Some(block)); } let parent = self .get_pow_block(engine, block.parent_hash) @@ -979,7 +991,7 @@ impl ExecutionLayer { let parent_reached_ttd = parent.total_difficulty >= spec.terminal_total_difficulty; if block_reached_ttd && !parent_reached_ttd { - return Ok(Some(block.block_hash)); + return Ok(Some(block)); } else { block = parent; } @@ -1197,19 +1209,54 @@ mod test { .move_to_block_prior_to_terminal_block() .with_terminal_block(|spec, el, _| async move { el.engine().upcheck().await; - assert_eq!(el.get_terminal_pow_block_hash(&spec).await.unwrap(), None) + assert_eq!( + el.get_terminal_pow_block_hash(&spec, timestamp_now()) + .await + .unwrap(), + None + ) }) .await .move_to_terminal_block() .with_terminal_block(|spec, el, terminal_block| async move { assert_eq!( - el.get_terminal_pow_block_hash(&spec).await.unwrap(), + el.get_terminal_pow_block_hash(&spec, timestamp_now()) + .await + .unwrap(), Some(terminal_block.unwrap().block_hash) ) }) .await; } + #[tokio::test] + async fn rejects_terminal_block_with_equal_timestamp() { + let runtime = TestRuntime::default(); + MockExecutionLayer::default_params(runtime.task_executor.clone()) + 
.move_to_block_prior_to_terminal_block() + .with_terminal_block(|spec, el, _| async move { + el.engine().upcheck().await; + assert_eq!( + el.get_terminal_pow_block_hash(&spec, timestamp_now()) + .await + .unwrap(), + None + ) + }) + .await + .move_to_terminal_block() + .with_terminal_block(|spec, el, terminal_block| async move { + let timestamp = terminal_block.as_ref().map(|b| b.timestamp).unwrap(); + assert_eq!( + el.get_terminal_pow_block_hash(&spec, timestamp) + .await + .unwrap(), + None + ) + }) + .await; + } + #[tokio::test] async fn verifies_valid_terminal_block_hash() { let runtime = TestRuntime::default(); @@ -1269,3 +1316,12 @@ mod test { fn noop(_: &ExecutionLayer, _: &ExecutionPayload) -> Option> { None } + +#[cfg(test)] +/// Returns the duration since the unix epoch. +fn timestamp_now() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_else(|_| Duration::from_secs(0)) + .as_secs() +} diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 7d8cdb299d..bf8ed4947a 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -60,12 +60,14 @@ impl Block { block_number: block.block_number, parent_hash: block.parent_hash, total_difficulty: block.total_difficulty, + timestamp: block.timestamp, }, Block::PoS(payload) => ExecutionBlock { block_hash: payload.block_hash, block_number: payload.block_number, parent_hash: payload.parent_hash, total_difficulty, + timestamp: payload.timestamp, }, } } @@ -100,6 +102,7 @@ pub struct PoWBlock { pub block_hash: ExecutionBlockHash, pub parent_hash: ExecutionBlockHash, pub total_difficulty: Uint256, + pub timestamp: u64, } pub struct ExecutionBlockGenerator { @@ -266,6 +269,26 @@ impl ExecutionBlockGenerator { Ok(()) } + pub fn modify_last_block(&mut self, block_modifier: impl FnOnce(&mut 
Block)) { + if let Some((last_block_hash, block_number)) = + self.block_hashes.keys().max().and_then(|block_number| { + self.block_hashes + .get(block_number) + .map(|block| (block, *block_number)) + }) + { + let mut block = self.blocks.remove(last_block_hash).unwrap(); + block_modifier(&mut block); + // Update the block hash after modifying the block + match &mut block { + Block::PoW(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), + Block::PoS(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), + } + self.block_hashes.insert(block_number, block.block_hash()); + self.blocks.insert(block.block_hash(), block); + } + } + pub fn get_payload(&mut self, id: &PayloadId) -> Option> { self.payload_ids.get(id).cloned() } @@ -423,6 +446,7 @@ pub fn generate_pow_block( block_hash: ExecutionBlockHash::zero(), parent_hash, total_difficulty, + timestamp: block_number, }; block.block_hash = ExecutionBlockHash::from_root(block.tree_hash_root()); diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 723da25ff1..970c619a56 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -6,7 +6,7 @@ use crate::engine_api::{ }; use bytes::Bytes; use environment::null_logger; -use execution_block_generator::{Block, PoWBlock}; +use execution_block_generator::PoWBlock; use handle_rpc::handle_rpc; use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use serde::{Deserialize, Serialize}; @@ -21,7 +21,7 @@ use tokio::{runtime, sync::oneshot}; use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; -pub use execution_block_generator::{generate_pow_block, ExecutionBlockGenerator}; +pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; @@ -334,6 +334,7 
@@ impl MockServer { block_hash, parent_hash, total_difficulty, + timestamp: block_number, }); self.ctx diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index ee5e9cf2cc..9c09ec8d96 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -219,7 +219,7 @@ impl TestRig { let terminal_pow_block_hash = self .ee_a .execution_layer - .get_terminal_pow_block_hash(&self.spec) + .get_terminal_pow_block_hash(&self.spec, timestamp_now()) .await .unwrap() .unwrap(); @@ -228,7 +228,7 @@ impl TestRig { terminal_pow_block_hash, self.ee_b .execution_layer - .get_terminal_pow_block_hash(&self.spec) + .get_terminal_pow_block_hash(&self.spec, timestamp_now()) .await .unwrap() .unwrap() From 7dbc59efebb277aa52996b74f18187218b2056ac Mon Sep 17 00:00:00 2001 From: Mac L Date: Tue, 19 Jul 2022 05:48:05 +0000 Subject: [PATCH 074/184] Share `reqwest::Client` between validators when using Web3Signer (#3335) ## Issue Addressed #3302 ## Proposed Changes Move the `reqwest::Client` from being initialized per-validator, to being initialized per distinct Web3Signer. This is done by placing the `Client` into a `HashMap` keyed by the definition of the Web3Signer as specified by the `ValidatorDefintion`. This will allow multiple Web3Signers to be used with a single VC and also maintains backwards compatibility. ## Additional Info This was done to reduce the memory used by the VC when connecting to a Web3Signer. 
I set up a local testnet using [a custom script](https://github.com/macladson/lighthouse/tree/web3signer-local-test/scripts/local_testnet_web3signer) and ran a VC with 200 validator keys: VC with Web3Signer: - `unstable`: ~200MB - With fix: ~50MB VC with Local Signer: - `unstable`: ~35MB - With fix: ~35MB > I'm seeing some fragmentation with the VC using the Web3Signer, but not when using a local signer (this is most likely due to making lots of http requests and dealing with lots of JSON objects). I tested the above using `MALLOC_ARENA_MAX=1` to try to reduce the fragmentation. Without it, the values are around +50MB for both `unstable` and the fix. --- .../src/validator_definitions.rs | 45 ++++---- testing/web3signer_tests/src/lib.rs | 6 +- validator_client/src/http_api/mod.rs | 19 +-- validator_client/src/http_api/remotekeys.rs | 20 ++-- .../src/initialized_validators.rs | 109 ++++++++++++------ 5 files changed, 124 insertions(+), 75 deletions(-) diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 3f4831ae17..e68737e259 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -45,6 +45,29 @@ pub enum Error { UnableToCreateValidatorDir(PathBuf), } +#[derive(Clone, PartialEq, Serialize, Deserialize, Hash, Eq)] +pub struct Web3SignerDefinition { + pub url: String, + /// Path to a .pem file. + #[serde(skip_serializing_if = "Option::is_none")] + pub root_certificate_path: Option, + /// Specifies a request timeout. + /// + /// The timeout is applied from when the request starts connecting until the response body has finished. + #[serde(skip_serializing_if = "Option::is_none")] + pub request_timeout_ms: Option, + + /// Path to a PKCS12 file. + #[serde(skip_serializing_if = "Option::is_none")] + pub client_identity_path: Option, + + /// Password for the PKCS12 file. + /// + /// An empty password will be used if this is omitted. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub client_identity_password: Option, +} + /// Defines how the validator client should attempt to sign messages for this validator. #[derive(Clone, PartialEq, Serialize, Deserialize)] #[serde(tag = "type")] @@ -62,27 +85,7 @@ pub enum SigningDefinition { /// /// https://github.com/ConsenSys/web3signer #[serde(rename = "web3signer")] - Web3Signer { - url: String, - /// Path to a .pem file. - #[serde(skip_serializing_if = "Option::is_none")] - root_certificate_path: Option, - /// Specifies a request timeout. - /// - /// The timeout is applied from when the request starts connecting until the response body has finished. - #[serde(skip_serializing_if = "Option::is_none")] - request_timeout_ms: Option, - - /// Path to a PKCS12 file. - #[serde(skip_serializing_if = "Option::is_none")] - client_identity_path: Option, - - /// Password for the PKCS12 file. - /// - /// An empty password will be used if this is omitted. - #[serde(skip_serializing_if = "Option::is_none")] - client_identity_password: Option, - }, + Web3Signer(Web3SignerDefinition), } impl SigningDefinition { diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index eb307290c2..bdee18026b 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -15,7 +15,7 @@ #[cfg(all(test, unix, not(debug_assertions)))] mod tests { use account_utils::validator_definitions::{ - SigningDefinition, ValidatorDefinition, ValidatorDefinitions, + SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, }; use eth2_keystore::KeystoreBuilder; use eth2_network_config::Eth2NetworkConfig; @@ -376,13 +376,13 @@ mod tests { graffiti: None, suggested_fee_recipient: None, description: String::default(), - signing_definition: SigningDefinition::Web3Signer { + signing_definition: SigningDefinition::Web3Signer(Web3SignerDefinition { url: signer_rig.url.to_string(), root_certificate_path: 
Some(root_certificate_path()), request_timeout_ms: None, client_identity_path: Some(client_identity_path()), client_identity_password: Some(client_identity_password()), - }, + }), }; ValidatorStoreRig::new(vec![validator_definition], spec).await }; diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 56218cd81b..07e7b1e13f 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -7,7 +7,7 @@ mod tests; use crate::ValidatorStore; use account_utils::{ mnemonic_from_phrase, - validator_definitions::{SigningDefinition, ValidatorDefinition}, + validator_definitions::{SigningDefinition, ValidatorDefinition, Web3SignerDefinition}, }; pub use api_secret::ApiSecret; use create_validator::{create_validators_mnemonic, create_validators_web3signer}; @@ -470,13 +470,16 @@ pub fn serve( graffiti: web3signer.graffiti, suggested_fee_recipient: web3signer.suggested_fee_recipient, description: web3signer.description, - signing_definition: SigningDefinition::Web3Signer { - url: web3signer.url, - root_certificate_path: web3signer.root_certificate_path, - request_timeout_ms: web3signer.request_timeout_ms, - client_identity_path: web3signer.client_identity_path, - client_identity_password: web3signer.client_identity_password, - }, + signing_definition: SigningDefinition::Web3Signer( + Web3SignerDefinition { + url: web3signer.url, + root_certificate_path: web3signer.root_certificate_path, + request_timeout_ms: web3signer.request_timeout_ms, + client_identity_path: web3signer.client_identity_path, + client_identity_password: web3signer + .client_identity_password, + }, + ), }) .collect(); handle.block_on(create_validators_web3signer( diff --git a/validator_client/src/http_api/remotekeys.rs b/validator_client/src/http_api/remotekeys.rs index 402396d4b4..57b7527e2b 100644 --- a/validator_client/src/http_api/remotekeys.rs +++ b/validator_client/src/http_api/remotekeys.rs @@ -1,6 +1,8 @@ //! 
Implementation of the standard remotekey management API. use crate::{initialized_validators::Error, InitializedValidators, ValidatorStore}; -use account_utils::validator_definitions::{SigningDefinition, ValidatorDefinition}; +use account_utils::validator_definitions::{ + SigningDefinition, ValidatorDefinition, Web3SignerDefinition, +}; use eth2::lighthouse_vc::std_types::{ DeleteRemotekeyStatus, DeleteRemotekeysRequest, DeleteRemotekeysResponse, ImportRemotekeyStatus, ImportRemotekeysRequest, ImportRemotekeysResponse, @@ -31,11 +33,13 @@ pub fn list( match &def.signing_definition { SigningDefinition::LocalKeystore { .. } => None, - SigningDefinition::Web3Signer { url, .. } => Some(SingleListRemotekeysResponse { - pubkey: validating_pubkey, - url: url.clone(), - readonly: false, - }), + SigningDefinition::Web3Signer(Web3SignerDefinition { url, .. }) => { + Some(SingleListRemotekeysResponse { + pubkey: validating_pubkey, + url: url.clone(), + readonly: false, + }) + } } }) .collect::>(); @@ -120,13 +124,13 @@ fn import_single_remotekey( graffiti: None, suggested_fee_recipient: None, description: String::from("Added by remotekey API"), - signing_definition: SigningDefinition::Web3Signer { + signing_definition: SigningDefinition::Web3Signer(Web3SignerDefinition { url, root_certificate_path: None, request_timeout_ms: None, client_identity_path: None, client_identity_password: None, - }, + }), }; handle .block_on(validator_store.add_validator(web3signer_validator)) diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index a0fe6dfe2a..8069bfcab8 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -10,7 +10,8 @@ use crate::signing_method::SigningMethod; use account_utils::{ read_password, read_password_from_user, validator_definitions::{ - self, SigningDefinition, ValidatorDefinition, ValidatorDefinitions, CONFIG_FILENAME, + self, SigningDefinition, 
ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, + CONFIG_FILENAME, }, ZeroizeString, }; @@ -155,6 +156,7 @@ impl InitializedValidator { def: ValidatorDefinition, key_cache: &mut KeyCache, key_stores: &mut HashMap, + web3_signer_client_map: &mut Option>, ) -> Result { if !def.enabled { return Err(Error::UnableToInitializeDisabledValidator); @@ -239,46 +241,45 @@ impl InitializedValidator { voting_keypair: Arc::new(voting_keypair), } } - SigningDefinition::Web3Signer { - url, - root_certificate_path, - request_timeout_ms, - client_identity_path, - client_identity_password, - } => { - let signing_url = build_web3_signer_url(&url, &def.voting_public_key) + SigningDefinition::Web3Signer(web3_signer) => { + let signing_url = build_web3_signer_url(&web3_signer.url, &def.voting_public_key) .map_err(|e| Error::InvalidWeb3SignerUrl(e.to_string()))?; - let request_timeout = request_timeout_ms + + let request_timeout = web3_signer + .request_timeout_ms .map(Duration::from_millis) .unwrap_or(DEFAULT_REMOTE_SIGNER_REQUEST_TIMEOUT); - let builder = Client::builder().timeout(request_timeout); - - let builder = if let Some(path) = root_certificate_path { - let certificate = load_pem_certificate(path)?; - builder.add_root_certificate(certificate) - } else { - builder - }; - - let builder = if let Some(path) = client_identity_path { - let identity = load_pkcs12_identity( - path, - &client_identity_password - .ok_or(Error::MissingWeb3SignerClientIdentityPassword)?, - )?; - builder.identity(identity) - } else { - if client_identity_password.is_some() { - return Err(Error::MissingWeb3SignerClientIdentityCertificateFile); + // Check if a client has already been initialized for this remote signer url. 
+ let http_client = if let Some(client_map) = web3_signer_client_map { + match client_map.get(&web3_signer) { + Some(client) => client.clone(), + None => { + let client = build_web3_signer_client( + web3_signer.root_certificate_path.clone(), + web3_signer.client_identity_path.clone(), + web3_signer.client_identity_password.clone(), + request_timeout, + )?; + client_map.insert(web3_signer, client.clone()); + client + } } - builder + } else { + // There are no clients in the map. + let mut new_web3_signer_client_map: HashMap = + HashMap::new(); + let client = build_web3_signer_client( + web3_signer.root_certificate_path.clone(), + web3_signer.client_identity_path.clone(), + web3_signer.client_identity_password.clone(), + request_timeout, + )?; + new_web3_signer_client_map.insert(web3_signer, client.clone()); + *web3_signer_client_map = Some(new_web3_signer_client_map); + client }; - let http_client = builder - .build() - .map_err(Error::UnableToBuildWeb3SignerClient)?; - SigningMethod::Web3Signer { signing_url, http_client, @@ -332,6 +333,39 @@ fn build_web3_signer_url(base_url: &str, voting_public_key: &PublicKey) -> Resul Url::parse(base_url)?.join(&format!("api/v1/eth2/sign/{}", voting_public_key)) } +fn build_web3_signer_client( + root_certificate_path: Option, + client_identity_path: Option, + client_identity_password: Option, + request_timeout: Duration, +) -> Result { + let builder = Client::builder().timeout(request_timeout); + + let builder = if let Some(path) = root_certificate_path { + let certificate = load_pem_certificate(path)?; + builder.add_root_certificate(certificate) + } else { + builder + }; + + let builder = if let Some(path) = client_identity_path { + let identity = load_pkcs12_identity( + path, + &client_identity_password.ok_or(Error::MissingWeb3SignerClientIdentityPassword)?, + )?; + builder.identity(identity) + } else { + if client_identity_password.is_some() { + return Err(Error::MissingWeb3SignerClientIdentityCertificateFile); + } + builder 
+ }; + + builder + .build() + .map_err(Error::UnableToBuildWeb3SignerClient) +} + /// Try to unlock `keystore` at `keystore_path` by prompting the user via `stdin`. fn unlock_keystore_via_stdin_password( keystore: &Keystore, @@ -382,6 +416,8 @@ pub struct InitializedValidators { validators_dir: PathBuf, /// The canonical set of validators. validators: HashMap, + /// The clients used for communications with a remote signer. + web3_signer_client_map: Option>, /// For logging via `slog`. log: Logger, } @@ -397,6 +433,7 @@ impl InitializedValidators { validators_dir, definitions, validators: HashMap::default(), + web3_signer_client_map: None, log, }; this.update_validators().await?; @@ -826,6 +863,7 @@ impl InitializedValidators { def.clone(), &mut key_cache, &mut key_stores, + &mut None, ) .await { @@ -870,11 +908,12 @@ impl InitializedValidators { } } } - SigningDefinition::Web3Signer { .. } => { + SigningDefinition::Web3Signer(Web3SignerDefinition { .. }) => { match InitializedValidator::from_definition( def.clone(), &mut key_cache, &mut key_stores, + &mut self.web3_signer_client_map, ) .await { From 822c30da66af22c5d6fa402b899ae4b9b4eb5ce4 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Wed, 20 Jul 2022 18:18:25 +0000 Subject: [PATCH 075/184] docker rust version update (#3353) ## Issue Addressed The lcli and antithesis docker builds are failing in unstable so bumping all the versions here Co-authored-by: realbigsean --- Dockerfile | 2 +- lcli/Dockerfile | 2 +- testing/antithesis/Dockerfile.libvoidstar | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 6732c7eaf8..86a69c6539 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.62.0-bullseye AS builder +FROM rust:1.62.1-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . 
lighthouse ARG FEATURES diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 255f96eec1..2a0e5a9d47 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.58.1-bullseye AS builder +FROM rust:1.62.1-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake COPY . lighthouse ARG PORTABLE diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar index 61b95397d7..81a1beea4a 100644 --- a/testing/antithesis/Dockerfile.libvoidstar +++ b/testing/antithesis/Dockerfile.libvoidstar @@ -1,4 +1,4 @@ -FROM rust:1.58.1-bullseye AS builder +FROM rust:1.62.1-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse From fabe50abe74aa8f139b02b98d53612385f806a8f Mon Sep 17 00:00:00 2001 From: realbigsean Date: Wed, 20 Jul 2022 18:18:26 +0000 Subject: [PATCH 076/184] debug tests rust version (#3354) ## Issue Addressed Which issue # does this PR address? ## Proposed Changes Please list or describe the changes introduced by this PR. ## Additional Info Please provide any additional information. For example, future considerations or information useful for reviewers. 
Co-authored-by: realbigsean --- .github/workflows/test-suite.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 6458af6e79..f26eadc398 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -104,7 +104,7 @@ jobs: run: make test-op-pool debug-tests-ubuntu: name: debug-tests-ubuntu - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 needs: cargo-fmt steps: - uses: actions/checkout@v1 From 6d8dfc9eee034367bfe792737742c09fa71bcba0 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 20 Jul 2022 20:59:36 +0000 Subject: [PATCH 077/184] Add TTD and Bellatrix epoch for Prater (#3345) ## Issue Addressed NA ## Proposed Changes Adds the TTD and Bellatrix values for Prater, as per https://github.com/eth-clients/eth2-networks/pull/77. ## Additional Info - ~~Blocked on https://github.com/eth-clients/eth2-networks/pull/77~~ --- .../built_in_network_configs/prater/config.yaml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index d337c4120a..d173be20de 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -6,8 +6,7 @@ PRESET_BASE: 'mainnet' # Transition # --------------------------------------------------------------- -# TBD, 2**256-2**10 is a placeholder -TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912 +TERMINAL_TOTAL_DIFFICULTY: 10790000 # By default, don't use these params TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 @@ -35,7 +34,7 @@ ALTAIR_FORK_VERSION: 0x01001020 ALTAIR_FORK_EPOCH: 36660 # Merge BELLATRIX_FORK_VERSION: 
0x02001020 -BELLATRIX_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_EPOCH: 112260 # Sharding SHARDING_FORK_VERSION: 0x03001020 SHARDING_FORK_EPOCH: 18446744073709551615 From 7c3ff903ca921d7b61945eb3007e7f347c9f8272 Mon Sep 17 00:00:00 2001 From: ethDreamer Date: Wed, 20 Jul 2022 20:59:38 +0000 Subject: [PATCH 078/184] Fix Gossip Penalties During Optimistic Sync Window (#3350) ## Issue Addressed * #3344 ## Proposed Changes There are a number of cases during block processing where we might get an `ExecutionPayloadError` but we shouldn't penalize peers. We were forgetting to enumerate all of the non-penalizing errors in every single match statement where we are making that decision. I created a function to make it explicit when we should and should not penalize peers and I used that function in all places where this logic is needed. This way we won't make the same mistake if we add another variant of `ExecutionPayloadError` in the future. --- .../beacon_chain/src/block_verification.rs | 23 +++++++++++++++---- .../beacon_processor/worker/gossip_methods.rs | 12 +++------- .../network/src/sync/block_lookups/mod.rs | 20 +++++----------- 3 files changed, 28 insertions(+), 27 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index a64fb387e3..c8341cd60b 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -335,17 +335,32 @@ pub enum ExecutionPayloadError { terminal_block_hash: ExecutionBlockHash, payload_parent_hash: ExecutionBlockHash, }, - /// The execution node failed to provide a parent block to a known block. This indicates an - /// issue with the execution node. + /// The execution node is syncing but we fail the conditions for optimistic sync /// /// ## Peer scoring /// /// The peer is not necessarily invalid. 
- PoWParentMissing(ExecutionBlockHash), - /// The execution node is syncing but we fail the conditions for optimistic sync UnverifiedNonOptimisticCandidate, } +impl ExecutionPayloadError { + pub fn penalize_peer(&self) -> bool { + // This match statement should never have a default case so that we are + // always forced to consider here whether or not to penalize a peer when + // we add a new error condition. + match self { + ExecutionPayloadError::NoExecutionConnection => false, + ExecutionPayloadError::RequestFailed(_) => false, + ExecutionPayloadError::RejectedByExecutionEngine { .. } => true, + ExecutionPayloadError::InvalidPayloadTimestamp { .. } => true, + ExecutionPayloadError::InvalidTerminalPoWBlock { .. } => true, + ExecutionPayloadError::InvalidActivationEpoch { .. } => true, + ExecutionPayloadError::InvalidTerminalBlockHash { .. } => true, + ExecutionPayloadError::UnverifiedNonOptimisticCandidate => false, + } + } +} + impl From for ExecutionPayloadError { fn from(e: execution_layer::Error) -> Self { ExecutionPayloadError::RequestFailed(e) diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 2dc02a31b3..b88b58b8bf 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -6,8 +6,7 @@ use beacon_chain::{ observed_operations::ObservationOutcome, sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::get_block_delay_ms, - BeaconChainError, BeaconChainTypes, BlockError, ExecutionPayloadError, ForkChoiceError, - GossipVerifiedBlock, + BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, }; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use slog::{crit, debug, error, info, trace, warn}; @@ -776,9 +775,7 @@ impl Worker { return None; } // TODO(merge): 
reconsider peer scoring for this event. - Err(e @BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed(_))) - | Err(e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::UnverifiedNonOptimisticCandidate)) - | Err(e @BlockError::ExecutionPayloadError(ExecutionPayloadError::NoExecutionConnection)) => { + Err(ref e @BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { debug!(self.log, "Could not verify block for gossip, ignoring the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -951,10 +948,7 @@ impl Worker { ); self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block)); } - Err(e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed(_))) - | Err( - e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::NoExecutionConnection), - ) => { + Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { debug!( self.log, "Failed to verify execution payload"; diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 49e1eb290f..2aa4acdb5a 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -1,7 +1,7 @@ use std::collections::hash_map::Entry; use std::time::Duration; -use beacon_chain::{BeaconChainTypes, BlockError, ExecutionPayloadError}; +use beacon_chain::{BeaconChainTypes, BlockError}; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; @@ -435,17 +435,12 @@ impl BlockLookups { BlockError::ParentUnknown(block) => { self.search_parent(block, peer_id, cx); } - e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed( - _, - )) - | e @ BlockError::ExecutionPayloadError( - ExecutionPayloadError::NoExecutionConnection, - ) => { + ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { // These errors 
indicate that the execution layer is offline // and failed to validate the execution payload. Do not downscore peer. debug!( self.log, - "Single block lookup failed. Execution layer is offline"; + "Single block lookup failed. Execution layer is offline / unsynced / misconfigured"; "root" => %root, "error" => ?e ); @@ -549,12 +544,9 @@ impl BlockLookups { } } } - BlockProcessResult::Err( - e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed(_)), - ) - | BlockProcessResult::Err( - e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::NoExecutionConnection), - ) => { + ref e @ BlockProcessResult::Err(BlockError::ExecutionPayloadError(ref epe)) + if !epe.penalize_peer() => + { // These errors indicate that the execution layer is offline // and failed to validate the execution payload. Do not downscore peer. debug!( From 5b5cf9cfaa6efb740135639b87801f03356dcf4f Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 20 Jul 2022 23:16:54 +0000 Subject: [PATCH 079/184] Log ttd (#3339) ## Issue Addressed Resolves #3249 ## Proposed Changes Log merge related parameters and EE status in the beacon notifier before the merge. 
Co-authored-by: Paul Hauner --- beacon_node/beacon_chain/src/lib.rs | 1 + .../beacon_chain/src/merge_readiness.rs | 169 ++++++++++++++++++ beacon_node/client/src/notifier.rs | 99 +++++++++- beacon_node/execution_layer/src/lib.rs | 33 ++++ .../src/test_utils/handle_rpc.rs | 9 + 5 files changed, 305 insertions(+), 6 deletions(-) create mode 100644 beacon_node/beacon_chain/src/merge_readiness.rs diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index b82b690d20..b54964aa32 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -20,6 +20,7 @@ pub mod fork_choice_signal; pub mod fork_revert; mod head_tracker; pub mod historical_blocks; +pub mod merge_readiness; mod metrics; pub mod migrate; mod naive_aggregation_pool; diff --git a/beacon_node/beacon_chain/src/merge_readiness.rs b/beacon_node/beacon_chain/src/merge_readiness.rs new file mode 100644 index 0000000000..be158ecbe1 --- /dev/null +++ b/beacon_node/beacon_chain/src/merge_readiness.rs @@ -0,0 +1,169 @@ +//! Provides tools for checking if a node is ready for the Bellatrix upgrade and following merge +//! transition. + +use crate::{BeaconChain, BeaconChainTypes}; +use execution_layer::Error as EngineError; +use std::fmt; +use std::fmt::Write; +use types::*; + +/// The time before the Bellatrix fork when we will start issuing warnings about preparation. 
+const SECONDS_IN_A_WEEK: u64 = 604800; +pub const MERGE_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK; + +#[derive(Default, Debug)] +pub struct MergeConfig { + pub terminal_total_difficulty: Option, + pub terminal_block_hash: Option, + pub terminal_block_hash_epoch: Option, +} + +impl fmt::Display for MergeConfig { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.terminal_block_hash.is_none() + && self.terminal_block_hash_epoch.is_none() + && self.terminal_total_difficulty.is_none() + { + return write!( + f, + "Merge terminal difficulty parameters not configured, check your config" + ); + } + let mut display_string = String::new(); + if let Some(terminal_total_difficulty) = self.terminal_total_difficulty { + write!( + display_string, + "terminal_total_difficulty: {},", + terminal_total_difficulty + )?; + } + if let Some(terminal_block_hash) = self.terminal_block_hash { + write!( + display_string, + "terminal_block_hash: {},", + terminal_block_hash + )?; + } + if let Some(terminal_block_hash_epoch) = self.terminal_block_hash_epoch { + write!( + display_string, + "terminal_block_hash_epoch: {},", + terminal_block_hash_epoch + )?; + } + write!(f, "{}", display_string.trim_end_matches(','))?; + Ok(()) + } +} +impl MergeConfig { + /// Instantiate `self` from the values in a `ChainSpec`. + pub fn from_chainspec(spec: &ChainSpec) -> Self { + let mut params = MergeConfig::default(); + if spec.terminal_total_difficulty != Uint256::max_value() { + params.terminal_total_difficulty = Some(spec.terminal_total_difficulty); + } + if spec.terminal_block_hash != ExecutionBlockHash::zero() { + params.terminal_block_hash = Some(spec.terminal_block_hash); + } + if spec.terminal_block_hash_activation_epoch != Epoch::max_value() { + params.terminal_block_hash_epoch = Some(spec.terminal_block_hash_activation_epoch); + } + params + } +} + +/// Indicates if a node is ready for the Bellatrix upgrade and subsequent merge transition. 
+pub enum MergeReadiness { + /// The node is ready, as far as we can tell. + Ready { + config: MergeConfig, + current_difficulty: Result, + }, + /// The transition configuration with the EL failed, there might be a problem with + /// connectivity, authentication or a difference in configuration. + ExchangeTransitionConfigurationFailed(EngineError), + /// The EL can be reached and has the correct configuration, however it's not yet synced. + NotSynced, + /// The user has not configured this node to use an execution endpoint. + NoExecutionEndpoint, +} + +impl fmt::Display for MergeReadiness { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + MergeReadiness::Ready { + config: params, + current_difficulty, + } => { + write!( + f, + "This node appears ready for the merge. \ + Params: {}, current_difficulty: {:?}", + params, current_difficulty + ) + } + MergeReadiness::ExchangeTransitionConfigurationFailed(e) => write!( + f, + "Could not confirm the transition configuration with the \ + execution endpoint: {:?}", + e + ), + MergeReadiness::NotSynced => write!( + f, + "The execution endpoint is connected and configured, \ + however it is not yet synced" + ), + MergeReadiness::NoExecutionEndpoint => write!( + f, + "The --execution-endpoint flag is not specified, this is a \ + requirement for the merge" + ), + } + } +} + +impl BeaconChain { + /// Returns `true` if the Bellatrix fork has occurred or will occur within + /// `MERGE_READINESS_PREPARATION_SECONDS`. + pub fn is_time_to_prepare_for_bellatrix(&self, current_slot: Slot) -> bool { + if let Some(bellatrix_epoch) = self.spec.bellatrix_fork_epoch { + let bellatrix_slot = bellatrix_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let merge_readiness_preparation_slots = + MERGE_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot; + + // Return `true` if Bellatrix has happened or is within the preparation time. 
+ current_slot + merge_readiness_preparation_slots > bellatrix_slot + } else { + // The Bellatrix fork epoch has not been defined yet, no need to prepare. + false + } + } + + /// Attempts to connect to the EL and confirm that it is ready for the merge. + pub async fn check_merge_readiness(&self) -> MergeReadiness { + if let Some(el) = self.execution_layer.as_ref() { + if let Err(e) = el.exchange_transition_configuration(&self.spec).await { + // The EL was either unreachable, responded with an error or has a different + // configuration. + return MergeReadiness::ExchangeTransitionConfigurationFailed(e); + } + + if !el.is_synced_for_notifier().await { + // The EL is not synced. + return MergeReadiness::NotSynced; + } + let params = MergeConfig::from_chainspec(&self.spec); + let current_difficulty = el + .get_current_difficulty() + .await + .map_err(|_| "Failed to get current difficulty from execution node".to_string()); + MergeReadiness::Ready { + config: params, + current_difficulty, + } + } else { + // There is no EL configured. + MergeReadiness::NoExecutionEndpoint + } + } +} diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 9476819a4b..53478971af 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,13 +1,16 @@ use crate::metrics; -use beacon_chain::{BeaconChain, BeaconChainTypes, ExecutionStatus}; +use beacon_chain::{ + merge_readiness::{MergeConfig, MergeReadiness}, + BeaconChain, BeaconChainTypes, ExecutionStatus, +}; use lighthouse_network::{types::SyncState, NetworkGlobals}; -use parking_lot::Mutex; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::sync::Arc; use std::time::{Duration, Instant}; +use tokio::sync::Mutex; use tokio::time::sleep; -use types::{EthSpec, Slot}; +use types::*; /// Create a warning log whenever the peer count is at or below this value. 
pub const WARN_PEER_COUNT: usize = 1; @@ -77,6 +80,7 @@ pub fn spawn_notifier( // Perform post-genesis logging. let mut last_backfill_log_slot = None; + loop { interval.tick().await; let connected_peer_count = network.connected_peers(); @@ -87,12 +91,12 @@ pub fn spawn_notifier( match (current_sync_state, &sync_state) { (_, SyncState::BackFillSyncing { .. }) => { // We have transitioned to a backfill sync. Reset the speedo. - let mut speedo = speedo.lock(); + let mut speedo = speedo.lock().await; speedo.clear(); } (SyncState::BackFillSyncing { .. }, _) => { // We have transitioned from a backfill sync, reset the speedo - let mut speedo = speedo.lock(); + let mut speedo = speedo.lock().await; speedo.clear(); } (_, _) => {} @@ -125,7 +129,7 @@ pub fn spawn_notifier( // progress. let mut sync_distance = current_slot - head_slot; - let mut speedo = speedo.lock(); + let mut speedo = speedo.lock().await; match current_sync_state { SyncState::BackFillSyncing { .. } => { // Observe backfilling sync info. @@ -306,6 +310,7 @@ pub fn spawn_notifier( } eth1_logging(&beacon_chain, &log); + merge_readiness_logging(current_slot, &beacon_chain, &log).await; } }; @@ -315,6 +320,88 @@ pub fn spawn_notifier( Ok(()) } +/// Provides some helpful logging to users to indicate if their node is ready for the Bellatrix +/// fork and subsequent merge transition. 
+async fn merge_readiness_logging( + current_slot: Slot, + beacon_chain: &BeaconChain, + log: &Logger, +) { + let merge_completed = beacon_chain + .canonical_head + .cached_head() + .snapshot + .beacon_block + .message() + .body() + .execution_payload() + .map_or(false, |payload| { + payload.parent_hash() != ExecutionBlockHash::zero() + }); + + if merge_completed || !beacon_chain.is_time_to_prepare_for_bellatrix(current_slot) { + return; + } + + match beacon_chain.check_merge_readiness().await { + MergeReadiness::Ready { + config, + current_difficulty, + } => match config { + MergeConfig { + terminal_total_difficulty: Some(ttd), + terminal_block_hash: None, + terminal_block_hash_epoch: None, + } => { + info!( + log, + "Ready for the merge"; + "terminal_total_difficulty" => %ttd, + "current_difficulty" => current_difficulty + .map(|d| d.to_string()) + .unwrap_or_else(|_| "??".into()), + ) + } + MergeConfig { + terminal_total_difficulty: _, + terminal_block_hash: Some(terminal_block_hash), + terminal_block_hash_epoch: Some(terminal_block_hash_epoch), + } => { + info!( + log, + "Ready for the merge"; + "info" => "you are using override parameters, please ensure that you \ + understand these parameters and their implications.", + "terminal_block_hash" => ?terminal_block_hash, + "terminal_block_hash_epoch" => ?terminal_block_hash_epoch, + ) + } + other => error!( + log, + "Inconsistent merge configuration"; + "config" => ?other + ), + }, + readiness @ MergeReadiness::ExchangeTransitionConfigurationFailed(_) => { + error!( + log, + "Not ready for merge"; + "info" => %readiness, + ) + } + readiness @ MergeReadiness::NotSynced => warn!( + log, + "Not ready for merge"; + "info" => %readiness, + ), + readiness @ MergeReadiness::NoExecutionEndpoint => warn!( + log, + "Not ready for merge"; + "info" => %readiness, + ), + } +} + fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger) { let current_slot_opt = beacon_chain.slot().ok(); diff --git 
a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index d85f9eb811..4ab38cb3ab 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -234,6 +234,16 @@ impl ExecutionLayer { &self.inner.executor } + /// Get the current difficulty of the PoW chain. + pub async fn get_current_difficulty(&self) -> Result { + let block = self + .engine() + .api + .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) + .await? + .ok_or(ApiError::ExecutionHeadBlockNotFound)?; + Ok(block.total_difficulty) + } /// Note: this function returns a mutex guard, be careful to avoid deadlocks. async fn execution_blocks( &self, @@ -355,6 +365,29 @@ impl ExecutionLayer { self.engine().is_synced().await } + /// Execution nodes return a "SYNCED" response when they do not have any peers. + /// + /// This function is a wrapper over `Self::is_synced` that makes an additional + /// check for the execution layer sync status. Checks if the latest block has + /// a `block_number != 0`. + /// Returns the `Self::is_synced` response if unable to get latest block. 
+ pub async fn is_synced_for_notifier(&self) -> bool { + let synced = self.is_synced().await; + if synced { + if let Ok(Some(block)) = self + .engine() + .api + .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) + .await + { + if block.block_number == 0 { + return false; + } + } + } + synced + } + /// Updates the proposer preparation data provided by validators pub async fn update_proposer_preparation( &self, diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 5e0e0591cd..eceb50df23 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -132,6 +132,15 @@ pub async fn handle_rpc( Ok(serde_json::to_value(response).unwrap()) } + ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1 => { + let block_generator = ctx.execution_block_generator.read(); + let transition_config: TransitionConfigurationV1 = TransitionConfigurationV1 { + terminal_total_difficulty: block_generator.terminal_total_difficulty, + terminal_block_hash: block_generator.terminal_block_hash, + terminal_block_number: block_generator.terminal_block_number, + }; + Ok(serde_json::to_value(transition_config).unwrap()) + } other => Err(format!( "The method {} does not exist/is not available", other From 6a0e9d4353124ea5be5b2fe02b5aba3d1458ec83 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 20 Jul 2022 23:16:56 +0000 Subject: [PATCH 080/184] Add Goerli `--network` flag as duplicate of Prater: Option A (#3346) ## Issue Addressed - Resolves #3338 ## Proposed Changes This PR adds a new `--network goerli` flag that reuses the [Prater network configs](https://github.com/sigp/lighthouse/tree/stable/common/eth2_network_config/built_in_network_configs/prater). As you'll see in #3338, there are several approaches to the problem of the Goerli/Prater alias. This approach achieves: 1. No duplication of the genesis state between Goerli and Prater. 
- Upside: the genesis state for Prater is ~17mb, duplication would increase the size of the binary by that much. 2. When the user supplies `--network goerli`, they will get a datadir in `~/.lighthouse/goerli`. - Upside: our docs stay correct when they declare a datadir is located at `~/.lighthouse/{network}` - Downside: switching from `--network prater` to `--network goerli` will require some manual migration. 3. When using `--network goerli`, the [`config/spec`](https://ethereum.github.io/beacon-APIs/#/Config/getSpec) endpoint will return a [`CONFIG_NAME`](https://github.com/ethereum/consensus-specs/blob/02a2b71d64fcf5023a8bd890dabce774a6e9802e/configs/mainnet.yaml#L11) of "prater". - Upside: VC running `--network prater` will still think it's on the same network as one using `--network goerli`. - Downside: potentially confusing. #3348 achieves the same goal as this PR with a different approach and set of trade-offs. ## Additional Info ### Notes for reviewers: In https://github.com/sigp/lighthouse/commit/e4896c268217e501ab581ce857d526572b235b91 you'll see that I remove the `$name_str` by just using `stringify!($name_ident)` instead. This is a simplification that should have have been there in the first place. Then, in https://github.com/sigp/lighthouse/commit/90b5e22fca366c1db741c6d8f02902d9e375279f I reclaim that second parameter with a new purpose; to specify the directory from which to load configs. 
--- common/eth2_config/src/lib.rs | 102 +++++++++++++++++++++----- common/eth2_network_config/src/lib.rs | 9 ++- 2 files changed, 93 insertions(+), 18 deletions(-) diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index 9cea725865..7e3c025a83 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -69,7 +69,7 @@ impl Eth2Config { #[derive(Copy, Clone, Debug, PartialEq)] pub struct Eth2NetArchiveAndDirectory<'a> { pub name: &'a str, - pub unique_id: &'a str, + pub config_dir: &'a str, pub genesis_is_known: bool, } @@ -81,7 +81,7 @@ impl<'a> Eth2NetArchiveAndDirectory<'a> { .parse::() .expect("should parse manifest dir as path") .join(PREDEFINED_NETWORKS_DIR) - .join(self.unique_id) + .join(self.config_dir) } pub fn genesis_state_archive(&self) -> PathBuf { @@ -96,6 +96,7 @@ const GENESIS_STATE_IS_KNOWN: bool = true; #[derive(Copy, Clone, Debug, PartialEq)] pub struct HardcodedNet { pub name: &'static str, + pub config_dir: &'static str, pub genesis_is_known: bool, pub config: &'static [u8], pub deploy_block: &'static [u8], @@ -108,15 +109,15 @@ pub struct HardcodedNet { /// It also defines a `include__file!` macro which provides a wrapper around /// `std::include_bytes`, allowing the inclusion of bytes from the specific testnet directory. macro_rules! define_archive { - ($name_ident: ident, $name_str: tt, $genesis_is_known: ident) => { + ($name_ident: ident, $config_dir: tt, $genesis_is_known: ident) => { paste! { #[macro_use] pub mod $name_ident { use super::*; pub const ETH2_NET_DIR: Eth2NetArchiveAndDirectory = Eth2NetArchiveAndDirectory { - name: $name_str, - unique_id: $name_str, + name: stringify!($name_ident), + config_dir: $config_dir, genesis_is_known: $genesis_is_known, }; @@ -130,7 +131,7 @@ macro_rules! define_archive { "/", $this_crate::predefined_networks_dir!(), "/", - $name_str, + $config_dir, "/", $filename )) @@ -149,6 +150,7 @@ macro_rules! 
define_net { $this_crate::HardcodedNet { name: ETH2_NET_DIR.name, + config_dir: ETH2_NET_DIR.config_dir, genesis_is_known: ETH2_NET_DIR.genesis_is_known, config: $this_crate::$include_file!($this_crate, "../", "config.yaml"), deploy_block: $this_crate::$include_file!($this_crate, "../", "deploy_block.txt"), @@ -164,13 +166,13 @@ macro_rules! define_net { /// - `HARDCODED_NET_NAMES`: a list of the *names* of the networks defined by this macro. #[macro_export] macro_rules! define_nets { - ($this_crate: ident, $($name_ident: ident, $name_str: tt,)+) => { + ($this_crate: ident, $($name_ident: ident,)+) => { $this_crate::paste! { $( const [<$name_ident:upper>]: $this_crate::HardcodedNet = $this_crate::define_net!($this_crate, $name_ident, [<include_ $name_ident _file>]); )+ const HARDCODED_NETS: &[$this_crate::HardcodedNet] = &[$([<$name_ident:upper>],)+]; - pub const HARDCODED_NET_NAMES: &[&'static str] = &[$($name_str,)+]; + pub const HARDCODED_NET_NAMES: &[&'static str] = &[$(stringify!($name_ident),)+]; } }; } @@ -197,9 +199,9 @@ macro_rules! define_nets { /// `build.rs` which will unzip the genesis states. Then, that `eth2_network_configs` crate can /// perform the final step of using `std::include_bytes` to bake the files (bytes) into the binary. macro_rules! define_hardcoded_nets { - ($(($name_ident: ident, $name_str: tt, $genesis_is_known: ident)),+) => { + ($(($name_ident: ident, $config_dir: tt, $genesis_is_known: ident)),+) => { $( - define_archive!($name_ident, $name_str, $genesis_is_known); + define_archive!($name_ident, $config_dir, $genesis_is_known); )+ pub const ETH2_NET_DIRS: &[Eth2NetArchiveAndDirectory<'static>] = &[$($name_ident::ETH2_NET_DIR,)+]; @@ -213,7 +215,7 @@ macro_rules! define_hardcoded_nets { #[macro_export] macro_rules! 
instantiate_hardcoded_nets { ($this_crate: ident) => { - $this_crate::define_nets!($this_crate, $($name_ident, $name_str,)+); + $this_crate::define_nets!($this_crate, $($name_ident,)+); } } }; @@ -234,10 +236,76 @@ macro_rules! define_hardcoded_nets { // // The directory containing the testnet files should match the human-friendly name (element 1). define_hardcoded_nets!( - (mainnet, "mainnet", GENESIS_STATE_IS_KNOWN), - (prater, "prater", GENESIS_STATE_IS_KNOWN), - (gnosis, "gnosis", GENESIS_STATE_IS_KNOWN), - (kiln, "kiln", GENESIS_STATE_IS_KNOWN), - (ropsten, "ropsten", GENESIS_STATE_IS_KNOWN), - (sepolia, "sepolia", GENESIS_STATE_IS_KNOWN) + ( + // Network name (must be unique among all networks). + mainnet, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "mainnet", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). + prater, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "prater", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). + goerli, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + // + // The Goerli network is effectively an alias to Prater. + "prater", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). 
+ gnosis, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "gnosis", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). + kiln, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "kiln", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). + ropsten, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "ropsten", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). + sepolia, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "sepolia", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. 
+ GENESIS_STATE_IS_KNOWN + ) ); diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 8df54a5a8b..2bfd003266 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -256,6 +256,13 @@ mod tests { config.beacon_state::<E>().expect("beacon state can decode"); } + #[test] + fn prater_and_goerli_are_equal() { + let goerli = Eth2NetworkConfig::from_hardcoded_net(&GOERLI).unwrap(); + let prater = Eth2NetworkConfig::from_hardcoded_net(&PRATER).unwrap(); + assert_eq!(goerli, prater); + } + #[test] fn hard_coded_nets_work() { for net in HARDCODED_NETS { @@ -275,7 +282,7 @@ mod tests { "{:?}", net.name ); - assert_eq!(config.config.config_name, Some(net.name.to_string())); + assert_eq!(config.config.config_name, Some(net.config_dir.to_string())); } } From e32868458f56a4754c6c63ec5835829c80b5768c Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Thu, 21 Jul 2022 05:45:37 +0000 Subject: [PATCH 081/184] Set safe block hash to justified (#3347) ## Issue Addressed Closes https://github.com/sigp/lighthouse/issues/3189. ## Proposed Changes - Always supply the justified block hash as the `safe_block_hash` when calling `forkchoiceUpdated` on the execution engine. - Refactor the `get_payload` routine to use the new `ForkchoiceUpdateParameters` struct rather than just the `finalized_block_hash`. I think this is a nice simplification and that the old way of computing the `finalized_block_hash` was unnecessary, but if anyone sees reason to keep that approach LMK. 
--- Cargo.lock | 2 + beacon_node/beacon_chain/src/beacon_chain.rs | 27 ++++-- .../beacon_chain/src/canonical_head.rs | 7 ++ .../beacon_chain/src/execution_payload.rs | 37 ++------ beacon_node/beacon_chain/src/lib.rs | 2 +- beacon_node/execution_layer/Cargo.toml | 1 + beacon_node/execution_layer/src/lib.rs | 93 ++++++++++--------- .../test_utils/execution_block_generator.rs | 4 +- .../src/test_utils/mock_execution_layer.rs | 13 ++- consensus/fork_choice/src/fork_choice.rs | 8 ++ .../execution_engine_integration/Cargo.toml | 1 + .../src/test_rig.rs | 57 ++++++++++-- 12 files changed, 156 insertions(+), 96 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c1277ed1d0..1c2d69ceed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1932,6 +1932,7 @@ dependencies = [ "ethers-providers", "execution_layer", "exit-future", + "fork_choice", "futures", "hex", "reqwest", @@ -1958,6 +1959,7 @@ dependencies = [ "eth2_ssz_types", "ethers-core", "exit-future", + "fork_choice", "futures", "hex", "jsonwebtoken", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index aa719b1a6f..78e846e74a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3256,14 +3256,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let prepare_payload_handle = match &state { BeaconState::Base(_) | BeaconState::Altair(_) => None, BeaconState::Merge(_) => { - let finalized_checkpoint = self.canonical_head.cached_head().finalized_checkpoint(); - let prepare_payload_handle = get_execution_payload( - self.clone(), - &state, - finalized_checkpoint, - proposer_index, - pubkey_opt, - )?; + let prepare_payload_handle = + get_execution_payload(self.clone(), &state, proposer_index, pubkey_opt)?; Some(prepare_payload_handle) } }; @@ -3890,11 +3884,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> { // `execution_engine_forkchoice_lock` apart from the one here. 
let forkchoice_lock = execution_layer.execution_engine_forkchoice_lock().await; - let (head_block_root, head_hash, finalized_hash) = if let Some(head_hash) = params.head_hash + let (head_block_root, head_hash, justified_hash, finalized_hash) = if let Some(head_hash) = + params.head_hash { ( params.head_root, head_hash, + params + .justified_hash + .unwrap_or_else(ExecutionBlockHash::zero), params .finalized_hash .unwrap_or_else(ExecutionBlockHash::zero), @@ -3925,6 +3923,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { ( params.head_root, terminal_pow_block_hash, + params + .justified_hash + .unwrap_or_else(ExecutionBlockHash::zero), params .finalized_hash .unwrap_or_else(ExecutionBlockHash::zero), @@ -3942,7 +3943,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> { }; let forkchoice_updated_response = execution_layer - .notify_forkchoice_updated(head_hash, finalized_hash, current_slot, head_block_root) + .notify_forkchoice_updated( + head_hash, + justified_hash, + finalized_hash, + current_slot, + head_block_root, + ) .await .map_err(Error::ExecutionForkChoiceUpdateFailed); diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index c02ddb8263..a07b346c1b 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -99,6 +99,8 @@ pub struct CachedHead<E: EthSpec> { /// The `execution_payload.block_hash` of the block at the head of the chain. Set to `None` /// before Bellatrix. head_hash: Option<ExecutionBlockHash>, + /// The `execution_payload.block_hash` of the justified block. Set to `None` before Bellatrix. + justified_hash: Option<ExecutionBlockHash>, /// The `execution_payload.block_hash` of the finalized block. Set to `None` before Bellatrix. 
finalized_hash: Option<ExecutionBlockHash>, } @@ -183,6 +185,7 @@ impl<E: EthSpec> CachedHead<E> { ForkchoiceUpdateParameters { head_root: self.snapshot.beacon_block_root, head_hash: self.head_hash, + justified_hash: self.justified_hash, finalized_hash: self.finalized_hash, } } @@ -224,6 +227,7 @@ impl<T: BeaconChainTypes> CanonicalHead<T> { justified_checkpoint: fork_choice_view.justified_checkpoint, finalized_checkpoint: fork_choice_view.finalized_checkpoint, head_hash: forkchoice_update_params.head_hash, + justified_hash: forkchoice_update_params.justified_hash, finalized_hash: forkchoice_update_params.finalized_hash, }; @@ -272,6 +276,7 @@ impl<T: BeaconChainTypes> CanonicalHead<T> { justified_checkpoint: fork_choice_view.justified_checkpoint, finalized_checkpoint: fork_choice_view.finalized_checkpoint, head_hash: forkchoice_update_params.head_hash, + justified_hash: forkchoice_update_params.justified_hash, finalized_hash: forkchoice_update_params.finalized_hash, }; @@ -612,6 +617,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { justified_checkpoint: new_view.justified_checkpoint, finalized_checkpoint: new_view.finalized_checkpoint, head_hash: new_forkchoice_update_parameters.head_hash, + justified_hash: new_forkchoice_update_parameters.justified_hash, finalized_hash: new_forkchoice_update_parameters.finalized_hash, }; @@ -638,6 +644,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { justified_checkpoint: new_view.justified_checkpoint, finalized_checkpoint: new_view.finalized_checkpoint, head_hash: new_forkchoice_update_parameters.head_hash, + justified_hash: new_forkchoice_update_parameters.justified_hash, finalized_hash: new_forkchoice_update_parameters.finalized_hash, }; diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 5c7c3c05d8..81193563cb 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -302,7 +302,6 @@ pub fn 
get_execution_payload< >( chain: Arc<BeaconChain<T>>, state: &BeaconState<T::EthSpec>, - finalized_checkpoint: Checkpoint, proposer_index: u64, pubkey: Option<PublicKeyBytes>, ) -> Result<PreparePayloadHandle<Payload>, BlockProductionError> { @@ -330,7 +329,6 @@ pub fn get_execution_payload< is_merge_transition_complete, timestamp, random, - finalized_checkpoint, proposer_index, pubkey, latest_execution_payload_header_block_hash, @@ -365,7 +363,6 @@ pub async fn prepare_execution_payload<T, Payload>( is_merge_transition_complete: bool, timestamp: u64, random: Hash256, - finalized_checkpoint: Checkpoint, proposer_index: u64, pubkey: Option<PublicKeyBytes>, latest_execution_payload_header_block_hash: ExecutionBlockHash, @@ -408,44 +405,24 @@ where latest_execution_payload_header_block_hash }; - // Try to obtain the finalized proto block from fork choice. + // Try to obtain the fork choice update parameters from the cached head. // - // Use a blocking task to interact with the `fork_choice` lock otherwise we risk blocking the + // Use a blocking task to interact with the `canonical_head` lock otherwise we risk blocking the // core `tokio` executor. let inner_chain = chain.clone(); - let finalized_proto_block = chain + let forkchoice_update_params = chain .spawn_blocking_handle( move || { inner_chain .canonical_head - .fork_choice_read_lock() - .get_block(&finalized_checkpoint.root) + .cached_head() + .forkchoice_update_parameters() }, - "prepare_execution_payload_finalized_hash", + "prepare_execution_payload_forkchoice_update_params", ) .await .map_err(BlockProductionError::BeaconChain)?; - // The finalized block hash is not included in the specification, however we provide this - // parameter so that the execution layer can produce a payload id if one is not already known - // (e.g., due to a recent reorg). 
- let finalized_block_hash = if let Some(block) = finalized_proto_block { - block.execution_status.block_hash() - } else { - chain - .store - .get_blinded_block(&finalized_checkpoint.root) - .map_err(BlockProductionError::FailedToReadFinalizedBlock)? - .ok_or(BlockProductionError::MissingFinalizedBlock( - finalized_checkpoint.root, - ))? - .message() - .body() - .execution_payload() - .ok() - .map(|ep| ep.block_hash()) - }; - // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. // // This future is not executed here, it's up to the caller to await it. @@ -454,10 +431,10 @@ where parent_hash, timestamp, random, - finalized_block_hash.unwrap_or_else(ExecutionBlockHash::zero), proposer_index, pubkey, slot, + forkchoice_update_params, ) .await .map_err(BlockProductionError::GetPayloadFailed)?; diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index b54964aa32..728057c90f 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -57,7 +57,7 @@ pub use block_verification::{BlockError, ExecutionPayloadError, GossipVerifiedBl pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::ServerSentEventHandler; -pub use fork_choice::ExecutionStatus; +pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters}; pub use metrics::scrape_for_metrics; pub use parking_lot; pub use slot_clock; diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index c181c19050..26e4ba52ef 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -39,3 +39,4 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lazy_static = "1.4.0" ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } builder_client = { path = "../builder_client" } 
+fork_choice = { path = "../../consensus/fork_choice" } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 4ab38cb3ab..e89e9ba814 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -11,6 +11,7 @@ pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; pub use engines::ForkChoiceState; use engines::{Engine, EngineError}; +use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; use payload_status::process_payload_status; pub use payload_status::PayloadStatus; @@ -502,10 +503,10 @@ impl<T: EthSpec> ExecutionLayer<T> { parent_hash: ExecutionBlockHash, timestamp: u64, prev_randao: Hash256, - finalized_block_hash: ExecutionBlockHash, proposer_index: u64, pubkey: Option<PublicKeyBytes>, slot: Slot, + forkchoice_update_params: ForkchoiceUpdateParameters, ) -> Result<Payload, Error> { let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; @@ -519,10 +520,10 @@ impl<T: EthSpec> ExecutionLayer<T> { parent_hash, timestamp, prev_randao, - finalized_block_hash, suggested_fee_recipient, pubkey, slot, + forkchoice_update_params, ) .await } @@ -535,8 +536,8 @@ impl<T: EthSpec> ExecutionLayer<T> { parent_hash, timestamp, prev_randao, - finalized_block_hash, suggested_fee_recipient, + forkchoice_update_params, ) .await } @@ -549,17 +550,22 @@ impl<T: EthSpec> ExecutionLayer<T> { parent_hash: ExecutionBlockHash, timestamp: u64, prev_randao: Hash256, - finalized_block_hash: ExecutionBlockHash, suggested_fee_recipient: Address, pubkey_opt: Option<PublicKeyBytes>, slot: Slot, + forkchoice_update_params: ForkchoiceUpdateParameters, ) -> Result<Payload, Error> { //FIXME(sean) fallback logic included in PR #3134 // Don't attempt to outsource payload construction until after the merge transition has been // finalized. We want to be conservative with payload construction until then. 
if let (Some(builder), Some(pubkey)) = (self.builder(), pubkey_opt) { - if finalized_block_hash != ExecutionBlockHash::zero() { + if forkchoice_update_params + .finalized_hash + .map_or(false, |finalized_block_hash| { + finalized_block_hash != ExecutionBlockHash::zero() + }) + { info!( self.log(), "Requesting blinded header from connected builder"; @@ -578,8 +584,8 @@ impl<T: EthSpec> ExecutionLayer<T> { parent_hash, timestamp, prev_randao, - finalized_block_hash, suggested_fee_recipient, + forkchoice_update_params, ) .await } @@ -590,15 +596,15 @@ impl<T: EthSpec> ExecutionLayer<T> { parent_hash: ExecutionBlockHash, timestamp: u64, prev_randao: Hash256, - finalized_block_hash: ExecutionBlockHash, suggested_fee_recipient: Address, + forkchoice_update_params: ForkchoiceUpdateParameters, ) -> Result<Payload, Error> { self.get_full_payload_with( parent_hash, timestamp, prev_randao, - finalized_block_hash, suggested_fee_recipient, + forkchoice_update_params, noop, ) .await @@ -609,8 +615,8 @@ impl<T: EthSpec> ExecutionLayer<T> { parent_hash: ExecutionBlockHash, timestamp: u64, prev_randao: Hash256, - finalized_block_hash: ExecutionBlockHash, suggested_fee_recipient: Address, + forkchoice_update_params: ForkchoiceUpdateParameters, f: fn(&ExecutionLayer<T>, &ExecutionPayload<T>) -> Option<ExecutionPayload<T>>, ) -> Result<Payload, Error> { debug!( @@ -634,20 +640,20 @@ impl<T: EthSpec> ExecutionLayer<T> { ); id } else { - // The payload id has *not* been cached for this engine. Trigger an artificial + // The payload id has *not* been cached. Trigger an artificial // fork choice update to retrieve a payload ID. - // - // TODO(merge): a better algorithm might try to favour a node that already had a - // cached payload id, since a payload that has had more time to produce is - // likely to be more profitable. 
metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, &[metrics::MISS], ); let fork_choice_state = ForkChoiceState { head_block_hash: parent_hash, - safe_block_hash: parent_hash, - finalized_block_hash, + safe_block_hash: forkchoice_update_params + .justified_hash + .unwrap_or_else(ExecutionBlockHash::zero), + finalized_block_hash: forkchoice_update_params + .finalized_hash + .unwrap_or_else(ExecutionBlockHash::zero), }; let payload_attributes = PayloadAttributes { timestamp, @@ -655,29 +661,28 @@ impl<T: EthSpec> ExecutionLayer<T> { suggested_fee_recipient, }; - let response = engine - .notify_forkchoice_updated( - fork_choice_state, - Some(payload_attributes), - self.log(), - ) - .await?; + let response = engine + .notify_forkchoice_updated( + fork_choice_state, + Some(payload_attributes), + self.log(), + ) + .await?; - match response.payload_id { - Some(payload_id) => payload_id, - None => { - error!( - self.log(), - "Exec engine unable to produce payload"; - "msg" => "No payload ID, the engine is likely syncing. \ - This has the potential to cause a missed block \ - proposal.", - "status" => ?response.payload_status - ); - return Err(ApiError::PayloadIdUnavailable); - } - } - }; + match response.payload_id { + Some(payload_id) => payload_id, + None => { + error!( + self.log(), + "Exec engine unable to produce payload"; + "msg" => "No payload ID, the engine is likely syncing. \ + This has the potential to cause a missed block proposal.", + "status" => ?response.payload_status + ); + return Err(ApiError::PayloadIdUnavailable); + } + } + }; engine .api @@ -685,7 +690,11 @@ impl<T: EthSpec> ExecutionLayer<T> { .await .map(|full_payload| { if f(self, &full_payload).is_some() { - warn!(self.log(), "Duplicate payload cached, this might indicate redundant proposal attempts."); + warn!( + self.log(), + "Duplicate payload cached, this might indicate redundant proposal \ + attempts." 
+ ); } full_payload.into() }) @@ -809,6 +818,7 @@ impl<T: EthSpec> ExecutionLayer<T> { pub async fn notify_forkchoice_updated( &self, head_block_hash: ExecutionBlockHash, + justified_block_hash: ExecutionBlockHash, finalized_block_hash: ExecutionBlockHash, current_slot: Slot, head_block_root: Hash256, @@ -822,6 +832,7 @@ impl<T: EthSpec> ExecutionLayer<T> { self.log(), "Issuing engine_forkchoiceUpdated"; "finalized_block_hash" => ?finalized_block_hash, + "justified_block_hash" => ?justified_block_hash, "head_block_hash" => ?head_block_hash, ); @@ -848,11 +859,9 @@ impl<T: EthSpec> ExecutionLayer<T> { } } - // see https://hackmd.io/@n0ble/kintsugi-spec#Engine-API - // for now, we must set safe_block_hash = head_block_hash let forkchoice_state = ForkChoiceState { head_block_hash, - safe_block_hash: head_block_hash, + safe_block_hash: justified_block_hash, finalized_block_hash, }; diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index bf8ed4947a..6935c88f22 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -335,7 +335,9 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> { } let unknown_head_block_hash = !self.blocks.contains_key(&forkchoice_state.head_block_hash); - let unknown_safe_block_hash = !self.blocks.contains_key(&forkchoice_state.safe_block_hash); + let unknown_safe_block_hash = forkchoice_state.safe_block_hash + != ExecutionBlockHash::zero() + && !self.blocks.contains_key(&forkchoice_state.safe_block_hash); let unknown_finalized_block_hash = forkchoice_state.finalized_block_hash != ExecutionBlockHash::zero() && !self diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 517772a695..8a5c26fe8d 100644 --- 
a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -88,11 +88,16 @@ impl<T: EthSpec> MockExecutionLayer<T> { let block_number = latest_execution_block.block_number() + 1; let timestamp = block_number; let prev_randao = Hash256::from_low_u64_be(block_number); - let finalized_block_hash = parent_hash; + let head_block_root = Hash256::repeat_byte(42); + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_root: head_block_root, + head_hash: Some(parent_hash), + justified_hash: None, + finalized_hash: None, + }; // Insert a proposer to ensure the fork choice updated command works. let slot = Slot::new(0); - let head_block_root = Hash256::repeat_byte(42); let validator_index = 0; self.el .insert_proposer( @@ -111,6 +116,7 @@ impl<T: EthSpec> MockExecutionLayer<T> { .notify_forkchoice_updated( parent_hash, ExecutionBlockHash::zero(), + ExecutionBlockHash::zero(), slot, head_block_root, ) @@ -124,10 +130,10 @@ impl<T: EthSpec> MockExecutionLayer<T> { parent_hash, timestamp, prev_randao, - finalized_block_hash, validator_index, None, slot, + forkchoice_update_params, ) .await .unwrap() @@ -148,6 +154,7 @@ impl<T: EthSpec> MockExecutionLayer<T> { .notify_forkchoice_updated( block_hash, ExecutionBlockHash::zero(), + ExecutionBlockHash::zero(), slot, head_block_root, ) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 7390ce7f94..d06d52235f 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -259,6 +259,7 @@ pub enum AttestationFromBlock { pub struct ForkchoiceUpdateParameters { pub head_root: Hash256, pub head_hash: Option<ExecutionBlockHash>, + pub justified_hash: Option<ExecutionBlockHash>, pub finalized_hash: Option<ExecutionBlockHash>, } @@ -372,6 +373,7 @@ where // This will be updated during the next call to `Self::get_head`. 
forkchoice_update_parameters: ForkchoiceUpdateParameters { head_hash: None, + justified_hash: None, finalized_hash: None, head_root: Hash256::zero(), }, @@ -489,13 +491,18 @@ where let head_hash = self .get_block(&head_root) .and_then(|b| b.execution_status.block_hash()); + let justified_root = self.justified_checkpoint().root; let finalized_root = self.finalized_checkpoint().root; + let justified_hash = self + .get_block(&justified_root) + .and_then(|b| b.execution_status.block_hash()); let finalized_hash = self .get_block(&finalized_root) .and_then(|b| b.execution_status.block_hash()); self.forkchoice_update_parameters = ForkchoiceUpdateParameters { head_root, head_hash, + justified_hash, finalized_hash, }; @@ -1211,6 +1218,7 @@ where // Will be updated in the following call to `Self::get_head`. forkchoice_update_parameters: ForkchoiceUpdateParameters { head_hash: None, + justified_hash: None, finalized_hash: None, head_root: Hash256::zero(), }, diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index f42a7f6abc..7a8d7e99b5 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -20,3 +20,4 @@ ethers-providers = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93 deposit_contract = { path = "../../common/deposit_contract" } reqwest = { version = "0.11.0", features = ["json"] } hex = "0.4.2" +fork_choice = { path = "../../consensus/fork_choice" } diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 9c09ec8d96..7dac2010b6 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -4,6 +4,7 @@ use crate::execution_engine::{ use crate::transactions::transactions; use ethers_providers::Middleware; use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatus}; +use 
fork_choice::ForkchoiceUpdateParameters; use reqwest::{header::CONTENT_TYPE, Client}; use sensitive_url::SensitiveUrl; use serde_json::{json, Value}; @@ -254,7 +255,15 @@ impl<E: GenericExecutionEngine> TestRig<E> { let parent_hash = terminal_pow_block_hash; let timestamp = timestamp_now(); let prev_randao = Hash256::zero(); + let head_root = Hash256::zero(); + let justified_block_hash = ExecutionBlockHash::zero(); let finalized_block_hash = ExecutionBlockHash::zero(); + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_root, + head_hash: Some(parent_hash), + justified_hash: Some(justified_block_hash), + finalized_hash: Some(finalized_block_hash), + }; let proposer_index = 0; let prepared = self @@ -262,7 +271,7 @@ impl<E: GenericExecutionEngine> TestRig<E> { .execution_layer .insert_proposer( Slot::new(1), // Insert proposer for the next slot - Hash256::zero(), + head_root, proposer_index, PayloadAttributes { timestamp, @@ -280,6 +289,7 @@ impl<E: GenericExecutionEngine> TestRig<E> { .execution_layer .notify_forkchoice_updated( parent_hash, + justified_block_hash, finalized_block_hash, Slot::new(0), Hash256::zero(), @@ -302,10 +312,10 @@ impl<E: GenericExecutionEngine> TestRig<E> { parent_hash, timestamp, prev_randao, - finalized_block_hash, proposer_index, None, Slot::new(0), + forkchoice_update_params, ) .await .unwrap() @@ -326,7 +336,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { let status = self .ee_a .execution_layer - .notify_forkchoice_updated(head_block_hash, finalized_block_hash, slot, head_block_root) + .notify_forkchoice_updated( + head_block_hash, + justified_block_hash, + finalized_block_hash, + slot, + head_block_root, + ) .await .unwrap(); assert_eq!(status, PayloadStatus::Syncing); @@ -360,7 +376,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { let status = self .ee_a .execution_layer - .notify_forkchoice_updated(head_block_hash, finalized_block_hash, slot, head_block_root) + .notify_forkchoice_updated( + head_block_hash, + 
justified_block_hash, + finalized_block_hash, + slot, + head_block_root, + ) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -390,7 +412,6 @@ impl<E: GenericExecutionEngine> TestRig<E> { let parent_hash = valid_payload.block_hash; let timestamp = valid_payload.timestamp + 1; let prev_randao = Hash256::zero(); - let finalized_block_hash = ExecutionBlockHash::zero(); let proposer_index = 0; let second_payload = self .ee_a @@ -399,10 +420,10 @@ impl<E: GenericExecutionEngine> TestRig<E> { parent_hash, timestamp, prev_randao, - finalized_block_hash, proposer_index, None, Slot::new(0), + forkchoice_update_params, ) .await .unwrap() @@ -445,7 +466,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { let status = self .ee_a .execution_layer - .notify_forkchoice_updated(head_block_hash, finalized_block_hash, slot, head_block_root) + .notify_forkchoice_updated( + head_block_hash, + justified_block_hash, + finalized_block_hash, + slot, + head_block_root, + ) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -475,7 +502,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { let status = self .ee_b .execution_layer - .notify_forkchoice_updated(head_block_hash, finalized_block_hash, slot, head_block_root) + .notify_forkchoice_updated( + head_block_hash, + justified_block_hash, + finalized_block_hash, + slot, + head_block_root, + ) .await .unwrap(); assert_eq!(status, PayloadStatus::Syncing); @@ -521,7 +554,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { let status = self .ee_b .execution_layer - .notify_forkchoice_updated(head_block_hash, finalized_block_hash, slot, head_block_root) + .notify_forkchoice_updated( + head_block_hash, + justified_block_hash, + finalized_block_hash, + slot, + head_block_root, + ) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); From 612cdb709207d94f770cb389000061939d349c44 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay <pawandhananjay@gmail.com> Date: Thu, 21 Jul 2022 05:45:39 +0000 Subject: [PATCH 082/184] Merge 
readiness endpoint (#3349) ## Issue Addressed Resolves final task in https://github.com/sigp/lighthouse/issues/3260 ## Proposed Changes Adds a lighthouse http endpoint to indicate merge readiness. Blocked on #3339 --- .../beacon_chain/src/merge_readiness.rs | 39 +++++++++++++------ beacon_node/client/src/notifier.rs | 4 +- beacon_node/http_api/src/lib.rs | 13 +++++++ book/src/api-lighthouse.md | 21 +++++++++- 4 files changed, 63 insertions(+), 14 deletions(-) diff --git a/beacon_node/beacon_chain/src/merge_readiness.rs b/beacon_node/beacon_chain/src/merge_readiness.rs index be158ecbe1..4a7b38bdb4 100644 --- a/beacon_node/beacon_chain/src/merge_readiness.rs +++ b/beacon_node/beacon_chain/src/merge_readiness.rs @@ -2,7 +2,7 @@ //! transition. use crate::{BeaconChain, BeaconChainTypes}; -use execution_layer::Error as EngineError; +use serde::{Deserialize, Serialize, Serializer}; use std::fmt; use std::fmt::Write; use types::*; @@ -11,10 +11,13 @@ use types::*; const SECONDS_IN_A_WEEK: u64 = 604800; pub const MERGE_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK; -#[derive(Default, Debug)] +#[derive(Default, Debug, Serialize, Deserialize)] pub struct MergeConfig { + #[serde(serialize_with = "serialize_uint256")] pub terminal_total_difficulty: Option<Uint256>, + #[serde(skip_serializing_if = "Option::is_none")] pub terminal_block_hash: Option<ExecutionBlockHash>, + #[serde(skip_serializing_if = "Option::is_none")] pub terminal_block_hash_epoch: Option<Epoch>, } @@ -73,15 +76,19 @@ impl MergeConfig { } /// Indicates if a node is ready for the Bellatrix upgrade and subsequent merge transition. +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +#[serde(tag = "type")] pub enum MergeReadiness { /// The node is ready, as far as we can tell. 
Ready { config: MergeConfig, - current_difficulty: Result<Uint256, String>, + #[serde(serialize_with = "serialize_uint256")] + current_difficulty: Option<Uint256>, }, /// The transition configuration with the EL failed, there might be a problem with /// connectivity, authentication or a difference in configuration. - ExchangeTransitionConfigurationFailed(EngineError), + ExchangeTransitionConfigurationFailed { error: String }, /// The EL can be reached and has the correct configuration, however it's not yet synced. NotSynced, /// The user has not configured this node to use an execution endpoint. @@ -102,11 +109,11 @@ impl fmt::Display for MergeReadiness { params, current_difficulty ) } - MergeReadiness::ExchangeTransitionConfigurationFailed(e) => write!( + MergeReadiness::ExchangeTransitionConfigurationFailed { error } => write!( f, "Could not confirm the transition configuration with the \ execution endpoint: {:?}", - e + error ), MergeReadiness::NotSynced => write!( f, @@ -145,7 +152,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { if let Err(e) = el.exchange_transition_configuration(&self.spec).await { // The EL was either unreachable, responded with an error or has a different // configuration. - return MergeReadiness::ExchangeTransitionConfigurationFailed(e); + return MergeReadiness::ExchangeTransitionConfigurationFailed { + error: format!("{:?}", e), + }; } if !el.is_synced_for_notifier().await { @@ -153,10 +162,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { return MergeReadiness::NotSynced; } let params = MergeConfig::from_chainspec(&self.spec); - let current_difficulty = el - .get_current_difficulty() - .await - .map_err(|_| "Failed to get current difficulty from execution node".to_string()); + let current_difficulty = el.get_current_difficulty().await.ok(); MergeReadiness::Ready { config: params, current_difficulty, @@ -167,3 +173,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> { } } } + +/// Utility function to serialize a Uint256 as a decimal string. 
+fn serialize_uint256<S>(val: &Option<Uint256>, s: S) -> Result<S::Ok, S::Error> +where + S: Serializer, +{ + match val { + Some(v) => v.to_string().serialize(s), + None => s.serialize_none(), + } +} diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 53478971af..74947c16f5 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -359,7 +359,7 @@ async fn merge_readiness_logging<T: BeaconChainTypes>( "terminal_total_difficulty" => %ttd, "current_difficulty" => current_difficulty .map(|d| d.to_string()) - .unwrap_or_else(|_| "??".into()), + .unwrap_or_else(|| "??".into()), ) } MergeConfig { @@ -382,7 +382,7 @@ async fn merge_readiness_logging<T: BeaconChainTypes>( "config" => ?other ), }, - readiness @ MergeReadiness::ExchangeTransitionConfigurationFailed(_) => { + readiness @ MergeReadiness::ExchangeTransitionConfigurationFailed { error: _ } => { error!( log, "Not ready for merge"; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 606dfb64dc..14f260e57b 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2887,6 +2887,18 @@ pub fn serve<T: BeaconChainTypes>( }) }); + // GET lighthouse/merge_readiness + let get_lighthouse_merge_readiness = warp::path("lighthouse") + .and(warp::path("merge_readiness")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc<BeaconChain<T>>| async move { + let merge_readiness = chain.check_merge_readiness().await; + Ok::<_, warp::reject::Rejection>(warp::reply::json(&api_types::GenericResponse::from( + merge_readiness, + ))) + }); + let get_events = eth1_v1 .and(warp::path("events")) .and(warp::path::end()) @@ -3015,6 +3027,7 @@ pub fn serve<T: BeaconChainTypes>( .or(get_lighthouse_block_rewards.boxed()) .or(get_lighthouse_attestation_performance.boxed()) .or(get_lighthouse_block_packing_efficiency.boxed()) + .or(get_lighthouse_merge_readiness.boxed()) 
.or(get_events.boxed()), ) .or(warp::post().and( diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index f5c4542b9e..d9c8080b4d 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -453,4 +453,23 @@ Caveats: loading a state on a boundary is most efficient. [block_reward_src]: -https://github.com/sigp/lighthouse/tree/unstable/common/eth2/src/lighthouse/block_rewards.rs \ No newline at end of file +https://github.com/sigp/lighthouse/tree/unstable/common/eth2/src/lighthouse/block_rewards.rs + + +### `/lighthouse/merge_readiness` + +```bash +curl -X GET "http://localhost:5052/lighthouse/merge_readiness" +``` + +``` +{ + "data":{ + "type":"ready", + "config":{ + "terminal_total_difficulty":"6400" + }, + "current_difficulty":"4800" + } + } +``` From 21dec6f603edd53ad8d2372941c77ab8098d3b5a Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Thu, 21 Jul 2022 22:02:36 +0000 Subject: [PATCH 083/184] v2.4.0 (#3360) ## Issue Addressed NA ## Proposed Changes Bump versions to v2.4.0 ## Additional Info Blocked on: - ~~#3349~~ - ~~#3347~~ --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1c2d69ceed..6b0edcae92 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -351,7 +351,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.3.2-rc.0" +version = "2.4.0" dependencies = [ "beacon_chain", "clap", @@ -508,7 +508,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.3.2-rc.0" +version = "2.4.0" dependencies = [ "beacon_node", "clap", @@ -2983,7 +2983,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.3.2-rc.0" +version = "2.4.0" dependencies = [ "account_utils", "bls", @@ -3481,7 +3481,7 @@ dependencies = [ 
[[package]] name = "lighthouse" -version = "2.3.2-rc.0" +version = "2.4.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index ccb145caf9..73e5ad65cc 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.3.2-rc.0" +version = "2.4.0" authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"] edition = "2021" diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index a8d6e03680..88651023f2 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "2.3.2-rc.0" +version = "2.4.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index e4a6bd0179..5e2862951e 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.3.2-rc.0-", - fallback = "Lighthouse/v2.3.2-rc.0" + prefix = "Lighthouse/v2.4.0-", + fallback = "Lighthouse/v2.4.0" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 037171097d..6cc0e5959b 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "2.3.2-rc.0" +version = "2.4.0" authors = ["Paul Hauner <paul@paulhauner.com>"] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 920cfa49c1..8c6f7524b9 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "2.3.2-rc.0" +version = "2.4.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" autotests = false From bb5a6d2cca90931c496b42c4fd11d5c66b21f6cb Mon Sep 17 00:00:00 2001 From: Mac L <mjladson@pm.me> Date: Mon, 25 Jul 2022 08:23:00 +0000 Subject: [PATCH 084/184] Add `execution_optimistic` flag to HTTP responses (#3070) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Issue Addressed #3031 ## Proposed Changes Updates the following API endpoints to conform with https://github.com/ethereum/beacon-APIs/pull/190 and https://github.com/ethereum/beacon-APIs/pull/196 - [x] `beacon/states/{state_id}/root` - [x] `beacon/states/{state_id}/fork` - [x] `beacon/states/{state_id}/finality_checkpoints` - [x] `beacon/states/{state_id}/validators` - [x] `beacon/states/{state_id}/validators/{validator_id}` - [x] `beacon/states/{state_id}/validator_balances` - [x] `beacon/states/{state_id}/committees` - [x] `beacon/states/{state_id}/sync_committees` - [x] `beacon/headers` - [x] `beacon/headers/{block_id}` - [x] `beacon/blocks/{block_id}` - [x] `beacon/blocks/{block_id}/root` - [x] `beacon/blocks/{block_id}/attestations` - [x] `debug/beacon/states/{state_id}` - [x] `debug/beacon/heads` - [x] `validator/duties/attester/{epoch}` - [x] `validator/duties/proposer/{epoch}` - [x] `validator/duties/sync/{epoch}` Updates the following Server-Sent Events: - [x] `events?topics=head` - [x] `events?topics=block` 
- [x] `events?topics=finalized_checkpoint` - [x] `events?topics=chain_reorg` ## Backwards Incompatible There is a very minor breaking change with the way the API now handles requests to `beacon/blocks/{block_id}/root` and `beacon/states/{state_id}/root` when `block_id` or `state_id` is the `Root` variant of `BlockId` and `StateId` respectively. Previously a request to a non-existent root would simply echo the root back to the requester: ``` curl "http://localhost:5052/eth/v1/beacon/states/0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/root" {"data":{"root":"0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}} ``` Now it will return a `404`: ``` curl "http://localhost:5052/eth/v1/beacon/blocks/0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/root" {"code":404,"message":"NOT_FOUND: beacon block with root 0xaaaa…aaaa","stacktraces":[]} ``` In addition to this, the block root `0x0000000000000000000000000000000000000000000000000000000000000000` previously would return the genesis block. It will now return a `404`: ``` curl "http://localhost:5052/eth/v1/beacon/blocks/0x0000000000000000000000000000000000000000000000000000000000000000" {"code":404,"message":"NOT_FOUND: beacon block with root 0x0000…0000","stacktraces":[]} ``` ## Additional Info - `execution_optimistic` is always set, and will return `false` pre-Bellatrix. I am also open to the idea of doing something like `#[serde(skip_serializing_if = "Option::is_none")]`. - The value of `execution_optimistic` is set to `false` where possible. Any computation that is reliant on the `head` will simply use the `ExecutionStatus` of the head (unless the head block is pre-Bellatrix). 
Co-authored-by: Paul Hauner <paul@paulhauner.com> --- Cargo.lock | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 44 +- .../beacon_chain/src/canonical_head.rs | 24 + beacon_node/beacon_chain/src/test_utils.rs | 8 + beacon_node/http_api/Cargo.toml | 1 + beacon_node/http_api/src/attester_duties.rs | 79 +- beacon_node/http_api/src/block_id.rs | 165 ++-- beacon_node/http_api/src/lib.rs | 730 +++++++++++------- beacon_node/http_api/src/proposer_duties.rs | 105 ++- beacon_node/http_api/src/state_id.rs | 203 ++++- beacon_node/http_api/src/sync_committees.rs | 17 +- .../http_api/src/validator_inclusion.rs | 5 +- beacon_node/http_api/src/version.rs | 24 +- beacon_node/http_api/tests/tests.rs | 393 +++++----- beacon_node/store/src/hot_cold_store.rs | 4 +- common/eth2/src/lib.rs | 76 +- common/eth2/src/types.rs | 35 + consensus/fork_choice/src/fork_choice.rs | 6 + 18 files changed, 1227 insertions(+), 693 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6b0edcae92..ab75fe2aed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2604,6 +2604,7 @@ dependencies = [ "lru", "network", "parking_lot 0.12.1", + "proto_array", "safe_arith", "sensitive_url", "serde", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 78e846e74a..c18f4a7374 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1289,23 +1289,28 @@ impl<T: BeaconChainTypes> BeaconChain<T> { epoch: Epoch, head_block_root: Hash256, ) -> Result<(Vec<Option<AttestationDuty>>, Hash256, ExecutionStatus), Error> { - self.with_committee_cache(head_block_root, epoch, |committee_cache, dependent_root| { - let duties = validator_indices - .iter() - .map(|validator_index| { - let validator_index = *validator_index as usize; - committee_cache.get_attestation_duties(validator_index) - }) - .collect(); + let execution_status = self + .canonical_head + .fork_choice_read_lock() + 
.get_block_execution_status(&head_block_root) + .ok_or(Error::AttestationHeadNotInForkChoice(head_block_root))?; - let execution_status = self - .canonical_head - .fork_choice_read_lock() - .get_block_execution_status(&head_block_root) - .ok_or(Error::AttestationHeadNotInForkChoice(head_block_root))?; + let (duties, dependent_root) = self.with_committee_cache( + head_block_root, + epoch, + |committee_cache, dependent_root| { + let duties = validator_indices + .iter() + .map(|validator_index| { + let validator_index = *validator_index as usize; + committee_cache.get_attestation_duties(validator_index) + }) + .collect(); - Ok((duties, dependent_root, execution_status)) - }) + Ok((duties, dependent_root)) + }, + )?; + Ok((duties, dependent_root, execution_status)) } /// Returns an aggregated `Attestation`, if any, that has a matching `attestation.data`. @@ -2908,6 +2913,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { event_handler.register(EventKind::Block(SseBlock { slot, block: block_root, + execution_optimistic: payload_verification_status.is_optimistic(), })); } } @@ -4055,9 +4061,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic`. - pub fn is_optimistic_block( + pub fn is_optimistic_block<Payload: ExecPayload<T::EthSpec>>( &self, - block: &SignedBeaconBlock<T::EthSpec>, + block: &SignedBeaconBlock<T::EthSpec, Payload>, ) -> Result<bool, BeaconChainError> { // Check if the block is pre-Bellatrix. if self.slot_is_prior_to_bellatrix(block.slot()) { @@ -4081,9 +4087,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// /// There is a potential race condition when syncing where the block_root of `head_block` could /// be pruned from the fork choice store before being read. 
- pub fn is_optimistic_head_block( + pub fn is_optimistic_head_block<Payload: ExecPayload<T::EthSpec>>( &self, - head_block: &SignedBeaconBlock<T::EthSpec>, + head_block: &SignedBeaconBlock<T::EthSpec, Payload>, ) -> Result<bool, BeaconChainError> { // Check if the block is pre-Bellatrix. if self.slot_is_prior_to_bellatrix(head_block.slot()) { diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index a07b346c1b..aff4deeaf9 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -300,6 +300,23 @@ impl<T: BeaconChainTypes> CanonicalHead<T> { .ok_or(Error::HeadMissingFromForkChoice(head_block_root)) } + /// Returns a clone of the `CachedHead` and the execution status of the contained head block. + /// + /// This will only return `Err` in the scenario where `self.fork_choice` has advanced + /// significantly past the cached `head_snapshot`. In such a scenario it is likely prudent to + /// run `BeaconChain::recompute_head` to update the cached values. + pub fn head_and_execution_status( + &self, + ) -> Result<(CachedHead<T::EthSpec>, ExecutionStatus), Error> { + let head = self.cached_head(); + let head_block_root = head.head_block_root(); + let execution_status = self + .fork_choice_read_lock() + .get_block_execution_status(&head_block_root) + .ok_or(Error::HeadMissingFromForkChoice(head_block_root))?; + Ok((head, execution_status)) + } + /// Returns a clone of `self.cached_head`. /// /// Takes a read-lock on `self.cached_head` for a short time (just long enough to clone it). @@ -713,6 +730,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { ) -> Result<(), Error> { let old_snapshot = &old_cached_head.snapshot; let new_snapshot = &new_cached_head.snapshot; + let new_head_is_optimistic = new_head_proto_block.execution_status.is_optimistic(); // Detect and potentially report any re-orgs. 
let reorg_distance = detect_reorg( @@ -798,6 +816,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { current_duty_dependent_root, previous_duty_dependent_root, epoch_transition: is_epoch_transition, + execution_optimistic: new_head_is_optimistic, })); } (Err(e), _) | (_, Err(e)) => { @@ -825,6 +844,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { new_head_block: new_snapshot.beacon_block_root, new_head_state: new_snapshot.beacon_state_root(), epoch: head_slot.epoch(T::EthSpec::slots_per_epoch()), + execution_optimistic: new_head_is_optimistic, })); } } @@ -841,6 +861,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { finalized_proto_block: ProtoBlock, ) -> Result<(), Error> { let new_snapshot = &new_cached_head.snapshot; + let finalized_block_is_optimistic = finalized_proto_block.execution_status.is_optimistic(); self.op_pool .prune_all(&new_snapshot.beacon_state, self.epoch()?); @@ -884,6 +905,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { // specific state root at the first slot of the finalized epoch (which // might be a skip slot). state: finalized_proto_block.state_root, + execution_optimistic: finalized_block_is_optimistic, })); } } @@ -1216,6 +1238,7 @@ fn observe_head_block_delays<E: EthSpec, S: SlotClock>( let block_time_set_as_head = timestamp_now(); let head_block_root = head_block.root; let head_block_slot = head_block.slot; + let head_block_is_optimistic = head_block.execution_status.is_optimistic(); // Calculate the total delay between the start of the slot and when it was set as head. 
let block_delay_total = get_slot_delay_ms(block_time_set_as_head, head_block_slot, slot_clock); @@ -1308,6 +1331,7 @@ fn observe_head_block_delays<E: EthSpec, S: SlotClock>( observed_delay: block_delays.observed, imported_delay: block_delays.imported, set_as_head_delay: block_delays.set_as_head, + execution_optimistic: head_block_is_optimistic, })); } } diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 2adae6c166..e9dc8619ac 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -37,6 +37,7 @@ use state_processing::{ }; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::fmt; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; @@ -1778,3 +1779,10 @@ where (honest_head, faulty_head) } } + +// Junk `Debug` impl to satistfy certain trait bounds during testing. +impl<T: BeaconChainTypes> fmt::Debug for BeaconChainHarness<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "BeaconChainHarness") + } +} diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 07fb992393..5cc703aa1a 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -40,6 +40,7 @@ tree_hash = "0.4.1" sensitive_url = { path = "../../common/sensitive_url" } logging = { path = "../../common/logging" } serde_json = "1.0.58" +proto_array = { path = "../../consensus/proto_array" } [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 35a35bcb74..6805d7104c 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -60,11 +60,17 @@ fn cached_attestation_duties<T: BeaconChainTypes>( ) -> Result<ApiDuties, warp::reject::Rejection> { let head_block_root = chain.canonical_head.cached_head().head_block_root(); - let (duties, dependent_root, 
_execution_status) = chain + let (duties, dependent_root, execution_status) = chain .validator_attestation_duties(request_indices, request_epoch, head_block_root) .map_err(warp_utils::reject::beacon_chain_error)?; - convert_to_api_response(duties, request_indices, dependent_root, chain) + convert_to_api_response( + duties, + request_indices, + dependent_root, + execution_status.is_optimistic(), + chain, + ) } /// Compute some attester duties by reading a `BeaconState` from disk, completely ignoring the @@ -76,35 +82,42 @@ fn compute_historic_attester_duties<T: BeaconChainTypes>( ) -> Result<ApiDuties, warp::reject::Rejection> { // If the head is quite old then it might still be relevant for a historical request. // - // Use the `with_head` function to read & clone in a single call to avoid race conditions. - let state_opt = chain - .with_head(|head| { - if head.beacon_state.current_epoch() <= request_epoch { - Ok(Some(( - head.beacon_state_root(), - head.beacon_state - .clone_with(CloneConfig::committee_caches_only()), - ))) - } else { - Ok(None) - } - }) - .map_err(warp_utils::reject::beacon_chain_error)?; + // Avoid holding the `cached_head` longer than necessary. + let state_opt = { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + let head = &cached_head.snapshot; - let mut state = if let Some((state_root, mut state)) = state_opt { - // If we've loaded the head state it might be from a previous epoch, ensure it's in a - // suitable epoch. - ensure_state_knows_attester_duties_for_epoch( - &mut state, - state_root, - request_epoch, - &chain.spec, - )?; - state - } else { - StateId::slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)? 
+ if head.beacon_state.current_epoch() <= request_epoch { + Some(( + head.beacon_state_root(), + head.beacon_state + .clone_with(CloneConfig::committee_caches_only()), + execution_status.is_optimistic(), + )) + } else { + None + } }; + let (mut state, execution_optimistic) = + if let Some((state_root, mut state, execution_optimistic)) = state_opt { + // If we've loaded the head state it might be from a previous epoch, ensure it's in a + // suitable epoch. + ensure_state_knows_attester_duties_for_epoch( + &mut state, + state_root, + request_epoch, + &chain.spec, + )?; + (state, execution_optimistic) + } else { + StateId::from_slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())) + .state(chain)? + }; + // Sanity-check the state lookup. if !(state.current_epoch() == request_epoch || state.current_epoch() + 1 == request_epoch) { return Err(warp_utils::reject::custom_server_error(format!( @@ -140,7 +153,13 @@ fn compute_historic_attester_duties<T: BeaconChainTypes>( .collect::<Result<_, _>>() .map_err(warp_utils::reject::beacon_chain_error)?; - convert_to_api_response(duties, request_indices, dependent_root, chain) + convert_to_api_response( + duties, + request_indices, + dependent_root, + execution_optimistic, + chain, + ) } fn ensure_state_knows_attester_duties_for_epoch<E: EthSpec>( @@ -178,6 +197,7 @@ fn convert_to_api_response<T: BeaconChainTypes>( duties: Vec<Option<AttestationDuty>>, indices: &[u64], dependent_root: Hash256, + execution_optimistic: bool, chain: &BeaconChain<T>, ) -> Result<ApiDuties, warp::reject::Rejection> { // Protect against an inconsistent slot clock. 
@@ -213,6 +233,7 @@ fn convert_to_api_response<T: BeaconChainTypes>( Ok(api_types::DutiesResponse { dependent_root, + execution_optimistic: Some(execution_optimistic), data, }) } diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 73f50985bd..91425e2f10 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,8 +1,10 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes, WhenSlotSkipped}; +use crate::{state_id::checkpoint_slot_and_execution_optimistic, ExecutionOptimistic}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::BlockId as CoreBlockId; +use std::fmt; use std::str::FromStr; use std::sync::Arc; -use types::{BlindedPayload, Hash256, SignedBeaconBlock, Slot}; +use types::{Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot}; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given /// `BlockId`. @@ -22,32 +24,78 @@ impl BlockId { pub fn root<T: BeaconChainTypes>( &self, chain: &BeaconChain<T>, - ) -> Result<Hash256, warp::Rejection> { + ) -> Result<(Hash256, ExecutionOptimistic), warp::Rejection> { match &self.0 { - CoreBlockId::Head => Ok(chain.canonical_head.cached_head().head_block_root()), - CoreBlockId::Genesis => Ok(chain.genesis_block_root), - CoreBlockId::Finalized => Ok(chain - .canonical_head - .cached_head() - .finalized_checkpoint() - .root), - CoreBlockId::Justified => Ok(chain - .canonical_head - .cached_head() - .justified_checkpoint() - .root), - CoreBlockId::Slot(slot) => chain - .block_root_at_slot(*slot, WhenSlotSkipped::None) - .map_err(warp_utils::reject::beacon_chain_error) - .and_then(|root_opt| { - root_opt.ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "beacon block at slot {}", - slot - )) - }) - }), - CoreBlockId::Root(root) => Ok(*root), + CoreBlockId::Head => { + let (cached_head, execution_status) = chain + .canonical_head + 
.head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + Ok(( + cached_head.head_block_root(), + execution_status.is_optimistic(), + )) + } + CoreBlockId::Genesis => Ok((chain.genesis_block_root, false)), + CoreBlockId::Finalized => { + let finalized_checkpoint = + chain.canonical_head.cached_head().finalized_checkpoint(); + let (_slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?; + Ok((finalized_checkpoint.root, execution_optimistic)) + } + CoreBlockId::Justified => { + let justified_checkpoint = + chain.canonical_head.cached_head().justified_checkpoint(); + let (_slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?; + Ok((justified_checkpoint.root, execution_optimistic)) + } + CoreBlockId::Slot(slot) => { + let execution_optimistic = chain + .is_optimistic_head() + .map_err(warp_utils::reject::beacon_chain_error)?; + let root = chain + .block_root_at_slot(*slot, WhenSlotSkipped::None) + .map_err(warp_utils::reject::beacon_chain_error) + .and_then(|root_opt| { + root_opt.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block at slot {}", + slot + )) + }) + })?; + Ok((root, execution_optimistic)) + } + CoreBlockId::Root(root) => { + // This matches the behaviour of other consensus clients (e.g. Teku). + if root == &Hash256::zero() { + return Err(warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + ))); + }; + if chain + .store + .block_exists(root) + .map_err(BeaconChainError::DBError) + .map_err(warp_utils::reject::beacon_chain_error)? 
+ { + let execution_optimistic = chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_block(root) + .map_err(BeaconChainError::ForkChoiceError) + .map_err(warp_utils::reject::beacon_chain_error)?; + Ok((*root, execution_optimistic)) + } else { + return Err(warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + ))); + } + } } } @@ -55,11 +103,20 @@ impl BlockId { pub fn blinded_block<T: BeaconChainTypes>( &self, chain: &BeaconChain<T>, - ) -> Result<SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>, warp::Rejection> { + ) -> Result<(SignedBlindedBeaconBlock<T::EthSpec>, ExecutionOptimistic), warp::Rejection> { match &self.0 { - CoreBlockId::Head => Ok(chain.head_beacon_block().clone_as_blinded()), + CoreBlockId::Head => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + Ok(( + cached_head.snapshot.beacon_block.clone_as_blinded(), + execution_status.is_optimistic(), + )) + } CoreBlockId::Slot(slot) => { - let root = self.root(chain)?; + let (root, execution_optimistic) = self.root(chain)?; chain .get_blinded_block(&root) .map_err(warp_utils::reject::beacon_chain_error) @@ -71,7 +128,7 @@ impl BlockId { slot ))); } - Ok(block) + Ok((block, execution_optimistic)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -80,8 +137,8 @@ impl BlockId { }) } _ => { - let root = self.root(chain)?; - chain + let (root, execution_optimistic) = self.root(chain)?; + let block = chain .get_blinded_block(&root) .map_err(warp_utils::reject::beacon_chain_error) .and_then(|root_opt| { @@ -91,7 +148,8 @@ impl BlockId { root )) }) - }) + })?; + Ok((block, execution_optimistic)) } } } @@ -100,11 +158,20 @@ impl BlockId { pub async fn full_block<T: BeaconChainTypes>( &self, chain: &BeaconChain<T>, - ) -> Result<Arc<SignedBeaconBlock<T::EthSpec>>, warp::Rejection> { + ) -> 
Result<(Arc<SignedBeaconBlock<T::EthSpec>>, ExecutionOptimistic), warp::Rejection> { match &self.0 { - CoreBlockId::Head => Ok(chain.head_beacon_block()), + CoreBlockId::Head => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + Ok(( + cached_head.snapshot.beacon_block.clone(), + execution_status.is_optimistic(), + )) + } CoreBlockId::Slot(slot) => { - let root = self.root(chain)?; + let (root, execution_optimistic) = self.root(chain)?; chain .get_block(&root) .await @@ -117,7 +184,7 @@ impl BlockId { slot ))); } - Ok(Arc::new(block)) + Ok((Arc::new(block), execution_optimistic)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -126,18 +193,20 @@ impl BlockId { }) } _ => { - let root = self.root(chain)?; + let (root, execution_optimistic) = self.root(chain)?; chain .get_block(&root) .await .map_err(warp_utils::reject::beacon_chain_error) .and_then(|block_opt| { - block_opt.map(Arc::new).ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "beacon block with root {}", - root - )) - }) + block_opt + .map(|block| (Arc::new(block), execution_optimistic)) + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + )) + }) }) } } @@ -151,3 +220,9 @@ impl FromStr for BlockId { CoreBlockId::from_str(s).map(Self) } } + +impl fmt::Display for BlockId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 14f260e57b..a27e5015cf 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -25,7 +25,7 @@ use beacon_chain::{ AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped, }; -use block_id::BlockId; +pub use block_id::BlockId; use eth2::types::{self as 
api_types, EndpointVersion, ValidatorId}; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; @@ -34,7 +34,7 @@ use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; -use state_id::StateId; +pub use state_id::StateId; use std::borrow::Cow; use std::convert::TryInto; use std::future::Future; @@ -53,8 +53,8 @@ use types::{ SyncCommitteeMessage, SyncContributionData, }; use version::{ - add_consensus_version_header, fork_versioned_response, inconsistent_fork_rejection, - unsupported_version_rejection, V1, + add_consensus_version_header, execution_optimistic_fork_versioned_response, + fork_versioned_response, inconsistent_fork_rejection, unsupported_version_rejection, V1, V2, }; use warp::http::StatusCode; use warp::sse::Event; @@ -77,6 +77,9 @@ const SYNC_TOLERANCE_EPOCHS: u64 = 8; /// A custom type which allows for both unsecured and TLS-enabled HTTP servers. type HttpServer = (SocketAddr, Pin<Box<dyn Future<Output = ()> + Send>>); +/// Alias for readability. +pub type ExecutionOptimistic = bool; + /// Configuration used when serving the HTTP server over TLS. #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] pub struct TlsConfig { @@ -304,7 +307,7 @@ pub fn serve<T: BeaconChainTypes>( .untuple_one() }; - let eth1_v1 = single_version(V1); + let eth_v1 = single_version(V1); // Create a `warp` filter that provides access to the network globals. 
let inner_network_globals = ctx.network_globals.clone(); @@ -413,7 +416,7 @@ pub fn serve<T: BeaconChainTypes>( */ // GET beacon/genesis - let get_beacon_genesis = eth1_v1 + let get_beacon_genesis = eth_v1 .and(warp::path("beacon")) .and(warp::path("genesis")) .and(warp::path::end()) @@ -433,7 +436,7 @@ pub fn serve<T: BeaconChainTypes>( * beacon/states/{state_id} */ - let beacon_states_path = eth1_v1 + let beacon_states_path = eth_v1 .and(warp::path("beacon")) .and(warp::path("states")) .and(warp::path::param::<StateId>().or_else(|_| async { @@ -450,10 +453,12 @@ pub fn serve<T: BeaconChainTypes>( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| { blocking_json_task(move || { - state_id - .root(&chain) + let (root, execution_optimistic) = state_id.root(&chain)?; + + Ok(root) .map(api_types::RootData::from) .map(api_types::GenericResponse::from) + .map(|resp| resp.add_execution_optimistic(execution_optimistic)) }) }); @@ -463,7 +468,14 @@ pub fn serve<T: BeaconChainTypes>( .and(warp::path("fork")) .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| { - blocking_json_task(move || state_id.fork(&chain).map(api_types::GenericResponse::from)) + blocking_json_task(move || { + let (fork, execution_optimistic) = + state_id.fork_and_execution_optimistic(&chain)?; + Ok(api_types::ExecutionOptimisticResponse { + data: fork, + execution_optimistic: Some(execution_optimistic), + }) + }) }); // GET beacon/states/{state_id}/finality_checkpoints @@ -473,15 +485,24 @@ pub fn serve<T: BeaconChainTypes>( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| { blocking_json_task(move || { - state_id - .map_state(&chain, |state| { - Ok(api_types::FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }) - }) - .map(api_types::GenericResponse::from) + let (data, 
execution_optimistic) = state_id.map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + Ok(( + api_types::FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }, + execution_optimistic, + )) + }, + )?; + + Ok(api_types::ExecutionOptimisticResponse { + data, + execution_optimistic: Some(execution_optimistic), + }) }) }); @@ -497,35 +518,45 @@ pub fn serve<T: BeaconChainTypes>( query_res: Result<api_types::ValidatorBalancesQuery, warp::Rejection>| { blocking_json_task(move || { let query = query_res?; - state_id - .map_state(&chain, |state| { - Ok(state - .validators() - .iter() - .zip(state.balances().iter()) - .enumerate() - // filter by validator id(s) if provided - .filter(|(index, (validator, _))| { - query.id.as_ref().map_or(true, |ids| { - ids.iter().any(|id| match id { - ValidatorId::PublicKey(pubkey) => { - &validator.pubkey == pubkey - } - ValidatorId::Index(param_index) => { - *param_index == *index as u64 - } + let (data, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + Ok(( + state + .validators() + .iter() + .zip(state.balances().iter()) + .enumerate() + // filter by validator id(s) if provided + .filter(|(index, (validator, _))| { + query.id.as_ref().map_or(true, |ids| { + ids.iter().any(|id| match id { + ValidatorId::PublicKey(pubkey) => { + &validator.pubkey == pubkey + } + ValidatorId::Index(param_index) => { + *param_index == *index as u64 + } + }) + }) }) - }) - }) - .map(|(index, (_, balance))| { - Some(api_types::ValidatorBalanceData { - index: index as u64, - balance: *balance, - }) - }) - .collect::<Vec<_>>()) - }) - .map(api_types::GenericResponse::from) + .map(|(index, (_, balance))| { + Some(api_types::ValidatorBalanceData { + index: index as u64, + balance: *balance, + }) + }) + .collect::<Vec<_>>(), + 
execution_optimistic, + )) + }, + )?; + + Ok(api_types::ExecutionOptimisticResponse { + data, + execution_optimistic: Some(execution_optimistic), + }) }) }, ); @@ -542,57 +573,67 @@ pub fn serve<T: BeaconChainTypes>( query_res: Result<api_types::ValidatorsQuery, warp::Rejection>| { blocking_json_task(move || { let query = query_res?; - state_id - .map_state(&chain, |state| { - let epoch = state.current_epoch(); - let far_future_epoch = chain.spec.far_future_epoch; + let (data, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let epoch = state.current_epoch(); + let far_future_epoch = chain.spec.far_future_epoch; - Ok(state - .validators() - .iter() - .zip(state.balances().iter()) - .enumerate() - // filter by validator id(s) if provided - .filter(|(index, (validator, _))| { - query.id.as_ref().map_or(true, |ids| { - ids.iter().any(|id| match id { - ValidatorId::PublicKey(pubkey) => { - &validator.pubkey == pubkey - } - ValidatorId::Index(param_index) => { - *param_index == *index as u64 + Ok(( + state + .validators() + .iter() + .zip(state.balances().iter()) + .enumerate() + // filter by validator id(s) if provided + .filter(|(index, (validator, _))| { + query.id.as_ref().map_or(true, |ids| { + ids.iter().any(|id| match id { + ValidatorId::PublicKey(pubkey) => { + &validator.pubkey == pubkey + } + ValidatorId::Index(param_index) => { + *param_index == *index as u64 + } + }) + }) + }) + // filter by status(es) if provided and map the result + .filter_map(|(index, (validator, balance))| { + let status = api_types::ValidatorStatus::from_validator( + validator, + epoch, + far_future_epoch, + ); + + let status_matches = + query.status.as_ref().map_or(true, |statuses| { + statuses.contains(&status) + || statuses.contains(&status.superstatus()) + }); + + if status_matches { + Some(api_types::ValidatorData { + index: index as u64, + balance: *balance, + status, + validator: validator.clone(), + }) + } 
else { + None } }) - }) - }) - // filter by status(es) if provided and map the result - .filter_map(|(index, (validator, balance))| { - let status = api_types::ValidatorStatus::from_validator( - validator, - epoch, - far_future_epoch, - ); + .collect::<Vec<_>>(), + execution_optimistic, + )) + }, + )?; - let status_matches = - query.status.as_ref().map_or(true, |statuses| { - statuses.contains(&status) - || statuses.contains(&status.superstatus()) - }); - - if status_matches { - Some(api_types::ValidatorData { - index: index as u64, - balance: *balance, - status, - validator: validator.clone(), - }) - } else { - None - } - }) - .collect::<Vec<_>>()) - }) - .map(api_types::GenericResponse::from) + Ok(api_types::ExecutionOptimisticResponse { + data, + execution_optimistic: Some(execution_optimistic), + }) }) }, ); @@ -610,41 +651,51 @@ pub fn serve<T: BeaconChainTypes>( .and_then( |state_id: StateId, chain: Arc<BeaconChain<T>>, validator_id: ValidatorId| { blocking_json_task(move || { - state_id - .map_state(&chain, |state| { - let index_opt = match &validator_id { - ValidatorId::PublicKey(pubkey) => { - state.validators().iter().position(|v| v.pubkey == *pubkey) - } - ValidatorId::Index(index) => Some(*index as usize), - }; + let (data, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let index_opt = match &validator_id { + ValidatorId::PublicKey(pubkey) => { + state.validators().iter().position(|v| v.pubkey == *pubkey) + } + ValidatorId::Index(index) => Some(*index as usize), + }; - index_opt - .and_then(|index| { - let validator = state.validators().get(index)?; - let balance = *state.balances().get(index)?; - let epoch = state.current_epoch(); - let far_future_epoch = chain.spec.far_future_epoch; + Ok(( + index_opt + .and_then(|index| { + let validator = state.validators().get(index)?; + let balance = *state.balances().get(index)?; + let epoch = state.current_epoch(); + let far_future_epoch = 
chain.spec.far_future_epoch; - Some(api_types::ValidatorData { - index: index as u64, - balance, - status: api_types::ValidatorStatus::from_validator( - validator, - epoch, - far_future_epoch, - ), - validator: validator.clone(), - }) - }) - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "unknown validator: {}", - validator_id - )) - }) - }) - .map(api_types::GenericResponse::from) + Some(api_types::ValidatorData { + index: index as u64, + balance, + status: api_types::ValidatorStatus::from_validator( + validator, + epoch, + far_future_epoch, + ), + validator: validator.clone(), + }) + }) + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "unknown validator: {}", + validator_id + )) + })?, + execution_optimistic, + )) + }, + )?; + + Ok(api_types::ExecutionOptimisticResponse { + data, + execution_optimistic: Some(execution_optimistic), + }) }) }, ); @@ -658,86 +709,98 @@ pub fn serve<T: BeaconChainTypes>( .and_then( |state_id: StateId, chain: Arc<BeaconChain<T>>, query: api_types::CommitteesQuery| { blocking_json_task(move || { - state_id.map_state(&chain, |state| { - let current_epoch = state.current_epoch(); - let epoch = query.epoch.unwrap_or(current_epoch); + let (data, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let current_epoch = state.current_epoch(); + let epoch = query.epoch.unwrap_or(current_epoch); - let committee_cache = match RelativeEpoch::from_epoch(current_epoch, epoch) - { - Ok(relative_epoch) - if state.committee_cache_is_initialized(relative_epoch) => - { - state.committee_cache(relative_epoch).map(Cow::Borrowed) - } - _ => CommitteeCache::initialized(state, epoch, &chain.spec) - .map(Cow::Owned), - } - .map_err(|e| match e { - BeaconStateError::EpochOutOfBounds => { - let max_sprp = T::EthSpec::slots_per_historical_root() as u64; - let first_subsequent_restore_point_slot = - ((epoch.start_slot(T::EthSpec::slots_per_epoch()) / max_sprp) 
- + 1) - * max_sprp; - if epoch < current_epoch { - warp_utils::reject::custom_bad_request(format!( - "epoch out of bounds, try state at slot {}", - first_subsequent_restore_point_slot, - )) - } else { - warp_utils::reject::custom_bad_request( - "epoch out of bounds, too far in future".into(), - ) + let committee_cache = + match RelativeEpoch::from_epoch(current_epoch, epoch) { + Ok(relative_epoch) + if state + .committee_cache_is_initialized(relative_epoch) => + { + state.committee_cache(relative_epoch).map(Cow::Borrowed) + } + _ => CommitteeCache::initialized(state, epoch, &chain.spec) + .map(Cow::Owned), + } + .map_err(|e| match e { + BeaconStateError::EpochOutOfBounds => { + let max_sprp = + T::EthSpec::slots_per_historical_root() as u64; + let first_subsequent_restore_point_slot = ((epoch + .start_slot(T::EthSpec::slots_per_epoch()) + / max_sprp) + + 1) + * max_sprp; + if epoch < current_epoch { + warp_utils::reject::custom_bad_request(format!( + "epoch out of bounds, try state at slot {}", + first_subsequent_restore_point_slot, + )) + } else { + warp_utils::reject::custom_bad_request( + "epoch out of bounds, too far in future".into(), + ) + } + } + _ => warp_utils::reject::beacon_chain_error(e.into()), + })?; + + // Use either the supplied slot or all slots in the epoch. + let slots = + query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { + epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() + }); + + // Use either the supplied committee index or all available indices. + let indices = + query.index.map(|index| vec![index]).unwrap_or_else(|| { + (0..committee_cache.committees_per_slot()).collect() + }); + + let mut response = Vec::with_capacity(slots.len() * indices.len()); + + for slot in slots { + // It is not acceptable to query with a slot that is not within the + // specified epoch. 
+ if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { + return Err(warp_utils::reject::custom_bad_request( + format!("{} is not in epoch {}", slot, epoch), + )); + } + + for &index in &indices { + let committee = committee_cache + .get_beacon_committee(slot, index) + .ok_or_else(|| { + warp_utils::reject::custom_bad_request(format!( + "committee index {} does not exist in epoch {}", + index, epoch + )) + })?; + + response.push(api_types::CommitteeData { + index, + slot, + validators: committee + .committee + .iter() + .map(|i| *i as u64) + .collect(), + }); + } } - } - _ => warp_utils::reject::beacon_chain_error(e.into()), - })?; - // Use either the supplied slot or all slots in the epoch. - let slots = query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { - epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() - }); - - // Use either the supplied committee index or all available indices. - let indices = query.index.map(|index| vec![index]).unwrap_or_else(|| { - (0..committee_cache.committees_per_slot()).collect() - }); - - let mut response = Vec::with_capacity(slots.len() * indices.len()); - - for slot in slots { - // It is not acceptable to query with a slot that is not within the - // specified epoch. 
- if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { - return Err(warp_utils::reject::custom_bad_request(format!( - "{} is not in epoch {}", - slot, epoch - ))); - } - - for &index in &indices { - let committee = committee_cache - .get_beacon_committee(slot, index) - .ok_or_else(|| { - warp_utils::reject::custom_bad_request(format!( - "committee index {} does not exist in epoch {}", - index, epoch - )) - })?; - - response.push(api_types::CommitteeData { - index, - slot, - validators: committee - .committee - .iter() - .map(|i| *i as u64) - .collect(), - }); - } - } - - Ok(api_types::GenericResponse::from(response)) + Ok((response, execution_optimistic)) + }, + )?; + Ok(api_types::ExecutionOptimisticResponse { + data, + execution_optimistic: Some(execution_optimistic), }) }) }, @@ -754,28 +817,35 @@ pub fn serve<T: BeaconChainTypes>( chain: Arc<BeaconChain<T>>, query: api_types::SyncCommitteesQuery| { blocking_json_task(move || { - let sync_committee = state_id.map_state(&chain, |state| { - let current_epoch = state.current_epoch(); - let epoch = query.epoch.unwrap_or(current_epoch); - state - .get_built_sync_committee(epoch, &chain.spec) - .map(|committee| committee.clone()) - .map_err(|e| match e { - BeaconStateError::SyncCommitteeNotKnown { .. } => { - warp_utils::reject::custom_bad_request(format!( + let (sync_committee, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let current_epoch = state.current_epoch(); + let epoch = query.epoch.unwrap_or(current_epoch); + Ok(( + state + .get_built_sync_committee(epoch, &chain.spec) + .map(|committee| committee.clone()) + .map_err(|e| match e { + BeaconStateError::SyncCommitteeNotKnown { .. 
} => { + warp_utils::reject::custom_bad_request(format!( "state at epoch {} has no sync committee for epoch {}", current_epoch, epoch )) - } - BeaconStateError::IncorrectStateVariant => { - warp_utils::reject::custom_bad_request(format!( - "state at epoch {} is not activated for Altair", - current_epoch, - )) - } - e => warp_utils::reject::beacon_state_error(e), - }) - })?; + } + BeaconStateError::IncorrectStateVariant => { + warp_utils::reject::custom_bad_request(format!( + "state at epoch {} is not activated for Altair", + current_epoch, + )) + } + e => warp_utils::reject::beacon_state_error(e), + })?, + execution_optimistic, + )) + }, + )?; let validators = chain .validator_indices(sync_committee.pubkeys.iter()) @@ -793,7 +863,8 @@ pub fn serve<T: BeaconChainTypes>( validator_aggregates, }; - Ok(api_types::GenericResponse::from(response)) + Ok(api_types::GenericResponse::from(response) + .add_execution_optimistic(execution_optimistic)) }) }, ); @@ -805,7 +876,7 @@ pub fn serve<T: BeaconChainTypes>( // things. Returning non-canonical things is hard for us since we don't already have a // mechanism for arbitrary forwards block iteration, we only support iterating forwards along // the canonical chain. - let get_beacon_headers = eth1_v1 + let get_beacon_headers = eth_v1 .and(warp::path("beacon")) .and(warp::path("headers")) .and(warp::query::<api_types::HeadersQuery>()) @@ -814,15 +885,24 @@ pub fn serve<T: BeaconChainTypes>( .and_then( |query: api_types::HeadersQuery, chain: Arc<BeaconChain<T>>| { blocking_json_task(move || { - let (root, block) = match (query.slot, query.parent_root) { + let (root, block, execution_optimistic) = match (query.slot, query.parent_root) + { // No query parameters, return the canonical head block. 
(None, None) => { - let block = chain.head_beacon_block(); - (block.canonical_root(), block.clone_as_blinded()) + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + ( + cached_head.head_block_root(), + cached_head.snapshot.beacon_block.clone_as_blinded(), + execution_status.is_optimistic(), + ) } // Only the parent root parameter, do a forwards-iterator lookup. (None, Some(parent_root)) => { - let parent = BlockId::from_root(parent_root).blinded_block(&chain)?; + let (parent, execution_optimistic) = + BlockId::from_root(parent_root).blinded_block(&chain)?; let (root, _slot) = chain .forwards_iter_block_roots(parent.slot()) .map_err(warp_utils::reject::beacon_chain_error)? @@ -841,13 +921,21 @@ pub fn serve<T: BeaconChainTypes>( BlockId::from_root(root) .blinded_block(&chain) - .map(|block| (root, block))? + // Ignore this `execution_optimistic` since the first value has + // more information about the original request. + .map(|(block, _execution_optimistic)| { + (root, block, execution_optimistic) + })? } // Slot is supplied, search by slot and optionally filter by // parent root. (Some(slot), parent_root_opt) => { - let root = BlockId::from_slot(slot).root(&chain)?; - let block = BlockId::from_root(root).blinded_block(&chain)?; + let (root, execution_optimistic) = + BlockId::from_slot(slot).root(&chain)?; + // Ignore the second `execution_optimistic`, the first one is the + // most relevant since it knows that we queried by slot. + let (block, _execution_optimistic) = + BlockId::from_root(root).blinded_block(&chain)?; // If the parent root was supplied, check that it matches the block // obtained via a slot lookup. 
@@ -860,7 +948,7 @@ pub fn serve<T: BeaconChainTypes>( } } - (root, block) + (root, block, execution_optimistic) } }; @@ -873,13 +961,14 @@ pub fn serve<T: BeaconChainTypes>( }, }; - Ok(api_types::GenericResponse::from(vec![data])) + Ok(api_types::GenericResponse::from(vec![data]) + .add_execution_optimistic(execution_optimistic)) }) }, ); // GET beacon/headers/{block_id} - let get_beacon_headers_block_id = eth1_v1 + let get_beacon_headers_block_id = eth_v1 .and(warp::path("beacon")) .and(warp::path("headers")) .and(warp::path::param::<BlockId>().or_else(|_| async { @@ -891,8 +980,11 @@ pub fn serve<T: BeaconChainTypes>( .and(chain_filter.clone()) .and_then(|block_id: BlockId, chain: Arc<BeaconChain<T>>| { blocking_json_task(move || { - let root = block_id.root(&chain)?; - let block = BlockId::from_root(root).blinded_block(&chain)?; + let (root, execution_optimistic) = block_id.root(&chain)?; + // Ignore the second `execution_optimistic` since the first one has more + // information about the original request. 
+ let (block, _execution_optimistic) = + BlockId::from_root(root).blinded_block(&chain)?; let canonical = chain .block_root_at_slot(block.slot(), WhenSlotSkipped::None) @@ -908,7 +1000,10 @@ pub fn serve<T: BeaconChainTypes>( }, }; - Ok(api_types::GenericResponse::from(data)) + Ok(api_types::ExecutionOptimisticResponse { + execution_optimistic: Some(execution_optimistic), + data, + }) }) }); @@ -917,7 +1012,7 @@ pub fn serve<T: BeaconChainTypes>( */ // POST beacon/blocks - let post_beacon_blocks = eth1_v1 + let post_beacon_blocks = eth_v1 .and(warp::path("beacon")) .and(warp::path("blocks")) .and(warp::path::end()) @@ -1013,7 +1108,7 @@ pub fn serve<T: BeaconChainTypes>( */ // POST beacon/blocks - let post_beacon_blinded_blocks = eth1_v1 + let post_beacon_blinded_blocks = eth_v1 .and(warp::path("beacon")) .and(warp::path("blinded_blocks")) .and(warp::path::end()) @@ -1115,7 +1210,7 @@ pub fn serve<T: BeaconChainTypes>( )) }); - let beacon_blocks_path_v1 = eth1_v1 + let beacon_blocks_path_v1 = eth_v1 .and(warp::path("beacon")) .and(warp::path("blocks")) .and(block_id_or_err) @@ -1138,10 +1233,11 @@ pub fn serve<T: BeaconChainTypes>( chain: Arc<BeaconChain<T>>, accept_header: Option<api_types::Accept>| { async move { - let block = block_id.full_block(&chain).await?; + let (block, execution_optimistic) = block_id.full_block(&chain).await?; let fork_name = block .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; + match accept_header { Some(api_types::Accept::Ssz) => Response::builder() .status(200) @@ -1153,8 +1249,13 @@ pub fn serve<T: BeaconChainTypes>( e )) }), - _ => fork_versioned_response(endpoint_version, fork_name, block) - .map(|res| warp::reply::json(&res).into_response()), + _ => execution_optimistic_fork_versioned_response( + endpoint_version, + fork_name, + execution_optimistic, + block, + ) + .map(|res| warp::reply::json(&res).into_response()), } .map(|resp| add_consensus_version_header(resp, fork_name)) } @@ -1168,10 +1269,12 @@ pub fn 
serve<T: BeaconChainTypes>( .and(warp::path::end()) .and_then(|block_id: BlockId, chain: Arc<BeaconChain<T>>| { blocking_json_task(move || { - block_id - .root(&chain) - .map(api_types::RootData::from) - .map(api_types::GenericResponse::from) + let (block, execution_optimistic) = block_id.blinded_block(&chain)?; + + Ok(api_types::GenericResponse::from(api_types::RootData::from( + block.canonical_root(), + )) + .add_execution_optimistic(execution_optimistic)) }) }); @@ -1182,10 +1285,12 @@ pub fn serve<T: BeaconChainTypes>( .and(warp::path::end()) .and_then(|block_id: BlockId, chain: Arc<BeaconChain<T>>| { blocking_json_task(move || { - block_id - .blinded_block(&chain) - .map(|block| block.message().body().attestations().clone()) - .map(api_types::GenericResponse::from) + let (block, execution_optimistic) = block_id.blinded_block(&chain)?; + + Ok( + api_types::GenericResponse::from(block.message().body().attestations().clone()) + .add_execution_optimistic(execution_optimistic), + ) }) }); @@ -1193,7 +1298,7 @@ pub fn serve<T: BeaconChainTypes>( * beacon/pool */ - let beacon_pool_path = eth1_v1 + let beacon_pool_path = eth_v1 .and(warp::path("beacon")) .and(warp::path("pool")) .and(chain_filter.clone()); @@ -1519,7 +1624,7 @@ pub fn serve<T: BeaconChainTypes>( * config */ - let config_path = eth1_v1.and(warp::path("config")); + let config_path = eth_v1.and(warp::path("config")); // GET config/fork_schedule let get_config_fork_schedule = config_path @@ -1593,7 +1698,10 @@ pub fn serve<T: BeaconChainTypes>( chain: Arc<BeaconChain<T>>| { blocking_task(move || match accept_header { Some(api_types::Accept::Ssz) => { - let state = state_id.state(&chain)?; + // We can ignore the optimistic status for the "fork" since it's a + // specification constant that doesn't change across competing heads of the + // beacon chain. 
+ let (state, _execution_optimistic) = state_id.state(&chain)?; let fork_name = state .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -1609,44 +1717,71 @@ pub fn serve<T: BeaconChainTypes>( )) }) } - _ => state_id.map_state(&chain, |state| { - let fork_name = state - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - let res = fork_versioned_response(endpoint_version, fork_name, &state)?; - Ok(add_consensus_version_header( - warp::reply::json(&res).into_response(), - fork_name, - )) - }), + _ => state_id.map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let fork_name = state + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + let res = execution_optimistic_fork_versioned_response( + endpoint_version, + fork_name, + execution_optimistic, + &state, + )?; + Ok(add_consensus_version_header( + warp::reply::json(&res).into_response(), + fork_name, + )) + }, + ), }) }, ); // GET debug/beacon/heads - let get_debug_beacon_heads = eth1_v1 + let get_debug_beacon_heads = any_version .and(warp::path("debug")) .and(warp::path("beacon")) .and(warp::path("heads")) .and(warp::path::end()) .and(chain_filter.clone()) - .and_then(|chain: Arc<BeaconChain<T>>| { - blocking_json_task(move || { - let heads = chain - .heads() - .into_iter() - .map(|(root, slot)| api_types::ChainHeadData { slot, root }) - .collect::<Vec<_>>(); - Ok(api_types::GenericResponse::from(heads)) - }) - }); + .and_then( + |endpoint_version: EndpointVersion, chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + let heads = chain + .heads() + .into_iter() + .map(|(root, slot)| { + let execution_optimistic = if endpoint_version == V1 { + None + } else if endpoint_version == V2 { + chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_block(&root) + .ok() + } else { + return Err(unsupported_version_rejection(endpoint_version)); + }; + Ok(api_types::ChainHeadData { + slot, + root, + execution_optimistic, + }) + 
}) + .collect::<Result<Vec<_>, warp::Rejection>>(); + Ok(api_types::GenericResponse::from(heads?)) + }) + }, + ); /* * node */ // GET node/identity - let get_node_identity = eth1_v1 + let get_node_identity = eth_v1 .and(warp::path("node")) .and(warp::path("identity")) .and(warp::path::end()) @@ -1684,7 +1819,7 @@ pub fn serve<T: BeaconChainTypes>( }); // GET node/version - let get_node_version = eth1_v1 + let get_node_version = eth_v1 .and(warp::path("node")) .and(warp::path("version")) .and(warp::path::end()) @@ -1697,7 +1832,7 @@ pub fn serve<T: BeaconChainTypes>( }); // GET node/syncing - let get_node_syncing = eth1_v1 + let get_node_syncing = eth_v1 .and(warp::path("node")) .and(warp::path("syncing")) .and(warp::path::end()) @@ -1726,7 +1861,7 @@ pub fn serve<T: BeaconChainTypes>( ); // GET node/health - let get_node_health = eth1_v1 + let get_node_health = eth_v1 .and(warp::path("node")) .and(warp::path("health")) .and(warp::path::end()) @@ -1751,7 +1886,7 @@ pub fn serve<T: BeaconChainTypes>( }); // GET node/peers/{peer_id} - let get_node_peers_by_id = eth1_v1 + let get_node_peers_by_id = eth_v1 .and(warp::path("node")) .and(warp::path("peers")) .and(warp::path::param::<String>()) @@ -1808,7 +1943,7 @@ pub fn serve<T: BeaconChainTypes>( ); // GET node/peers - let get_node_peers = eth1_v1 + let get_node_peers = eth_v1 .and(warp::path("node")) .and(warp::path("peers")) .and(warp::path::end()) @@ -1877,7 +2012,7 @@ pub fn serve<T: BeaconChainTypes>( ); // GET node/peer_count - let get_node_peer_count = eth1_v1 + let get_node_peer_count = eth_v1 .and(warp::path("node")) .and(warp::path("peer_count")) .and(warp::path::end()) @@ -1918,7 +2053,7 @@ pub fn serve<T: BeaconChainTypes>( */ // GET validator/duties/proposer/{epoch} - let get_validator_duties_proposer = eth1_v1 + let get_validator_duties_proposer = eth_v1 .and(warp::path("validator")) .and(warp::path("duties")) .and(warp::path("proposer")) @@ -2061,7 +2196,7 @@ pub fn serve<T: BeaconChainTypes>( ); // GET 
validator/attestation_data?slot,committee_index - let get_validator_attestation_data = eth1_v1 + let get_validator_attestation_data = eth_v1 .and(warp::path("validator")) .and(warp::path("attestation_data")) .and(warp::path::end()) @@ -2093,7 +2228,7 @@ pub fn serve<T: BeaconChainTypes>( ); // GET validator/aggregate_attestation?attestation_data_root,slot - let get_validator_aggregate_attestation = eth1_v1 + let get_validator_aggregate_attestation = eth_v1 .and(warp::path("validator")) .and(warp::path("aggregate_attestation")) .and(warp::path::end()) @@ -2125,7 +2260,7 @@ pub fn serve<T: BeaconChainTypes>( ); // POST validator/duties/attester/{epoch} - let post_validator_duties_attester = eth1_v1 + let post_validator_duties_attester = eth_v1 .and(warp::path("validator")) .and(warp::path("duties")) .and(warp::path("attester")) @@ -2147,7 +2282,7 @@ pub fn serve<T: BeaconChainTypes>( ); // POST validator/duties/sync - let post_validator_duties_sync = eth1_v1 + let post_validator_duties_sync = eth_v1 .and(warp::path("validator")) .and(warp::path("duties")) .and(warp::path("sync")) @@ -2169,7 +2304,7 @@ pub fn serve<T: BeaconChainTypes>( ); // GET validator/sync_committee_contribution - let get_validator_sync_committee_contribution = eth1_v1 + let get_validator_sync_committee_contribution = eth_v1 .and(warp::path("validator")) .and(warp::path("sync_committee_contribution")) .and(warp::path::end()) @@ -2192,7 +2327,7 @@ pub fn serve<T: BeaconChainTypes>( ); // POST validator/aggregate_and_proofs - let post_validator_aggregate_and_proofs = eth1_v1 + let post_validator_aggregate_and_proofs = eth_v1 .and(warp::path("validator")) .and(warp::path("aggregate_and_proofs")) .and(warp::path::end()) @@ -2292,7 +2427,7 @@ pub fn serve<T: BeaconChainTypes>( }, ); - let post_validator_contribution_and_proofs = eth1_v1 + let post_validator_contribution_and_proofs = eth_v1 .and(warp::path("validator")) .and(warp::path("contribution_and_proofs")) .and(warp::path::end()) @@ -2319,7 
+2454,7 @@ pub fn serve<T: BeaconChainTypes>( ); // POST validator/beacon_committee_subscriptions - let post_validator_beacon_committee_subscriptions = eth1_v1 + let post_validator_beacon_committee_subscriptions = eth_v1 .and(warp::path("validator")) .and(warp::path("beacon_committee_subscriptions")) .and(warp::path::end()) @@ -2359,7 +2494,7 @@ pub fn serve<T: BeaconChainTypes>( ); // POST validator/prepare_beacon_proposer - let post_validator_prepare_beacon_proposer = eth1_v1 + let post_validator_prepare_beacon_proposer = eth_v1 .and(warp::path("validator")) .and(warp::path("prepare_beacon_proposer")) .and(warp::path::end()) @@ -2407,7 +2542,7 @@ pub fn serve<T: BeaconChainTypes>( ); // POST validator/register_validator - let post_validator_register_validator = eth1_v1 + let post_validator_register_validator = eth_v1 .and(warp::path("validator")) .and(warp::path("register_validator")) .and(warp::path::end()) @@ -2480,7 +2615,7 @@ pub fn serve<T: BeaconChainTypes>( }, ); // POST validator/sync_committee_subscriptions - let post_validator_sync_committee_subscriptions = eth1_v1 + let post_validator_sync_committee_subscriptions = eth_v1 .and(warp::path("validator")) .and(warp::path("sync_committee_subscriptions")) .and(warp::path::end()) @@ -2760,7 +2895,8 @@ pub fn serve<T: BeaconChainTypes>( .and(chain_filter.clone()) .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| { blocking_task(move || { - let state = state_id.state(&chain)?; + // This debug endpoint provides no indication of optimistic status. 
+ let (state, _execution_optimistic) = state_id.state(&chain)?; Response::builder() .status(200) .header("Content-Type", "application/ssz") @@ -2899,7 +3035,7 @@ pub fn serve<T: BeaconChainTypes>( ))) }); - let get_events = eth1_v1 + let get_events = eth_v1 .and(warp::path("events")) .and(warp::path::end()) .and(multi_key_query::<api_types::EventQuery>()) diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index bddae55549..13788a07b2 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -55,10 +55,16 @@ pub fn proposer_duties<T: BeaconChainTypes>( .safe_add(1) .map_err(warp_utils::reject::arith_error)? { - let (proposers, dependent_root, _execution_status, _fork) = + let (proposers, dependent_root, execution_status, _fork) = compute_proposer_duties_from_head(request_epoch, chain) .map_err(warp_utils::reject::beacon_chain_error)?; - convert_to_api_response(chain, request_epoch, dependent_root, proposers) + convert_to_api_response( + chain, + request_epoch, + dependent_root, + execution_status.is_optimistic(), + proposers, + ) } else if request_epoch > current_epoch .safe_add(1) @@ -88,17 +94,18 @@ fn try_proposer_duties_from_cache<T: BeaconChainTypes>( request_epoch: Epoch, chain: &BeaconChain<T>, ) -> Result<Option<ApiDuties>, warp::reject::Rejection> { - let (head_slot, head_block_root, head_decision_root) = { - let head = chain.canonical_head.cached_head(); - let head_block_root = head.head_block_root(); - let decision_root = head - .snapshot - .beacon_state - .proposer_shuffling_decision_root(head_block_root) - .map_err(warp_utils::reject::beacon_state_error)?; - (head.head_slot(), head_block_root, decision_root) - }; - let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch()); + let head = chain.canonical_head.cached_head(); + let head_block = &head.snapshot.beacon_block; + let head_block_root = head.head_block_root(); + let head_decision_root = 
head + .snapshot + .beacon_state + .proposer_shuffling_decision_root(head_block_root) + .map_err(warp_utils::reject::beacon_state_error)?; + let head_epoch = head_block.slot().epoch(T::EthSpec::slots_per_epoch()); + let execution_optimistic = chain + .is_optimistic_head_block(head_block) + .map_err(warp_utils::reject::beacon_chain_error)?; let dependent_root = match head_epoch.cmp(&request_epoch) { // head_epoch == request_epoch @@ -120,7 +127,13 @@ fn try_proposer_duties_from_cache<T: BeaconChainTypes>( .get_epoch::<T::EthSpec>(dependent_root, request_epoch) .cloned() .map(|indices| { - convert_to_api_response(chain, request_epoch, dependent_root, indices.to_vec()) + convert_to_api_response( + chain, + request_epoch, + dependent_root, + execution_optimistic, + indices.to_vec(), + ) }) .transpose() } @@ -139,7 +152,7 @@ fn compute_and_cache_proposer_duties<T: BeaconChainTypes>( current_epoch: Epoch, chain: &BeaconChain<T>, ) -> Result<ApiDuties, warp::reject::Rejection> { - let (indices, dependent_root, _execution_status, fork) = + let (indices, dependent_root, execution_status, fork) = compute_proposer_duties_from_head(current_epoch, chain) .map_err(warp_utils::reject::beacon_chain_error)?; @@ -151,7 +164,13 @@ fn compute_and_cache_proposer_duties<T: BeaconChainTypes>( .map_err(BeaconChainError::from) .map_err(warp_utils::reject::beacon_chain_error)?; - convert_to_api_response(chain, current_epoch, dependent_root, indices) + convert_to_api_response( + chain, + current_epoch, + dependent_root, + execution_status.is_optimistic(), + indices, + ) } /// Compute some proposer duties by reading a `BeaconState` from disk, completely ignoring the @@ -162,31 +181,37 @@ fn compute_historic_proposer_duties<T: BeaconChainTypes>( ) -> Result<ApiDuties, warp::reject::Rejection> { // If the head is quite old then it might still be relevant for a historical request. // - // Use the `with_head` function to read & clone in a single call to avoid race conditions. 
- let state_opt = chain - .with_head(|head| { - if head.beacon_state.current_epoch() <= epoch { - Ok(Some(( - head.beacon_state_root(), - head.beacon_state - .clone_with(CloneConfig::committee_caches_only()), - ))) - } else { - Ok(None) - } - }) - .map_err(warp_utils::reject::beacon_chain_error)?; - - let state = if let Some((state_root, mut state)) = state_opt { - // If we've loaded the head state it might be from a previous epoch, ensure it's in a - // suitable epoch. - ensure_state_is_in_epoch(&mut state, state_root, epoch, &chain.spec) + // Avoid holding the `cached_head` longer than necessary. + let state_opt = { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() .map_err(warp_utils::reject::beacon_chain_error)?; - state - } else { - StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)? + let head = &cached_head.snapshot; + + if head.beacon_state.current_epoch() <= epoch { + Some(( + head.beacon_state_root(), + head.beacon_state + .clone_with(CloneConfig::committee_caches_only()), + execution_status.is_optimistic(), + )) + } else { + None + } }; + let (state, execution_optimistic) = + if let Some((state_root, mut state, execution_optimistic)) = state_opt { + // If we've loaded the head state it might be from a previous epoch, ensure it's in a + // suitable epoch. + ensure_state_is_in_epoch(&mut state, state_root, epoch, &chain.spec) + .map_err(warp_utils::reject::beacon_chain_error)?; + (state, execution_optimistic) + } else { + StateId::from_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)? + }; + // Ensure the state lookup was correct. 
if state.current_epoch() != epoch { return Err(warp_utils::reject::custom_server_error(format!( @@ -208,7 +233,7 @@ fn compute_historic_proposer_duties<T: BeaconChainTypes>( .map_err(BeaconChainError::from) .map_err(warp_utils::reject::beacon_chain_error)?; - convert_to_api_response(chain, epoch, dependent_root, indices) + convert_to_api_response(chain, epoch, dependent_root, execution_optimistic, indices) } /// Converts the internal representation of proposer duties into one that is compatible with the @@ -217,6 +242,7 @@ fn convert_to_api_response<T: BeaconChainTypes>( chain: &BeaconChain<T>, epoch: Epoch, dependent_root: Hash256, + execution_optimistic: bool, indices: Vec<usize>, ) -> Result<ApiDuties, warp::reject::Rejection> { let index_to_pubkey_map = chain @@ -251,6 +277,7 @@ fn convert_to_api_response<T: BeaconChainTypes>( } else { Ok(api_types::DutiesResponse { dependent_root, + execution_optimistic: Some(execution_optimistic), data: proposer_data, }) } diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 8604c91899..af47c242d6 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -1,14 +1,17 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use crate::ExecutionOptimistic; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::StateId as CoreStateId; +use std::fmt; use std::str::FromStr; -use types::{BeaconState, EthSpec, Fork, Hash256, Slot}; +use types::{BeaconState, Checkpoint, EthSpec, Fork, Hash256, Slot}; /// Wraps `eth2::types::StateId` and provides common state-access functionality. E.g., reading /// states or parts of states from the database. 
-pub struct StateId(CoreStateId); +#[derive(Debug)] +pub struct StateId(pub CoreStateId); impl StateId { - pub fn slot(slot: Slot) -> Self { + pub fn from_slot(slot: Slot) -> Self { Self(CoreStateId::Slot(slot)) } @@ -16,54 +19,128 @@ impl StateId { pub fn root<T: BeaconChainTypes>( &self, chain: &BeaconChain<T>, - ) -> Result<Hash256, warp::Rejection> { - let slot = match &self.0 { - CoreStateId::Head => return Ok(chain.canonical_head.cached_head().head_state_root()), - CoreStateId::Genesis => return Ok(chain.genesis_state_root), - CoreStateId::Finalized => chain - .canonical_head - .cached_head() - .finalized_checkpoint() - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - CoreStateId::Justified => chain - .canonical_head - .cached_head() - .justified_checkpoint() - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - CoreStateId::Slot(slot) => *slot, - CoreStateId::Root(root) => return Ok(*root), + ) -> Result<(Hash256, ExecutionOptimistic), warp::Rejection> { + let (slot, execution_optimistic) = match &self.0 { + CoreStateId::Head => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + return Ok(( + cached_head.head_state_root(), + execution_status.is_optimistic(), + )); + } + CoreStateId::Genesis => return Ok((chain.genesis_state_root, false)), + CoreStateId::Finalized => { + let finalized_checkpoint = + chain.canonical_head.cached_head().finalized_checkpoint(); + checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)? + } + CoreStateId::Justified => { + let justified_checkpoint = + chain.canonical_head.cached_head().justified_checkpoint(); + checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)? 
+ } + CoreStateId::Slot(slot) => ( + *slot, + chain + .is_optimistic_head() + .map_err(warp_utils::reject::beacon_chain_error)?, + ), + CoreStateId::Root(root) => { + if let Some(hot_summary) = chain + .store + .load_hot_state_summary(root) + .map_err(BeaconChainError::DBError) + .map_err(warp_utils::reject::beacon_chain_error)? + { + let execution_optimistic = chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_block_no_fallback(&hot_summary.latest_block_root) + .map_err(BeaconChainError::ForkChoiceError) + .map_err(warp_utils::reject::beacon_chain_error)?; + return Ok((*root, execution_optimistic)); + } else if let Some(_cold_state_slot) = chain + .store + .load_cold_state_slot(root) + .map_err(BeaconChainError::DBError) + .map_err(warp_utils::reject::beacon_chain_error)? + { + let fork_choice = chain.canonical_head.fork_choice_read_lock(); + let finalized_root = fork_choice + .cached_fork_choice_view() + .finalized_checkpoint + .root; + let execution_optimistic = fork_choice + .is_optimistic_block_no_fallback(&finalized_root) + .map_err(BeaconChainError::ForkChoiceError) + .map_err(warp_utils::reject::beacon_chain_error)?; + return Ok((*root, execution_optimistic)); + } else { + return Err(warp_utils::reject::custom_not_found(format!( + "beacon state for state root {}", + root + ))); + } + } }; - chain + let root = chain .state_root_at_slot(slot) .map_err(warp_utils::reject::beacon_chain_error)? .ok_or_else(|| { warp_utils::reject::custom_not_found(format!("beacon state at slot {}", slot)) - }) + })?; + + Ok((root, execution_optimistic)) } /// Return the `fork` field of the state identified by `self`. + /// Also returns the `execution_optimistic` value of the state. 
+ pub fn fork_and_execution_optimistic<T: BeaconChainTypes>( + &self, + chain: &BeaconChain<T>, + ) -> Result<(Fork, bool), warp::Rejection> { + self.map_state_and_execution_optimistic(chain, |state, execution_optimistic| { + Ok((state.fork(), execution_optimistic)) + }) + } + + /// Convenience function to compute `fork` when `execution_optimistic` isn't desired. pub fn fork<T: BeaconChainTypes>( &self, chain: &BeaconChain<T>, ) -> Result<Fork, warp::Rejection> { - self.map_state(chain, |state| Ok(state.fork())) + self.fork_and_execution_optimistic(chain) + .map(|(fork, _)| fork) } /// Return the `BeaconState` identified by `self`. pub fn state<T: BeaconChainTypes>( &self, chain: &BeaconChain<T>, - ) -> Result<BeaconState<T::EthSpec>, warp::Rejection> { - let (state_root, slot_opt) = match &self.0 { - CoreStateId::Head => return Ok(chain.head_beacon_state_cloned()), + ) -> Result<(BeaconState<T::EthSpec>, ExecutionOptimistic), warp::Rejection> { + let ((state_root, execution_optimistic), slot_opt) = match &self.0 { + CoreStateId::Head => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + return Ok(( + cached_head + .snapshot + .beacon_state + .clone_with_only_committee_caches(), + execution_status.is_optimistic(), + )); + } CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)), _ => (self.root(chain)?, None), }; - chain + let state = chain .get_state(&state_root, slot_opt) .map_err(warp_utils::reject::beacon_chain_error) .and_then(|opt| { @@ -73,13 +150,17 @@ impl StateId { state_root )) }) - }) + })?; + + Ok((state, execution_optimistic)) } + /* /// Map a function across the `BeaconState` identified by `self`. /// /// This function will avoid instantiating/copying a new state when `self` points to the head /// of the chain. 
+ #[allow(dead_code)] pub fn map_state<T: BeaconChainTypes, F, U>( &self, chain: &BeaconChain<T>, @@ -95,6 +176,36 @@ impl StateId { _ => func(&self.state(chain)?), } } + */ + + /// Functions the same as `map_state` but additionally computes the value of + /// `execution_optimistic` of the state identified by `self`. + /// + /// This is to avoid re-instantiating `state` unnecessarily. + pub fn map_state_and_execution_optimistic<T: BeaconChainTypes, F, U>( + &self, + chain: &BeaconChain<T>, + func: F, + ) -> Result<U, warp::Rejection> + where + F: Fn(&BeaconState<T::EthSpec>, bool) -> Result<U, warp::Rejection>, + { + let (state, execution_optimistic) = match &self.0 { + CoreStateId::Head => { + let (head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + return func( + &head.snapshot.beacon_state, + execution_status.is_optimistic(), + ); + } + _ => self.state(chain)?, + }; + + func(&state, execution_optimistic) + } } impl FromStr for StateId { @@ -104,3 +215,35 @@ impl FromStr for StateId { CoreStateId::from_str(s).map(Self) } } + +impl fmt::Display for StateId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +/// Returns the first slot of the checkpoint's `epoch` and the execution status of the checkpoint's +/// `root`. +pub fn checkpoint_slot_and_execution_optimistic<T: BeaconChainTypes>( + chain: &BeaconChain<T>, + checkpoint: Checkpoint, +) -> Result<(Slot, ExecutionOptimistic), warp::reject::Rejection> { + let slot = checkpoint.epoch.start_slot(T::EthSpec::slots_per_epoch()); + let fork_choice = chain.canonical_head.fork_choice_read_lock(); + let finalized_checkpoint = fork_choice.cached_fork_choice_view().finalized_checkpoint; + + // If the checkpoint is pre-finalization, just use the optimistic status of the finalized + // block. 
+ let root = if checkpoint.epoch < finalized_checkpoint.epoch { + &finalized_checkpoint.root + } else { + &checkpoint.root + }; + + let execution_optimistic = fork_choice + .is_optimistic_block_no_fallback(root) + .map_err(BeaconChainError::ForkChoiceError) + .map_err(warp_utils::reject::beacon_chain_error)?; + + Ok((slot, execution_optimistic)) +} diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index 3ebc3c4ec8..54a3e075d3 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -22,7 +22,7 @@ use types::{ }; /// The struct that is returned to the requesting HTTP client. -type SyncDuties = api_types::GenericResponse<Vec<SyncDuty>>; +type SyncDuties = api_types::ExecutionOptimisticResponse<Vec<SyncDuty>>; /// Handles a request from the HTTP API for sync committee duties. pub fn sync_committee_duties<T: BeaconChainTypes>( @@ -34,14 +34,20 @@ pub fn sync_committee_duties<T: BeaconChainTypes>( altair_fork_epoch } else { // Empty response for networks with Altair disabled. - return Ok(convert_to_response(vec![])); + return Ok(convert_to_response(vec![], false)); }; + // Even when computing duties from state, any block roots pulled using the request epoch are + // still dependent on the head. So using `is_optimistic_head` is fine for both cases. + let execution_optimistic = chain + .is_optimistic_head() + .map_err(warp_utils::reject::beacon_chain_error)?; + // Try using the head's sync committees to satisfy the request. This should be sufficient for // the vast majority of requests. Rather than checking if we think the request will succeed in a // way prone to data races, we attempt the request immediately and check the error code. 
match chain.sync_committee_duties_from_head(request_epoch, request_indices) { - Ok(duties) => return Ok(convert_to_response(duties)), + Ok(duties) => return Ok(convert_to_response(duties, execution_optimistic)), Err(BeaconChainError::SyncDutiesError(BeaconStateError::SyncCommitteeNotKnown { .. })) @@ -60,7 +66,7 @@ pub fn sync_committee_duties<T: BeaconChainTypes>( )), e => warp_utils::reject::beacon_chain_error(e), })?; - Ok(convert_to_response(duties)) + Ok(convert_to_response(duties, execution_optimistic)) } /// Slow path for duties: load a state and use it to compute the duties. @@ -117,8 +123,9 @@ fn duties_from_state_load<T: BeaconChainTypes>( } } -fn convert_to_response(duties: Vec<Option<SyncDuty>>) -> SyncDuties { +fn convert_to_response(duties: Vec<Option<SyncDuty>>, execution_optimistic: bool) -> SyncDuties { api_types::GenericResponse::from(duties.into_iter().flatten().collect::<Vec<_>>()) + .add_execution_optimistic(execution_optimistic) } /// Receive sync committee duties, storing them in the pools & broadcasting them. diff --git a/beacon_node/http_api/src/validator_inclusion.rs b/beacon_node/http_api/src/validator_inclusion.rs index 48dfc17ffa..917e85e649 100644 --- a/beacon_node/http_api/src/validator_inclusion.rs +++ b/beacon_node/http_api/src/validator_inclusion.rs @@ -16,7 +16,10 @@ fn end_of_epoch_state<T: BeaconChainTypes>( chain: &BeaconChain<T>, ) -> Result<BeaconState<T::EthSpec>, warp::reject::Rejection> { let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch()); - StateId::slot(target_slot).state(chain) + // The execution status is not returned, any functions which rely upon this method might return + // optimistic information without explicitly declaring so. + let (state, _execution_status) = StateId::from_slot(target_slot).state(chain)?; + Ok(state) } /// Generate an `EpochProcessingSummary` for `state`. 
diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index 854ef0c858..87ba3a4663 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,4 +1,6 @@ -use crate::api_types::{EndpointVersion, ForkVersionedResponse}; +use crate::api_types::{ + EndpointVersion, ExecutionOptimisticForkVersionedResponse, ForkVersionedResponse, +}; use eth2::CONSENSUS_VERSION_HEADER; use serde::Serialize; use types::{ForkName, InconsistentFork}; @@ -25,6 +27,26 @@ pub fn fork_versioned_response<T: Serialize>( }) } +pub fn execution_optimistic_fork_versioned_response<T: Serialize>( + endpoint_version: EndpointVersion, + fork_name: ForkName, + execution_optimistic: bool, + data: T, +) -> Result<ExecutionOptimisticForkVersionedResponse<T>, warp::reject::Rejection> { + let fork_name = if endpoint_version == V1 { + None + } else if endpoint_version == V2 { + Some(fork_name) + } else { + return Err(unsupported_version_rejection(endpoint_version)); + }; + Ok(ExecutionOptimisticForkVersionedResponse { + version: fork_name, + execution_optimistic: Some(execution_optimistic), + data, + }) +} + /// Add the `Eth-Consensus-Version` header to a response. 
pub fn add_consensus_version_header<T: Reply>(reply: T, fork_name: ForkName) -> WithHeader<T> { reply::with_header(reply, CONSENSUS_VERSION_HEADER, fork_name.to_string()) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index b57a87dfca..37c267fd46 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -8,13 +8,15 @@ use environment::null_logger; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, - types::*, + types::{BlockId as CoreBlockId, StateId as CoreStateId, *}, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; +use http_api::{BlockId, StateId}; use lighthouse_network::{Enr, EnrExt, PeerId}; use network::NetworkMessage; +use proto_array::ExecutionStatus; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; use state_processing::per_slot_processing; @@ -25,8 +27,8 @@ use tokio::time::Duration; use tree_hash::TreeHash; use types::application_domain::ApplicationDomain; use types::{ - AggregateSignature, BeaconState, BitList, Domain, EthSpec, Hash256, Keypair, MainnetEthSpec, - RelativeEpoch, SelectionProof, SignedRoot, Slot, + AggregateSignature, BitList, Domain, EthSpec, ExecutionBlockHash, Hash256, Keypair, + MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, Slot, }; type E = MainnetEthSpec; @@ -74,6 +76,19 @@ impl ApiTester { Self::new_from_spec(spec).await } + pub async fn new_with_hard_forks(altair: bool, bellatrix: bool) -> Self { + let mut spec = E::default_spec(); + spec.shard_committee_period = 2; + // Set whether the chain has undergone each hard fork. 
+ if altair { + spec.altair_fork_epoch = Some(Epoch::new(0)); + } + if bellatrix { + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + } + Self::new_from_spec(spec).await + } + pub async fn new_from_spec(spec: ChainSpec) -> Self { let harness = Arc::new( BeaconChainHarness::builder(MainnetEthSpec) @@ -325,99 +340,43 @@ impl ApiTester { fn interesting_state_ids(&self) -> Vec<StateId> { let mut ids = vec![ - StateId::Head, - StateId::Genesis, - StateId::Finalized, - StateId::Justified, - StateId::Slot(Slot::new(0)), - StateId::Slot(Slot::new(32)), - StateId::Slot(Slot::from(SKIPPED_SLOTS[0])), - StateId::Slot(Slot::from(SKIPPED_SLOTS[1])), - StateId::Slot(Slot::from(SKIPPED_SLOTS[2])), - StateId::Slot(Slot::from(SKIPPED_SLOTS[3])), - StateId::Root(Hash256::zero()), + StateId(CoreStateId::Head), + StateId(CoreStateId::Genesis), + StateId(CoreStateId::Finalized), + StateId(CoreStateId::Justified), + StateId(CoreStateId::Slot(Slot::new(0))), + StateId(CoreStateId::Slot(Slot::new(32))), + StateId(CoreStateId::Slot(Slot::from(SKIPPED_SLOTS[0]))), + StateId(CoreStateId::Slot(Slot::from(SKIPPED_SLOTS[1]))), + StateId(CoreStateId::Slot(Slot::from(SKIPPED_SLOTS[2]))), + StateId(CoreStateId::Slot(Slot::from(SKIPPED_SLOTS[3]))), + StateId(CoreStateId::Root(Hash256::zero())), ]; - ids.push(StateId::Root( + ids.push(StateId(CoreStateId::Root( self.chain.canonical_head.cached_head().head_state_root(), - )); + ))); ids } fn interesting_block_ids(&self) -> Vec<BlockId> { let mut ids = vec![ - BlockId::Head, - BlockId::Genesis, - BlockId::Finalized, - BlockId::Justified, - BlockId::Slot(Slot::new(0)), - BlockId::Slot(Slot::new(32)), - BlockId::Slot(Slot::from(SKIPPED_SLOTS[0])), - BlockId::Slot(Slot::from(SKIPPED_SLOTS[1])), - BlockId::Slot(Slot::from(SKIPPED_SLOTS[2])), - BlockId::Slot(Slot::from(SKIPPED_SLOTS[3])), - BlockId::Root(Hash256::zero()), + BlockId(CoreBlockId::Head), + BlockId(CoreBlockId::Genesis), + BlockId(CoreBlockId::Finalized), + BlockId(CoreBlockId::Justified), + 
BlockId(CoreBlockId::Slot(Slot::new(0))), + BlockId(CoreBlockId::Slot(Slot::new(32))), + BlockId(CoreBlockId::Slot(Slot::from(SKIPPED_SLOTS[0]))), + BlockId(CoreBlockId::Slot(Slot::from(SKIPPED_SLOTS[1]))), + BlockId(CoreBlockId::Slot(Slot::from(SKIPPED_SLOTS[2]))), + BlockId(CoreBlockId::Slot(Slot::from(SKIPPED_SLOTS[3]))), + BlockId(CoreBlockId::Root(Hash256::zero())), ]; - ids.push(BlockId::Root( + ids.push(BlockId(CoreBlockId::Root( self.chain.canonical_head.cached_head().head_block_root(), - )); + ))); ids } - - fn get_state(&self, state_id: StateId) -> Option<BeaconState<E>> { - match state_id { - StateId::Head => Some( - self.chain - .head_snapshot() - .beacon_state - .clone_with_only_committee_caches(), - ), - StateId::Genesis => self - .chain - .get_state(&self.chain.genesis_state_root, None) - .unwrap(), - StateId::Finalized => { - let finalized_slot = self - .chain - .canonical_head - .cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()); - - let root = self - .chain - .state_root_at_slot(finalized_slot) - .unwrap() - .unwrap(); - - self.chain.get_state(&root, Some(finalized_slot)).unwrap() - } - StateId::Justified => { - let justified_slot = self - .chain - .canonical_head - .cached_head() - .justified_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()); - - let root = self - .chain - .state_root_at_slot(justified_slot) - .unwrap() - .unwrap(); - - self.chain.get_state(&root, Some(justified_slot)).unwrap() - } - StateId::Slot(slot) => { - let root = self.chain.state_root_at_slot(slot).unwrap().unwrap(); - - self.chain.get_state(&root, Some(slot)).unwrap() - } - StateId::Root(root) => self.chain.get_state(&root, None).unwrap(), - } - } - pub async fn test_beacon_genesis(self) -> Self { let result = self.client.get_beacon_genesis().await.unwrap().data; @@ -437,39 +396,15 @@ impl ApiTester { for state_id in self.interesting_state_ids() { let result = self .client - .get_beacon_states_root(state_id) + 
.get_beacon_states_root(state_id.0) .await .unwrap() .map(|res| res.data.root); - let expected = match state_id { - StateId::Head => Some(self.chain.canonical_head.cached_head().head_state_root()), - StateId::Genesis => Some(self.chain.genesis_state_root), - StateId::Finalized => { - let finalized_slot = self - .chain - .canonical_head - .cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()); - - self.chain.state_root_at_slot(finalized_slot).unwrap() - } - StateId::Justified => { - let justified_slot = self - .chain - .canonical_head - .cached_head() - .justified_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()); - - self.chain.state_root_at_slot(justified_slot).unwrap() - } - StateId::Slot(slot) => self.chain.state_root_at_slot(slot).unwrap(), - StateId::Root(root) => Some(root), - }; + let expected = state_id + .root(&self.chain) + .ok() + .map(|(root, _execution_optimistic)| root); assert_eq!(result, expected, "{:?}", state_id); } @@ -481,12 +416,12 @@ impl ApiTester { for state_id in self.interesting_state_ids() { let result = self .client - .get_beacon_states_fork(state_id) + .get_beacon_states_fork(state_id.0) .await .unwrap() .map(|res| res.data); - let expected = self.get_state(state_id).map(|state| state.fork()); + let expected = state_id.fork(&self.chain).ok(); assert_eq!(result, expected, "{:?}", state_id); } @@ -498,18 +433,20 @@ impl ApiTester { for state_id in self.interesting_state_ids() { let result = self .client - .get_beacon_states_finality_checkpoints(state_id) + .get_beacon_states_finality_checkpoints(state_id.0) .await .unwrap() .map(|res| res.data); - let expected = self - .get_state(state_id) - .map(|state| FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }); + let expected = + state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| 
FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }); assert_eq!(result, expected, "{:?}", state_id); } @@ -520,9 +457,9 @@ impl ApiTester { pub async fn test_beacon_states_validator_balances(self) -> Self { for state_id in self.interesting_state_ids() { for validator_indices in self.interesting_validator_indices() { - let state_opt = self.get_state(state_id); + let state_opt = state_id.state(&self.chain).ok(); let validators: Vec<Validator> = match state_opt.as_ref() { - Some(state) => state.validators().clone().into(), + Some((state, _execution_optimistic)) => state.validators().clone().into(), None => vec![], }; let validator_index_ids = validator_indices @@ -545,7 +482,7 @@ impl ApiTester { let result_index_ids = self .client .get_beacon_states_validator_balances( - state_id, + state_id.0, Some(validator_index_ids.as_slice()), ) .await @@ -554,14 +491,14 @@ impl ApiTester { let result_pubkey_ids = self .client .get_beacon_states_validator_balances( - state_id, + state_id.0, Some(validator_pubkey_ids.as_slice()), ) .await .unwrap() .map(|res| res.data); - let expected = state_opt.map(|state| { + let expected = state_opt.map(|(state, _execution_optimistic)| { let mut validators = Vec::with_capacity(validator_indices.len()); for i in validator_indices { @@ -588,7 +525,10 @@ impl ApiTester { for state_id in self.interesting_state_ids() { for statuses in self.interesting_validator_statuses() { for validator_indices in self.interesting_validator_indices() { - let state_opt = self.get_state(state_id); + let state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); let validators: Vec<Validator> = match state_opt.as_ref() { Some(state) => state.validators().clone().into(), None => vec![], @@ -613,7 +553,7 @@ impl ApiTester { let result_index_ids = self .client .get_beacon_states_validators( 
- state_id, + state_id.0, Some(validator_index_ids.as_slice()), None, ) @@ -624,7 +564,7 @@ impl ApiTester { let result_pubkey_ids = self .client .get_beacon_states_validators( - state_id, + state_id.0, Some(validator_pubkey_ids.as_slice()), None, ) @@ -675,7 +615,10 @@ impl ApiTester { pub async fn test_beacon_states_validator_id(self) -> Self { for state_id in self.interesting_state_ids() { - let state_opt = self.get_state(state_id); + let state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); let validators = match state_opt.as_ref() { Some(state) => state.validators().clone().into(), None => vec![], @@ -690,7 +633,7 @@ impl ApiTester { for validator_id in validator_ids { let result = self .client - .get_beacon_states_validator_id(state_id, validator_id) + .get_beacon_states_validator_id(state_id.0, validator_id) .await .unwrap() .map(|res| res.data); @@ -727,12 +670,15 @@ impl ApiTester { pub async fn test_beacon_states_committees(self) -> Self { for state_id in self.interesting_state_ids() { - let mut state_opt = self.get_state(state_id); + let mut state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch()); let results = self .client - .get_beacon_states_committees(state_id, None, None, epoch_opt) + .get_beacon_states_committees(state_id.0, None, None, epoch_opt) .await .unwrap() .map(|res| res.data); @@ -769,37 +715,6 @@ impl ApiTester { self } - fn get_block_root(&self, block_id: BlockId) -> Option<Hash256> { - match block_id { - BlockId::Head => Some(self.chain.canonical_head.cached_head().head_block_root()), - BlockId::Genesis => Some(self.chain.genesis_block_root), - BlockId::Finalized => Some( - self.chain - .canonical_head - .cached_head() - .finalized_checkpoint() - .root, - ), - BlockId::Justified => Some( - self.chain - .canonical_head - .cached_head() - .justified_checkpoint() - .root, - ), - 
BlockId::Slot(slot) => self - .chain - .block_root_at_slot(slot, WhenSlotSkipped::None) - .unwrap(), - BlockId::Root(root) => Some(root), - } - } - - async fn get_block(&self, block_id: BlockId) -> Option<SignedBeaconBlock<E>> { - let root = self.get_block_root(block_id)?; - self.chain.get_block(&root).await.unwrap() - } - pub async fn test_beacon_headers_all_slots(self) -> Self { for slot in 0..CHAIN_LENGTH { let slot = Slot::from(slot); @@ -877,14 +792,17 @@ impl ApiTester { for block_id in self.interesting_block_ids() { let result = self .client - .get_beacon_headers_block_id(block_id) + .get_beacon_headers_block_id(block_id.0) .await .unwrap() .map(|res| res.data); - let block_root_opt = self.get_block_root(block_id); + let block_root_opt = block_id + .root(&self.chain) + .ok() + .map(|(root, _execution_optimistic)| root); - if let BlockId::Slot(slot) = block_id { + if let CoreBlockId::Slot(slot) = block_id.0 { if block_root_opt.is_none() { assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); } else { @@ -892,11 +810,11 @@ impl ApiTester { } } - let block_opt = if let Some(root) = block_root_opt { - self.chain.get_block(&root).await.unwrap() - } else { - None - }; + let block_opt = block_id + .full_block(&self.chain) + .await + .ok() + .map(|(block, _execution_optimistic)| block); if block_opt.is_none() && result.is_none() { continue; @@ -934,13 +852,16 @@ impl ApiTester { for block_id in self.interesting_block_ids() { let result = self .client - .get_beacon_blocks_root(block_id) + .get_beacon_blocks_root(block_id.0) .await .unwrap() .map(|res| res.data.root); - let expected = self.get_block_root(block_id); - if let BlockId::Slot(slot) = block_id { + let expected = block_id + .root(&self.chain) + .ok() + .map(|(root, _execution_optimistic)| root); + if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); } else { @@ -982,9 +903,13 @@ impl ApiTester { pub async fn test_beacon_blocks(self) -> Self { for 
block_id in self.interesting_block_ids() { - let expected = self.get_block(block_id).await; + let expected = block_id + .full_block(&self.chain) + .await + .ok() + .map(|(block, _execution_optimistic)| block); - if let BlockId::Slot(slot) = block_id { + if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); } else { @@ -993,10 +918,10 @@ impl ApiTester { } // Check the JSON endpoint. - let json_result = self.client.get_beacon_blocks(block_id).await.unwrap(); + let json_result = self.client.get_beacon_blocks(block_id.0).await.unwrap(); if let (Some(json), Some(expected)) = (&json_result, &expected) { - assert_eq!(json.data, *expected, "{:?}", block_id); + assert_eq!(&json.data, expected.as_ref(), "{:?}", block_id); assert_eq!( json.version, Some(expected.fork_name(&self.chain.spec).unwrap()) @@ -1009,23 +934,28 @@ impl ApiTester { // Check the SSZ endpoint. let ssz_result = self .client - .get_beacon_blocks_ssz(block_id, &self.chain.spec) + .get_beacon_blocks_ssz(block_id.0, &self.chain.spec) .await .unwrap(); - assert_eq!(ssz_result, expected, "{:?}", block_id); + assert_eq!( + ssz_result.as_ref(), + expected.as_ref().map(|b| b.as_ref()), + "{:?}", + block_id + ); // Check that the legacy v1 API still works but doesn't return a version field. - let v1_result = self.client.get_beacon_blocks_v1(block_id).await.unwrap(); + let v1_result = self.client.get_beacon_blocks_v1(block_id.0).await.unwrap(); if let (Some(v1_result), Some(expected)) = (&v1_result, &expected) { assert_eq!(v1_result.version, None); - assert_eq!(v1_result.data, *expected); + assert_eq!(&v1_result.data, expected.as_ref()); } else { assert_eq!(v1_result, None); assert_eq!(expected, None); } // Check that version headers are provided. 
- let url = self.client.get_beacon_blocks_path(block_id).unwrap(); + let url = self.client.get_beacon_blocks_path(block_id.0).unwrap(); let builders: Vec<fn(RequestBuilder) -> RequestBuilder> = vec![ |b| b, @@ -1060,17 +990,18 @@ impl ApiTester { for block_id in self.interesting_block_ids() { let result = self .client - .get_beacon_blocks_attestations(block_id) + .get_beacon_blocks_attestations(block_id.0) .await .unwrap() .map(|res| res.data); - let expected = self - .get_block(block_id) - .await - .map(|block| block.message().body().attestations().clone().into()); + let expected = block_id.full_block(&self.chain).await.ok().map( + |(block, _execution_optimistic)| { + block.message().body().attestations().clone().into() + }, + ); - if let BlockId::Slot(slot) = block_id { + if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); } else { @@ -1473,9 +1404,16 @@ impl ApiTester { pub async fn test_get_debug_beacon_states(self) -> Self { for state_id in self.interesting_state_ids() { - let result_json = self.client.get_debug_beacon_states(state_id).await.unwrap(); + let result_json = self + .client + .get_debug_beacon_states(state_id.0) + .await + .unwrap(); - let mut expected = self.get_state(state_id); + let mut expected = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); expected.as_mut().map(|state| state.drop_all_caches()); if let (Some(json), Some(expected)) = (&result_json, &expected) { @@ -1492,7 +1430,7 @@ impl ApiTester { // Check SSZ API. let result_ssz = self .client - .get_debug_beacon_states_ssz(state_id, &self.chain.spec) + .get_debug_beacon_states_ssz(state_id.0, &self.chain.spec) .await .unwrap(); assert_eq!(result_ssz, expected, "{:?}", state_id); @@ -1500,7 +1438,7 @@ impl ApiTester { // Check legacy v1 API. 
let result_v1 = self .client - .get_debug_beacon_states_v1(state_id) + .get_debug_beacon_states_v1(state_id.0) .await .unwrap(); @@ -1513,7 +1451,10 @@ impl ApiTester { } // Check that version headers are provided. - let url = self.client.get_debug_beacon_states_path(state_id).unwrap(); + let url = self + .client + .get_debug_beacon_states_path(state_id.0) + .unwrap(); let builders: Vec<fn(RequestBuilder) -> RequestBuilder> = vec![|b| b, |b| b.accept(Accept::Ssz)]; @@ -1791,6 +1732,7 @@ impl ApiTester { let expected = DutiesResponse { data: expected_duties, + execution_optimistic: Some(false), dependent_root, }; @@ -2391,11 +2333,14 @@ impl ApiTester { for state_id in self.interesting_state_ids() { let result = self .client - .get_lighthouse_beacon_states_ssz(&state_id, &self.chain.spec) + .get_lighthouse_beacon_states_ssz(&state_id.0, &self.chain.spec) .await .unwrap(); - let mut expected = self.get_state(state_id); + let mut expected = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); expected.as_mut().map(|state| state.drop_all_caches()); assert_eq!(result, expected, "{:?}", state_id); @@ -2562,6 +2507,7 @@ impl ApiTester { let expected_block = EventKind::Block(SseBlock { block: block_root, slot: next_slot, + execution_optimistic: false, }); let expected_head = EventKind::Head(SseHead { @@ -2575,6 +2521,7 @@ impl ApiTester { .unwrap() .unwrap(), epoch_transition: true, + execution_optimistic: false, }); let finalized_block_root = self @@ -2593,6 +2540,7 @@ impl ApiTester { block: finalized_block_root, state: finalized_state_root, epoch: Epoch::new(3), + execution_optimistic: false, }); self.client @@ -2621,6 +2569,7 @@ impl ApiTester { new_head_block: self.reorg_block.canonical_root(), new_head_state: self.reorg_block.state_root(), epoch: self.next_block.slot().epoch(E::slots_per_epoch()), + execution_optimistic: false, }); self.client @@ -2687,6 +2636,7 @@ impl ApiTester { let expected_block = EventKind::Block(SseBlock { 
block: block_root, slot: next_slot, + execution_optimistic: false, }); let expected_head = EventKind::Head(SseHead { @@ -2696,6 +2646,7 @@ impl ApiTester { current_duty_dependent_root: self.chain.genesis_block_root, previous_duty_dependent_root: self.chain.genesis_block_root, epoch_transition: false, + execution_optimistic: false, }); self.client @@ -2708,6 +2659,40 @@ impl ApiTester { self } + + pub async fn test_check_optimistic_responses(&mut self) { + // Check responses are not optimistic. + let result = self + .client + .get_beacon_headers_block_id(CoreBlockId::Head) + .await + .unwrap() + .unwrap(); + + assert_eq!(result.execution_optimistic, Some(false)); + + // Change head to be optimistic. + self.chain + .canonical_head + .fork_choice_write_lock() + .proto_array_mut() + .core_proto_array_mut() + .nodes + .last_mut() + .map(|head_node| { + head_node.execution_status = ExecutionStatus::Optimistic(ExecutionBlockHash::zero()) + }); + + // Check responses are now optimistic. + let result = self + .client + .get_beacon_headers_block_id(CoreBlockId::Head) + .await + .unwrap() + .unwrap(); + + assert_eq!(result.execution_optimistic, Some(true)); + } } async fn poll_events<S: Stream<Item = Result<EventKind<T>, eth2::Error>> + Unpin, T: EthSpec>( @@ -3105,3 +3090,11 @@ async fn lighthouse_endpoints() { .test_post_lighthouse_liveness() .await; } + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn optimistic_responses() { + ApiTester::new_with_hard_forks(true, true) + .await + .test_check_optimistic_responses() + .await; +} diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index e66cee6fde..c4b4a64a05 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1317,7 +1317,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold> } /// Load a frozen state's slot, given its root. 
- fn load_cold_state_slot(&self, state_root: &Hash256) -> Result<Option<Slot>, Error> { + pub fn load_cold_state_slot(&self, state_root: &Hash256) -> Result<Option<Slot>, Error> { Ok(self .cold_db .get(state_root)? @@ -1583,7 +1583,7 @@ fn no_state_root_iter() -> Option<std::iter::Empty<Result<(Hash256, Slot), Error #[derive(Debug, Clone, Copy, Default, Encode, Decode)] pub struct HotStateSummary { slot: Slot, - latest_block_root: Hash256, + pub latest_block_root: Hash256, epoch_boundary_state_root: Hash256, } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 2ee3618386..1025959165 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -332,7 +332,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_root( &self, state_id: StateId, - ) -> Result<Option<GenericResponse<RootData>>, Error> { + ) -> Result<Option<ExecutionOptimisticResponse<RootData>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -351,7 +351,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_fork( &self, state_id: StateId, - ) -> Result<Option<GenericResponse<Fork>>, Error> { + ) -> Result<Option<ExecutionOptimisticResponse<Fork>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -370,7 +370,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_finality_checkpoints( &self, state_id: StateId, - ) -> Result<Option<GenericResponse<FinalityCheckpointsData>>, Error> { + ) -> Result<Option<ExecutionOptimisticResponse<FinalityCheckpointsData>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -390,7 +390,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, ids: Option<&[ValidatorId]>, - ) -> Result<Option<GenericResponse<Vec<ValidatorBalanceData>>>, Error> { + ) -> Result<Option<ExecutionOptimisticResponse<Vec<ValidatorBalanceData>>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -420,7 +420,7 @@ impl BeaconNodeHttpClient { state_id: StateId, 
ids: Option<&[ValidatorId]>, statuses: Option<&[ValidatorStatus]>, - ) -> Result<Option<GenericResponse<Vec<ValidatorData>>>, Error> { + ) -> Result<Option<ExecutionOptimisticResponse<Vec<ValidatorData>>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -460,7 +460,7 @@ impl BeaconNodeHttpClient { slot: Option<Slot>, index: Option<u64>, epoch: Option<Epoch>, - ) -> Result<Option<GenericResponse<Vec<CommitteeData>>>, Error> { + ) -> Result<Option<ExecutionOptimisticResponse<Vec<CommitteeData>>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -493,7 +493,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, epoch: Option<Epoch>, - ) -> Result<GenericResponse<SyncCommitteeByValidatorIndices>, Error> { + ) -> Result<ExecutionOptimisticResponse<SyncCommitteeByValidatorIndices>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -518,7 +518,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, validator_id: &ValidatorId, - ) -> Result<Option<GenericResponse<ValidatorData>>, Error> { + ) -> Result<Option<ExecutionOptimisticResponse<ValidatorData>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -539,7 +539,7 @@ impl BeaconNodeHttpClient { &self, slot: Option<Slot>, parent_root: Option<Hash256>, - ) -> Result<Option<GenericResponse<Vec<BlockHeaderData>>>, Error> { + ) -> Result<Option<ExecutionOptimisticResponse<Vec<BlockHeaderData>>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -566,7 +566,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_headers_block_id( &self, block_id: BlockId, - ) -> Result<Option<GenericResponse<BlockHeaderData>>, Error> { + ) -> Result<Option<ExecutionOptimisticResponse<BlockHeaderData>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -635,7 +635,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks<T: EthSpec>( &self, block_id: BlockId, - ) -> 
Result<Option<ForkVersionedResponse<SignedBeaconBlock<T>>>, Error> { + ) -> Result<Option<ExecutionOptimisticForkVersionedResponse<SignedBeaconBlock<T>>>, Error> { let path = self.get_beacon_blocks_path(block_id)?; let response = match self.get_response(path, |b| b).await.optional()? { Some(res) => res, @@ -644,20 +644,31 @@ impl BeaconNodeHttpClient { // If present, use the fork provided in the headers to decode the block. Gracefully handle // missing and malformed fork names by falling back to regular deserialisation. - let (block, version) = match response.fork_name_from_header() { + let (block, version, execution_optimistic) = match response.fork_name_from_header() { Ok(Some(fork_name)) => { - map_fork_name_with!(fork_name, SignedBeaconBlock, { - let ForkVersionedResponse { version, data } = response.json().await?; - (data, version) - }) + let (data, (version, execution_optimistic)) = + map_fork_name_with!(fork_name, SignedBeaconBlock, { + let ExecutionOptimisticForkVersionedResponse { + version, + execution_optimistic, + data, + } = response.json().await?; + (data, (version, execution_optimistic)) + }); + (data, version, execution_optimistic) } Ok(None) | Err(_) => { - let ForkVersionedResponse { version, data } = response.json().await?; - (data, version) + let ExecutionOptimisticForkVersionedResponse { + version, + execution_optimistic, + data, + } = response.json().await?; + (data, version, execution_optimistic) } }; - Ok(Some(ForkVersionedResponse { + Ok(Some(ExecutionOptimisticForkVersionedResponse { version, + execution_optimistic, data: block, })) } @@ -702,7 +713,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_root( &self, block_id: BlockId, - ) -> Result<Option<GenericResponse<RootData>>, Error> { + ) -> Result<Option<ExecutionOptimisticResponse<RootData>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -721,7 +732,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_attestations<T: EthSpec>( &self, 
block_id: BlockId, - ) -> Result<Option<GenericResponse<Vec<Attestation<T>>>>, Error> { + ) -> Result<Option<ExecutionOptimisticResponse<Vec<Attestation<T>>>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -1123,7 +1134,7 @@ impl BeaconNodeHttpClient { pub async fn get_debug_beacon_states<T: EthSpec>( &self, state_id: StateId, - ) -> Result<Option<ForkVersionedResponse<BeaconState<T>>>, Error> { + ) -> Result<Option<ExecutionOptimisticForkVersionedResponse<BeaconState<T>>>, Error> { let path = self.get_debug_beacon_states_path(state_id)?; self.get_opt(path).await } @@ -1132,7 +1143,7 @@ impl BeaconNodeHttpClient { pub async fn get_debug_beacon_states_v1<T: EthSpec>( &self, state_id: StateId, - ) -> Result<Option<ForkVersionedResponse<BeaconState<T>>>, Error> { + ) -> Result<Option<ExecutionOptimisticForkVersionedResponse<BeaconState<T>>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -1160,9 +1171,24 @@ impl BeaconNodeHttpClient { .transpose() } - /// `GET debug/beacon/heads` + /// `GET v2/debug/beacon/heads` pub async fn get_debug_beacon_heads( &self, + ) -> Result<GenericResponse<Vec<ChainHeadData>>, Error> { + let mut path = self.eth_path(V2)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("debug") + .push("beacon") + .push("heads"); + + self.get(path).await + } + + /// `GET v1/debug/beacon/heads` (LEGACY) + pub async fn get_debug_beacon_heads_v1( + &self, ) -> Result<GenericResponse<Vec<ChainHeadData>>, Error> { let mut path = self.eth_path(V1)?; @@ -1494,7 +1520,7 @@ impl BeaconNodeHttpClient { &self, epoch: Epoch, indices: &[u64], - ) -> Result<GenericResponse<Vec<SyncDuty>>, Error> { + ) -> Result<ExecutionOptimisticResponse<Vec<SyncDuty>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 8ef3582268..c78e2c6919 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -189,6 +189,14 @@ impl fmt::Display for StateId { #[serde(bound = "T: Serialize + serde::de::DeserializeOwned")] pub struct DutiesResponse<T: Serialize + serde::de::DeserializeOwned> { pub dependent_root: Hash256, + pub execution_optimistic: Option<bool>, + pub data: T, +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(bound = "T: Serialize + serde::de::DeserializeOwned")] +pub struct ExecutionOptimisticResponse<T: Serialize + serde::de::DeserializeOwned> { + pub execution_optimistic: Option<bool>, pub data: T, } @@ -204,6 +212,18 @@ impl<T: Serialize + serde::de::DeserializeOwned> From<T> for GenericResponse<T> } } +impl<T: Serialize + serde::de::DeserializeOwned> GenericResponse<T> { + pub fn add_execution_optimistic( + self, + execution_optimistic: bool, + ) -> ExecutionOptimisticResponse<T> { + ExecutionOptimisticResponse { + execution_optimistic: Some(execution_optimistic), + data: self.data, + } + } +} + #[derive(Debug, PartialEq, Clone, Serialize)] #[serde(bound = "T: Serialize")] pub struct GenericResponseRef<'a, T: Serialize> { @@ -216,6 +236,14 @@ impl<'a, T: Serialize> From<&'a T> for GenericResponseRef<'a, T> { } } +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct ExecutionOptimisticForkVersionedResponse<T> { + 
#[serde(skip_serializing_if = "Option::is_none")] + pub version: Option<ForkName>, + pub execution_optimistic: Option<bool>, + pub data: T, +} + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct ForkVersionedResponse<T> { #[serde(skip_serializing_if = "Option::is_none")] @@ -495,6 +523,8 @@ pub struct DepositContractData { pub struct ChainHeadData { pub slot: Slot, pub root: Hash256, + #[serde(skip_serializing_if = "Option::is_none")] + pub execution_optimistic: Option<bool>, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -794,6 +824,7 @@ pub struct PeerCount { pub struct SseBlock { pub slot: Slot, pub block: Hash256, + pub execution_optimistic: bool, } #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] @@ -801,6 +832,7 @@ pub struct SseFinalizedCheckpoint { pub block: Hash256, pub state: Hash256, pub epoch: Epoch, + pub execution_optimistic: bool, } #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] @@ -811,6 +843,7 @@ pub struct SseHead { pub current_duty_dependent_root: Hash256, pub previous_duty_dependent_root: Hash256, pub epoch_transition: bool, + pub execution_optimistic: bool, } #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] @@ -823,6 +856,7 @@ pub struct SseChainReorg { pub new_head_block: Hash256, pub new_head_state: Hash256, pub epoch: Epoch, + pub execution_optimistic: bool, } #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] @@ -837,6 +871,7 @@ pub struct SseLateHead { pub observed_delay: Option<Duration>, pub imported_delay: Option<Duration>, pub set_as_head_delay: Option<Duration>, + pub execution_optimistic: bool, } #[derive(PartialEq, Debug, Serialize, Clone)] diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index d06d52235f..984eeaada5 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1175,6 +1175,12 @@ where &self.proto_array } + /// Returns a mutable reference to 
`proto_array`. + /// Should only be used in testing. + pub fn proto_array_mut(&mut self) -> &mut ProtoArrayForkChoice { + &mut self.proto_array + } + /// Returns a reference to the underlying `fc_store`. pub fn fc_store(&self) -> &T { &self.fc_store From 20ebf1f3c111197b92e8f065880b03623645a1a4 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Mon, 25 Jul 2022 23:53:26 +0000 Subject: [PATCH 085/184] Realized unrealized experimentation (#3322) ## Issue Addressed Add a flag that optionally enables unrealized vote tracking. Would like to test out on testnets and benchmark differences in methods of vote tracking. This PR includes a DB schema upgrade to enable to new vote tracking style. Co-authored-by: realbigsean <sean@sigmaprime.io> Co-authored-by: Paul Hauner <paul@paulhauner.com> Co-authored-by: sean <seananderson33@gmail.com> Co-authored-by: Mac L <mjladson@pm.me> --- Cargo.lock | 2 + beacon_node/beacon_chain/src/beacon_chain.rs | 16 +- .../src/beacon_fork_choice_store.rs | 36 +- .../beacon_chain/src/block_verification.rs | 4 + beacon_node/beacon_chain/src/builder.rs | 1 + beacon_node/beacon_chain/src/chain_config.rs | 2 + beacon_node/beacon_chain/src/fork_revert.rs | 16 +- beacon_node/beacon_chain/src/lib.rs | 2 +- .../beacon_chain/src/persisted_fork_choice.rs | 10 +- beacon_node/beacon_chain/src/schema_change.rs | 31 +- .../src/schema_change/migration_schema_v10.rs | 97 ++++ .../src/schema_change/migration_schema_v7.rs | 14 +- .../beacon_chain/src/schema_change/types.rs | 147 +++++- beacon_node/beacon_chain/src/test_utils.rs | 15 +- .../beacon_chain/tests/block_verification.rs | 70 ++- .../tests/payload_invalidation.rs | 13 +- beacon_node/beacon_chain/tests/store_tests.rs | 3 +- beacon_node/beacon_chain/tests/tests.rs | 10 +- beacon_node/http_api/src/lib.rs | 9 +- .../beacon_processor/worker/gossip_methods.rs | 9 +- .../beacon_processor/worker/sync_methods.rs | 25 +- beacon_node/network/src/sync/manager.rs | 2 +- 
.../network/src/sync/range_sync/chain.rs | 13 +- .../src/sync/range_sync/chain_collection.rs | 7 +- beacon_node/src/cli.rs | 8 + beacon_node/src/config.rs | 4 + beacon_node/store/src/metadata.rs | 2 +- consensus/fork_choice/Cargo.toml | 1 + consensus/fork_choice/src/fork_choice.rs | 423 +++++++++++++----- .../fork_choice/src/fork_choice_store.rs | 12 + consensus/fork_choice/src/lib.rs | 6 +- consensus/fork_choice/tests/tests.rs | 7 +- .../src/fork_choice_test_definition.rs | 21 +- consensus/proto_array/src/proto_array.rs | 168 ++++--- .../src/proto_array_fork_choice.rs | 83 ++-- .../src/per_epoch_processing.rs | 2 + .../src/per_epoch_processing/altair.rs | 4 +- .../altair/justification_and_finalization.rs | 14 +- .../src/per_epoch_processing/base.rs | 4 +- .../base/justification_and_finalization.rs | 14 +- .../justification_and_finalization_state.rs | 115 +++++ .../weigh_justification_and_finalization.rs | 14 +- consensus/types/src/test_utils/test_random.rs | 1 + lcli/Cargo.toml | 1 + lcli/src/parse_ssz.rs | 19 +- .../ef_tests/src/cases/epoch_processing.rs | 24 +- testing/ef_tests/src/cases/fork_choice.rs | 91 +++- 47 files changed, 1254 insertions(+), 338 deletions(-) create mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs create mode 100644 consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs diff --git a/Cargo.lock b/Cargo.lock index ab75fe2aed..adffa23f57 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2125,6 +2125,7 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "proto_array", + "state_processing", "store", "tokio", "types", @@ -3008,6 +3009,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", + "snap", "state_processing", "tree_hash", "types", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index c18f4a7374..b9f9727e4c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ 
-93,6 +93,7 @@ use types::beacon_state::CloneConfig; use types::*; pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; +pub use fork_choice::CountUnrealized; pub type ForkChoiceError = fork_choice::Error<crate::ForkChoiceStoreError>; @@ -1740,6 +1741,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { self.slot()?, verified.indexed_attestation(), AttestationFromBlock::False, + &self.spec, ) .map_err(Into::into) } @@ -2220,6 +2222,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { pub async fn process_chain_segment( self: &Arc<Self>, chain_segment: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>, + count_unrealized: CountUnrealized, ) -> ChainSegmentResult<T::EthSpec> { let mut imported_blocks = 0; @@ -2284,7 +2287,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> { // Import the blocks into the chain. for signature_verified_block in signature_verified_blocks { - match self.process_block(signature_verified_block).await { + match self + .process_block(signature_verified_block, count_unrealized) + .await + { Ok(_) => imported_blocks += 1, Err(error) => { return ChainSegmentResult::Failed { @@ -2368,6 +2374,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { pub async fn process_block<B: IntoExecutionPendingBlock<T>>( self: &Arc<Self>, unverified_block: B, + count_unrealized: CountUnrealized, ) -> Result<Hash256, BlockError<T::EthSpec>> { // Start the Prometheus timer. 
let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); @@ -2383,7 +2390,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let import_block = async move { let execution_pending = unverified_block.into_execution_pending_block(&chain)?; chain - .import_execution_pending_block(execution_pending) + .import_execution_pending_block(execution_pending, count_unrealized) .await }; @@ -2441,6 +2448,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { async fn import_execution_pending_block( self: Arc<Self>, execution_pending_block: ExecutionPendingBlock<T>, + count_unrealized: CountUnrealized, ) -> Result<Hash256, BlockError<T::EthSpec>> { let ExecutionPendingBlock { block, @@ -2499,6 +2507,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { state, confirmed_state_roots, payload_verification_status, + count_unrealized, ) }, "payload_verification_handle", @@ -2520,6 +2529,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { mut state: BeaconState<T::EthSpec>, confirmed_state_roots: Vec<Hash256>, payload_verification_status: PayloadVerificationStatus, + count_unrealized: CountUnrealized, ) -> Result<Hash256, BlockError<T::EthSpec>> { let current_slot = self.slot()?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); @@ -2665,6 +2675,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { &state, payload_verification_status, &self.spec, + count_unrealized.and(self.config.count_unrealized.into()), ) .map_err(|e| BlockError::BeaconChainError(e.into()))?; } @@ -2690,6 +2701,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { current_slot, &indexed_attestation, AttestationFromBlock::True, + &self.spec, ) { Ok(()) => Ok(()), // Ignore invalid attestations whilst importing attestations from a block. 
The diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index c7663c77c4..0d65b8aa62 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -155,6 +155,8 @@ pub struct BeaconForkChoiceStore<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore< justified_checkpoint: Checkpoint, justified_balances: Vec<u64>, best_justified_checkpoint: Checkpoint, + unrealized_justified_checkpoint: Checkpoint, + unrealized_finalized_checkpoint: Checkpoint, proposer_boost_root: Hash256, _phantom: PhantomData<E>, } @@ -201,6 +203,8 @@ where justified_balances: anchor_state.balances().clone().into(), finalized_checkpoint, best_justified_checkpoint: justified_checkpoint, + unrealized_justified_checkpoint: justified_checkpoint, + unrealized_finalized_checkpoint: finalized_checkpoint, proposer_boost_root: Hash256::zero(), _phantom: PhantomData, } @@ -216,6 +220,8 @@ where justified_checkpoint: self.justified_checkpoint, justified_balances: self.justified_balances.clone(), best_justified_checkpoint: self.best_justified_checkpoint, + unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, proposer_boost_root: self.proposer_boost_root, } } @@ -233,6 +239,8 @@ where justified_checkpoint: persisted.justified_checkpoint, justified_balances: persisted.justified_balances, best_justified_checkpoint: persisted.best_justified_checkpoint, + unrealized_justified_checkpoint: persisted.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: persisted.unrealized_finalized_checkpoint, proposer_boost_root: persisted.proposer_boost_root, _phantom: PhantomData, }) @@ -280,6 +288,14 @@ where &self.finalized_checkpoint } + fn unrealized_justified_checkpoint(&self) -> &Checkpoint { + &self.unrealized_justified_checkpoint + } + + fn 
unrealized_finalized_checkpoint(&self) -> &Checkpoint { + &self.unrealized_finalized_checkpoint + } + fn proposer_boost_root(&self) -> Hash256 { self.proposer_boost_root } @@ -323,6 +339,14 @@ where self.best_justified_checkpoint = checkpoint } + fn set_unrealized_justified_checkpoint(&mut self, checkpoint: Checkpoint) { + self.unrealized_justified_checkpoint = checkpoint; + } + + fn set_unrealized_finalized_checkpoint(&mut self, checkpoint: Checkpoint) { + self.unrealized_finalized_checkpoint = checkpoint; + } + fn set_proposer_boost_root(&mut self, proposer_boost_root: Hash256) { self.proposer_boost_root = proposer_boost_root; } @@ -330,22 +354,26 @@ where /// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database. #[superstruct( - variants(V1, V7, V8), + variants(V1, V7, V8, V10), variant_attributes(derive(Encode, Decode)), no_enum )] pub struct PersistedForkChoiceStore { #[superstruct(only(V1, V7))] pub balances_cache: BalancesCacheV1, - #[superstruct(only(V8))] + #[superstruct(only(V8, V10))] pub balances_cache: BalancesCacheV8, pub time: Slot, pub finalized_checkpoint: Checkpoint, pub justified_checkpoint: Checkpoint, pub justified_balances: Vec<u64>, pub best_justified_checkpoint: Checkpoint, - #[superstruct(only(V7, V8))] + #[superstruct(only(V10))] + pub unrealized_justified_checkpoint: Checkpoint, + #[superstruct(only(V10))] + pub unrealized_finalized_checkpoint: Checkpoint, + #[superstruct(only(V7, V8, V10))] pub proposer_boost_root: Hash256, } -pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV8; +pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV10; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index c8341cd60b..0031bd2c6c 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1416,6 +1416,10 @@ fn check_block_against_finalized_slot<T: BeaconChainTypes>( 
block_root: Hash256, chain: &BeaconChain<T>, ) -> Result<(), BlockError<T::EthSpec>> { + // The finalized checkpoint is being read from fork choice, rather than the cached head. + // + // Fork choice has the most up-to-date view of finalization and there's no point importing a + // block which conflicts with the fork-choice view of finalization. let finalized_slot = chain .canonical_head .cached_head() diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index cef33ee4f7..252b7cef5a 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -647,6 +647,7 @@ where store.clone(), Some(current_slot), &self.spec, + self.chain_config.count_unrealized.into(), )?; } diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 36c2f41d9d..d5e3d19814 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -24,6 +24,7 @@ pub struct ChainConfig { /// /// If set to 0 then block proposal will not wait for fork choice at all. 
pub fork_choice_before_proposal_timeout_ms: u64, + pub count_unrealized: bool, } impl Default for ChainConfig { @@ -35,6 +36,7 @@ impl Default for ChainConfig { enable_lock_timeouts: true, max_network_size: 10 * 1_048_576, // 10M fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT, + count_unrealized: false, } } } diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index fc89429d3f..1d2787d985 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -1,5 +1,5 @@ use crate::{BeaconForkChoiceStore, BeaconSnapshot}; -use fork_choice::{ForkChoice, PayloadVerificationStatus}; +use fork_choice::{CountUnrealized, ForkChoice, PayloadVerificationStatus}; use itertools::process_results; use slog::{info, warn, Logger}; use state_processing::state_advance::complete_state_advance; @@ -99,6 +99,7 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It store: Arc<HotColdDB<E, Hot, Cold>>, current_slot: Option<Slot>, spec: &ChainSpec, + count_unrealized_config: CountUnrealized, ) -> Result<ForkChoice<BeaconForkChoiceStore<E, Hot, Cold>, E>, String> { // Fetch finalized block. let finalized_checkpoint = head_state.finalized_checkpoint(); @@ -163,7 +164,8 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It .map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?; let mut state = finalized_snapshot.beacon_state; - for block in blocks { + let blocks_len = blocks.len(); + for (i, block) in blocks.into_iter().enumerate() { complete_state_advance(&mut state, None, block.slot(), spec) .map_err(|e| format!("State advance failed: {:?}", e))?; @@ -183,6 +185,15 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It // This scenario is so rare that it seems OK to double-verify some blocks. 
let payload_verification_status = PayloadVerificationStatus::Optimistic; + // Because we are replaying a single chain of blocks, we only need to calculate unrealized + // justification for the last block in the chain. + let is_last_block = i + 1 == blocks_len; + let count_unrealized = if is_last_block { + count_unrealized_config + } else { + CountUnrealized::False + }; + fork_choice .on_block( block.slot(), @@ -193,6 +204,7 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It &state, payload_verification_status, spec, + count_unrealized, ) .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?; } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 728057c90f..9cb734f2a0 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -44,7 +44,7 @@ mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - ForkChoiceError, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, + CountUnrealized, ForkChoiceError, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; pub use self::beacon_snapshot::BeaconSnapshot; diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index eb4c761913..eb5078df2c 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -1,5 +1,6 @@ use crate::beacon_fork_choice_store::{ - PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8, + PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV7, + PersistedForkChoiceStoreV8, }; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -7,10 +8,10 @@ use store::{DBColumn, Error, StoreItem}; 
use superstruct::superstruct; // If adding a new version you should update this type alias and fix the breakages. -pub type PersistedForkChoice = PersistedForkChoiceV8; +pub type PersistedForkChoice = PersistedForkChoiceV10; #[superstruct( - variants(V1, V7, V8), + variants(V1, V7, V8, V10), variant_attributes(derive(Encode, Decode)), no_enum )] @@ -22,6 +23,8 @@ pub struct PersistedForkChoice { pub fork_choice_store: PersistedForkChoiceStoreV7, #[superstruct(only(V8))] pub fork_choice_store: PersistedForkChoiceStoreV8, + #[superstruct(only(V10))] + pub fork_choice_store: PersistedForkChoiceStoreV10, } macro_rules! impl_store_item { @@ -45,3 +48,4 @@ macro_rules! impl_store_item { impl_store_item!(PersistedForkChoiceV1); impl_store_item!(PersistedForkChoiceV7); impl_store_item!(PersistedForkChoiceV8); +impl_store_item!(PersistedForkChoiceV10); diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index a48f1d3756..411ef947d9 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,4 +1,5 @@ //! Utilities for managing database schema changes. 
+mod migration_schema_v10; mod migration_schema_v6; mod migration_schema_v7; mod migration_schema_v8; @@ -6,7 +7,9 @@ mod migration_schema_v9; mod types; use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; -use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; +use crate::persisted_fork_choice::{ + PersistedForkChoiceV1, PersistedForkChoiceV10, PersistedForkChoiceV7, PersistedForkChoiceV8, +}; use crate::types::ChainSpec; use slog::{warn, Logger}; use std::path::Path; @@ -130,6 +133,32 @@ pub fn migrate_schema<T: BeaconChainTypes>( migration_schema_v9::downgrade_from_v9::<T>(db.clone(), log)?; db.store_schema_version(to) } + (SchemaVersion(9), SchemaVersion(10)) => { + let mut ops = vec![]; + let fork_choice_opt = db.get_item::<PersistedForkChoiceV8>(&FORK_CHOICE_DB_KEY)?; + if let Some(fork_choice) = fork_choice_opt { + let updated_fork_choice = migration_schema_v10::update_fork_choice(fork_choice)?; + + ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } + (SchemaVersion(10), SchemaVersion(9)) => { + let mut ops = vec![]; + let fork_choice_opt = db.get_item::<PersistedForkChoiceV10>(&FORK_CHOICE_DB_KEY)?; + if let Some(fork_choice) = fork_choice_opt { + let updated_fork_choice = migration_schema_v10::downgrade_fork_choice(fork_choice)?; + + ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } // Anything else is an error. 
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs new file mode 100644 index 0000000000..70e0007851 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs @@ -0,0 +1,97 @@ +use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV8}; +use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV8}; +use crate::schema_change::{ + types::{SszContainerV10, SszContainerV7}, + StoreError, +}; +use proto_array::core::SszContainer; +use ssz::{Decode, Encode}; + +pub fn update_fork_choice( + mut fork_choice: PersistedForkChoiceV8, +) -> Result<PersistedForkChoiceV10, StoreError> { + let ssz_container_v7 = SszContainerV7::from_ssz_bytes( + &fork_choice.fork_choice.proto_array_bytes, + ) + .map_err(|e| { + StoreError::SchemaMigrationError(format!( + "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", + e + )) + })?; + + // These transformations instantiate `node.unrealized_justified_checkpoint` and + // `node.unrealized_finalized_checkpoint` to `None`. 
+ let ssz_container_v10: SszContainerV10 = ssz_container_v7.into(); + let ssz_container: SszContainer = ssz_container_v10.into(); + fork_choice.fork_choice.proto_array_bytes = ssz_container.as_ssz_bytes(); + + Ok(fork_choice.into()) +} + +pub fn downgrade_fork_choice( + mut fork_choice: PersistedForkChoiceV10, +) -> Result<PersistedForkChoiceV8, StoreError> { + let ssz_container_v10 = SszContainerV10::from_ssz_bytes( + &fork_choice.fork_choice.proto_array_bytes, + ) + .map_err(|e| { + StoreError::SchemaMigrationError(format!( + "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", + e + )) + })?; + + let ssz_container_v7: SszContainerV7 = ssz_container_v10.into(); + fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes(); + + Ok(fork_choice.into()) +} + +impl From<PersistedForkChoiceStoreV8> for PersistedForkChoiceStoreV10 { + fn from(other: PersistedForkChoiceStoreV8) -> Self { + Self { + balances_cache: other.balances_cache, + time: other.time, + finalized_checkpoint: other.finalized_checkpoint, + justified_checkpoint: other.justified_checkpoint, + justified_balances: other.justified_balances, + best_justified_checkpoint: other.best_justified_checkpoint, + unrealized_justified_checkpoint: other.best_justified_checkpoint, + unrealized_finalized_checkpoint: other.finalized_checkpoint, + proposer_boost_root: other.proposer_boost_root, + } + } +} + +impl From<PersistedForkChoiceV8> for PersistedForkChoiceV10 { + fn from(other: PersistedForkChoiceV8) -> Self { + Self { + fork_choice: other.fork_choice, + fork_choice_store: other.fork_choice_store.into(), + } + } +} + +impl From<PersistedForkChoiceStoreV10> for PersistedForkChoiceStoreV8 { + fn from(other: PersistedForkChoiceStoreV10) -> Self { + Self { + balances_cache: other.balances_cache, + time: other.time, + finalized_checkpoint: other.finalized_checkpoint, + justified_checkpoint: other.justified_checkpoint, + justified_balances: other.justified_balances, + 
best_justified_checkpoint: other.best_justified_checkpoint, + proposer_boost_root: other.proposer_boost_root, + } + } +} + +impl From<PersistedForkChoiceV10> for PersistedForkChoiceV8 { + fn from(other: PersistedForkChoiceV10) -> Self { + Self { + fork_choice: other.fork_choice, + fork_choice_store: other.fork_choice_store.into(), + } + } +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs index 9222266ba9..81147b8af6 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs @@ -2,7 +2,7 @@ use crate::beacon_chain::BeaconChainTypes; use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7}; use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; -use crate::schema_change::types::{ProtoNodeV6, SszContainerV6, SszContainerV7}; +use crate::schema_change::types::{ProtoNodeV6, SszContainerV10, SszContainerV6, SszContainerV7}; use crate::types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; use crate::{BeaconForkChoiceStore, BeaconSnapshot}; use fork_choice::ForkChoice; @@ -86,7 +86,8 @@ pub(crate) fn update_fork_choice<T: BeaconChainTypes>( // to `None`. 
let ssz_container_v7: SszContainerV7 = ssz_container_v6.into_ssz_container_v7(justified_checkpoint, finalized_checkpoint); - let ssz_container: SszContainer = ssz_container_v7.into(); + let ssz_container_v10: SszContainerV10 = ssz_container_v7.into(); + let ssz_container: SszContainer = ssz_container_v10.into(); let mut fork_choice: ProtoArrayForkChoice = ssz_container.into(); update_checkpoints::<T>(finalized_checkpoint.root, &nodes_v6, &mut fork_choice, db) @@ -97,6 +98,13 @@ pub(crate) fn update_fork_choice<T: BeaconChainTypes>( update_store_justified_checkpoint(persisted_fork_choice, &mut fork_choice) .map_err(StoreError::SchemaMigrationError)?; + // Need to downgrade the SSZ container to V7 so that all migrations can be applied in sequence. + let ssz_container = SszContainer::from(&fork_choice); + let ssz_container_v7 = SszContainerV7::from(ssz_container); + + persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes(); + persisted_fork_choice.fork_choice_store.justified_checkpoint = justified_checkpoint; + Ok(()) } @@ -301,8 +309,6 @@ fn update_store_justified_checkpoint( .ok_or("Proto node with current finalized checkpoint not found")?; fork_choice.core_proto_array_mut().justified_checkpoint = justified_checkpoint; - persisted_fork_choice.fork_choice.proto_array_bytes = fork_choice.as_bytes(); - persisted_fork_choice.fork_choice_store.justified_checkpoint = justified_checkpoint; Ok(()) } diff --git a/beacon_node/beacon_chain/src/schema_change/types.rs b/beacon_node/beacon_chain/src/schema_change/types.rs index 8d41a384f6..02a54c1a3f 100644 --- a/beacon_node/beacon_chain/src/schema_change/types.rs +++ b/beacon_node/beacon_chain/src/schema_change/types.rs @@ -12,7 +12,7 @@ four_byte_option_impl!(four_byte_option_usize, usize); four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); #[superstruct( - variants(V1, V6, V7), + variants(V1, V6, V7, V10), variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode)), no_enum 
)] @@ -30,18 +30,24 @@ pub struct ProtoNode { #[superstruct(only(V1, V6))] pub finalized_epoch: Epoch, #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V7))] + #[superstruct(only(V7, V10))] pub justified_checkpoint: Option<Checkpoint>, #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V7))] + #[superstruct(only(V7, V10))] pub finalized_checkpoint: Option<Checkpoint>, pub weight: u64, #[ssz(with = "four_byte_option_usize")] pub best_child: Option<usize>, #[ssz(with = "four_byte_option_usize")] pub best_descendant: Option<usize>, - #[superstruct(only(V6, V7))] + #[superstruct(only(V6, V7, V10))] pub execution_status: ExecutionStatus, + #[ssz(with = "four_byte_option_checkpoint")] + #[superstruct(only(V10))] + pub unrealized_justified_checkpoint: Option<Checkpoint>, + #[ssz(with = "four_byte_option_checkpoint")] + #[superstruct(only(V10))] + pub unrealized_finalized_checkpoint: Option<Checkpoint>, } impl Into<ProtoNodeV6> for ProtoNodeV1 { @@ -88,9 +94,31 @@ impl Into<ProtoNodeV7> for ProtoNodeV6 { } } -impl Into<ProtoNode> for ProtoNodeV7 { - fn into(self) -> ProtoNode { - ProtoNode { +impl Into<ProtoNodeV10> for ProtoNodeV7 { + fn into(self) -> ProtoNodeV10 { + ProtoNodeV10 { + slot: self.slot, + state_root: self.state_root, + target_root: self.target_root, + current_epoch_shuffling_id: self.current_epoch_shuffling_id, + next_epoch_shuffling_id: self.next_epoch_shuffling_id, + root: self.root, + parent: self.parent, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + weight: self.weight, + best_child: self.best_child, + best_descendant: self.best_descendant, + execution_status: self.execution_status, + unrealized_justified_checkpoint: None, + unrealized_finalized_checkpoint: None, + } + } +} + +impl Into<ProtoNodeV7> for ProtoNodeV10 { + fn into(self) -> ProtoNodeV7 { + ProtoNodeV7 { slot: self.slot, state_root: self.state_root, target_root: self.target_root, @@ -108,8 +136,50 @@ 
impl Into<ProtoNode> for ProtoNodeV7 { } } +impl Into<ProtoNode> for ProtoNodeV10 { + fn into(self) -> ProtoNode { + ProtoNode { + slot: self.slot, + state_root: self.state_root, + target_root: self.target_root, + current_epoch_shuffling_id: self.current_epoch_shuffling_id, + next_epoch_shuffling_id: self.next_epoch_shuffling_id, + root: self.root, + parent: self.parent, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + weight: self.weight, + best_child: self.best_child, + best_descendant: self.best_descendant, + execution_status: self.execution_status, + unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, + } + } +} + +impl From<ProtoNode> for ProtoNodeV7 { + fn from(container: ProtoNode) -> Self { + Self { + slot: container.slot, + state_root: container.state_root, + target_root: container.target_root, + current_epoch_shuffling_id: container.current_epoch_shuffling_id, + next_epoch_shuffling_id: container.next_epoch_shuffling_id, + root: container.root, + parent: container.parent, + justified_checkpoint: container.justified_checkpoint, + finalized_checkpoint: container.finalized_checkpoint, + weight: container.weight, + best_child: container.best_child, + best_descendant: container.best_descendant, + execution_status: container.execution_status, + } + } +} + #[superstruct( - variants(V1, V6, V7), + variants(V1, V6, V7, V10), variant_attributes(derive(Encode, Decode)), no_enum )] @@ -122,9 +192,9 @@ pub struct SszContainer { pub justified_epoch: Epoch, #[superstruct(only(V1, V6))] pub finalized_epoch: Epoch, - #[superstruct(only(V7))] + #[superstruct(only(V7, V10))] pub justified_checkpoint: Checkpoint, - #[superstruct(only(V7))] + #[superstruct(only(V7, V10))] pub finalized_checkpoint: Checkpoint, #[superstruct(only(V1))] pub nodes: Vec<ProtoNodeV1>, @@ -132,8 +202,10 @@ pub struct SszContainer { pub nodes: 
Vec<ProtoNodeV6>, #[superstruct(only(V7))] pub nodes: Vec<ProtoNodeV7>, + #[superstruct(only(V10))] + pub nodes: Vec<ProtoNodeV10>, pub indices: Vec<(Hash256, usize)>, - #[superstruct(only(V7))] + #[superstruct(only(V7, V10))] pub previous_proposer_boost: ProposerBoost, } @@ -174,7 +246,41 @@ impl SszContainerV6 { } } -impl Into<SszContainer> for SszContainerV7 { +impl Into<SszContainerV10> for SszContainerV7 { + fn into(self) -> SszContainerV10 { + let nodes = self.nodes.into_iter().map(Into::into).collect(); + + SszContainerV10 { + votes: self.votes, + balances: self.balances, + prune_threshold: self.prune_threshold, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + nodes, + indices: self.indices, + previous_proposer_boost: self.previous_proposer_boost, + } + } +} + +impl Into<SszContainerV7> for SszContainerV10 { + fn into(self) -> SszContainerV7 { + let nodes = self.nodes.into_iter().map(Into::into).collect(); + + SszContainerV7 { + votes: self.votes, + balances: self.balances, + prune_threshold: self.prune_threshold, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + nodes, + indices: self.indices, + previous_proposer_boost: self.previous_proposer_boost, + } + } +} + +impl Into<SszContainer> for SszContainerV10 { fn into(self) -> SszContainer { let nodes = self.nodes.into_iter().map(Into::into).collect(); @@ -190,3 +296,20 @@ impl Into<SszContainer> for SszContainerV7 { } } } + +impl From<SszContainer> for SszContainerV7 { + fn from(container: SszContainer) -> Self { + let nodes = container.nodes.into_iter().map(Into::into).collect(); + + Self { + votes: container.votes, + balances: container.balances, + prune_threshold: container.prune_threshold, + justified_checkpoint: container.justified_checkpoint, + finalized_checkpoint: container.finalized_checkpoint, + nodes, + indices: container.indices, + previous_proposer_boost: 
container.previous_proposer_boost, + } + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index e9dc8619ac..1297e7d78b 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -17,6 +17,7 @@ use execution_layer::{ test_utils::{ExecutionBlockGenerator, MockExecutionLayer, DEFAULT_TERMINAL_BLOCK}, ExecutionLayer, }; +use fork_choice::CountUnrealized; use futures::channel::mpsc::Receiver; pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; @@ -1370,8 +1371,11 @@ where block: SignedBeaconBlock<E>, ) -> Result<SignedBeaconBlockHash, BlockError<E>> { self.set_current_slot(slot); - let block_hash: SignedBeaconBlockHash = - self.chain.process_block(Arc::new(block)).await?.into(); + let block_hash: SignedBeaconBlockHash = self + .chain + .process_block(Arc::new(block), CountUnrealized::True) + .await? + .into(); self.chain.recompute_head_at_current_slot().await?; Ok(block_hash) } @@ -1380,8 +1384,11 @@ where &self, block: SignedBeaconBlock<E>, ) -> Result<SignedBeaconBlockHash, BlockError<E>> { - let block_hash: SignedBeaconBlockHash = - self.chain.process_block(Arc::new(block)).await?.into(); + let block_hash: SignedBeaconBlockHash = self + .chain + .process_block(Arc::new(block), CountUnrealized::True) + .await? 
+ .into(); self.chain.recompute_head_at_current_slot().await?; Ok(block_hash) } diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 4b3e1e72fe..43dda7ab05 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -4,6 +4,7 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult}; +use fork_choice::CountUnrealized; use lazy_static::lazy_static; use logging::test_logger; use slasher::{Config as SlasherConfig, Slasher}; @@ -147,14 +148,14 @@ async fn chain_segment_full_segment() { // Sneak in a little check to ensure we can process empty chain segments. harness .chain - .process_chain_segment(vec![]) + .process_chain_segment(vec![], CountUnrealized::True) .await .into_block_error() .expect("should import empty chain segment"); harness .chain - .process_chain_segment(blocks.clone()) + .process_chain_segment(blocks.clone(), CountUnrealized::True) .await .into_block_error() .expect("should import chain segment"); @@ -187,7 +188,7 @@ async fn chain_segment_varying_chunk_size() { for chunk in blocks.chunks(*chunk_size) { harness .chain - .process_chain_segment(chunk.to_vec()) + .process_chain_segment(chunk.to_vec(), CountUnrealized::True) .await .into_block_error() .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); @@ -227,7 +228,7 @@ async fn chain_segment_non_linear_parent_roots() { matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) .await .into_block_error(), Err(BlockError::NonLinearParentRoots) @@ -247,7 +248,7 @@ async fn chain_segment_non_linear_parent_roots() { matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) .await .into_block_error(), 
Err(BlockError::NonLinearParentRoots) @@ -278,7 +279,7 @@ async fn chain_segment_non_linear_slots() { matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) .await .into_block_error(), Err(BlockError::NonLinearSlots) @@ -299,7 +300,7 @@ async fn chain_segment_non_linear_slots() { matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) .await .into_block_error(), Err(BlockError::NonLinearSlots) @@ -325,7 +326,7 @@ async fn assert_invalid_signature( matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -342,12 +343,18 @@ async fn assert_invalid_signature( .collect(); // We don't care if this fails, we just call this to ensure that all prior blocks have been // imported prior to this test. - let _ = harness.chain.process_chain_segment(ancestor_blocks).await; + let _ = harness + .chain + .process_chain_segment(ancestor_blocks, CountUnrealized::True) + .await; assert!( matches!( harness .chain - .process_block(snapshots[block_index].beacon_block.clone()) + .process_block( + snapshots[block_index].beacon_block.clone(), + CountUnrealized::True + ) .await, Err(BlockError::InvalidSignature) ), @@ -397,7 +404,7 @@ async fn invalid_signature_gossip_block() { .collect(); harness .chain - .process_chain_segment(ancestor_blocks) + .process_chain_segment(ancestor_blocks, CountUnrealized::True) .await .into_block_error() .expect("should import all blocks prior to the one being tested"); @@ -405,10 +412,10 @@ async fn invalid_signature_gossip_block() { matches!( harness .chain - .process_block(Arc::new(SignedBeaconBlock::from_block( - block, - junk_signature() - ))) + .process_block( + Arc::new(SignedBeaconBlock::from_block(block, junk_signature())), + CountUnrealized::True + ) .await, Err(BlockError::InvalidSignature) ), @@ -441,7 +448,7 @@ 
async fn invalid_signature_block_proposal() { matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -639,7 +646,7 @@ async fn invalid_signature_deposit() { !matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -716,11 +723,18 @@ async fn block_gossip_verification() { harness .chain - .process_block(gossip_verified) + .process_block(gossip_verified, CountUnrealized::True) .await .expect("should import valid gossip verified block"); } + // Recompute the head to ensure we cache the latest view of fork choice. + harness + .chain + .recompute_head_at_current_slot() + .await + .unwrap(); + /* * This test ensures that: * @@ -978,7 +992,11 @@ async fn verify_block_for_gossip_slashing_detection() { .verify_block_for_gossip(Arc::new(block1)) .await .unwrap(); - harness.chain.process_block(verified_block).await.unwrap(); + harness + .chain + .process_block(verified_block, CountUnrealized::True) + .await + .unwrap(); unwrap_err( harness .chain @@ -1009,7 +1027,11 @@ async fn verify_block_for_gossip_doppelganger_detection() { .await .unwrap(); let attestations = verified_block.block.message().body().attestations().clone(); - harness.chain.process_block(verified_block).await.unwrap(); + harness + .chain + .process_block(verified_block, CountUnrealized::True) + .await + .unwrap(); for att in attestations.iter() { let epoch = att.data.target.epoch; @@ -1148,7 +1170,7 @@ async fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .process_block(Arc::new(base_block.clone())) + .process_block(Arc::new(base_block.clone()), CountUnrealized::True) .await .err() .expect("should error when processing base block"), @@ -1162,7 +1184,7 @@ async fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - 
.process_chain_segment(vec![Arc::new(base_block)]) + .process_chain_segment(vec![Arc::new(base_block)], CountUnrealized::True) .await, ChainSegmentResult::Failed { imported_blocks: 0, @@ -1276,7 +1298,7 @@ async fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .process_block(Arc::new(altair_block.clone())) + .process_block(Arc::new(altair_block.clone()), CountUnrealized::True) .await .err() .expect("should error when processing altair block"), @@ -1290,7 +1312,7 @@ async fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .process_chain_segment(vec![Arc::new(altair_block)]) + .process_chain_segment(vec![Arc::new(altair_block)], CountUnrealized::True) .await, ChainSegmentResult::Failed { imported_blocks: 0, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index a4e62cf969..f2ebb430d4 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -9,7 +9,9 @@ use execution_layer::{ json_structures::{JsonForkChoiceStateV1, JsonPayloadAttributesV1}, ExecutionLayer, ForkChoiceState, PayloadAttributes, }; -use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; +use fork_choice::{ + CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus, +}; use proto_array::{Error as ProtoArrayError, ExecutionStatus}; use slot_clock::SlotClock; use std::sync::Arc; @@ -648,7 +650,7 @@ async fn invalidates_all_descendants() { let fork_block_root = rig .harness .chain - .process_block(Arc::new(fork_block)) + .process_block(Arc::new(fork_block), CountUnrealized::True) .await .unwrap(); rig.recompute_head().await; @@ -740,7 +742,7 @@ async fn switches_heads() { let fork_block_root = rig .harness .chain - .process_block(Arc::new(fork_block)) + .process_block(Arc::new(fork_block), CountUnrealized::True) .await .unwrap(); 
rig.recompute_head().await; @@ -984,7 +986,7 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for import. assert!(matches!( - rig.harness.chain.process_block(block.clone()).await, + rig.harness.chain.process_block(block.clone(), CountUnrealized::True).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); @@ -998,7 +1000,8 @@ async fn invalid_parent() { Duration::from_secs(0), &state, PayloadVerificationStatus::Optimistic, - &rig.harness.chain.spec + &rig.harness.chain.spec, + CountUnrealized::True, ), Err(ForkChoiceError::ProtoArrayError(message)) if message.contains(&format!( diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 560e865a8f..b5b8152e8d 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -10,6 +10,7 @@ use beacon_chain::{ BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, ServerSentEventHandler, WhenSlotSkipped, }; +use fork_choice::CountUnrealized; use lazy_static::lazy_static; use logging::test_logger; use maplit::hashset; @@ -2124,7 +2125,7 @@ async fn weak_subjectivity_sync() { beacon_chain.slot_clock.set_slot(block.slot().as_u64()); beacon_chain - .process_block(Arc::new(full_block)) + .process_block(Arc::new(full_block), CountUnrealized::True) .await .unwrap(); beacon_chain.recompute_head_at_current_slot().await.unwrap(); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index f98580db3f..80a122976f 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -8,6 +8,7 @@ use beacon_chain::{ }, BeaconChain, StateSkipConfig, WhenSlotSkipped, }; +use fork_choice::CountUnrealized; use lazy_static::lazy_static; use operation_pool::PersistedOperationPool; use state_processing::{ @@ -499,7 +500,7 @@ async fn 
unaggregated_attestations_added_to_fork_choice_some_none() { // Move forward a slot so all queued attestations can be processed. harness.advance_slot(); fork_choice - .update_time(harness.chain.slot().unwrap()) + .update_time(harness.chain.slot().unwrap(), &harness.chain.spec) .unwrap(); let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT) @@ -613,7 +614,7 @@ async fn unaggregated_attestations_added_to_fork_choice_all_updated() { // Move forward a slot so all queued attestations can be processed. harness.advance_slot(); fork_choice - .update_time(harness.chain.slot().unwrap()) + .update_time(harness.chain.slot().unwrap(), &harness.chain.spec) .unwrap(); let validators: Vec<usize> = (0..VALIDATOR_COUNT).collect(); @@ -683,7 +684,10 @@ async fn run_skip_slot_test(skip_slots: u64) { assert_eq!( harness_b .chain - .process_block(harness_a.chain.head_snapshot().beacon_block.clone()) + .process_block( + harness_a.chain.head_snapshot().beacon_block.clone(), + CountUnrealized::True + ) .await .unwrap(), harness_a.chain.head_snapshot().beacon_block_root diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index a27e5015cf..31ae7486e6 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -23,7 +23,7 @@ use beacon_chain::{ observed_operations::ObservationOutcome, validator_monitor::{get_block_delay_ms, timestamp_now}, AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, - ProduceBlockVerification, WhenSlotSkipped, + CountUnrealized, ProduceBlockVerification, WhenSlotSkipped, }; pub use block_id::BlockId; use eth2::types::{self as api_types, EndpointVersion, ValidatorId}; @@ -1035,7 +1035,10 @@ pub fn serve<T: BeaconChainTypes>( let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); - match chain.process_block(block.clone()).await { + match chain + 
.process_block(block.clone(), CountUnrealized::True) + .await + { Ok(root) => { info!( log, @@ -1179,7 +1182,7 @@ pub fn serve<T: BeaconChainTypes>( PubsubMessage::BeaconBlock(new_block.clone()), )?; - match chain.process_block(new_block).await { + match chain.process_block(new_block, CountUnrealized::True).await { Ok(_) => { // Update the head since it's likely this block will become the new // head. diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index b88b58b8bf..1b1dc12d87 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -6,7 +6,8 @@ use beacon_chain::{ observed_operations::ObservationOutcome, sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::get_block_delay_ms, - BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, + BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized, ForkChoiceError, + GossipVerifiedBlock, }; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use slog::{crit, debug, error, info, trace, warn}; @@ -899,7 +900,11 @@ impl<T: BeaconChainTypes> Worker<T> { ) { let block: Arc<_> = verified_block.block.clone(); - match self.chain.process_block(verified_block).await { + match self + .chain + .process_block(verified_block, CountUnrealized::True) + .await + { Ok(block_root) => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 84e3c95c69..ffcadb8689 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -7,10 +7,10 @@ use 
crate::beacon_processor::DuplicateCache; use crate::metrics; use crate::sync::manager::{BlockProcessType, SyncMessage}; use crate::sync::{BatchProcessResult, ChainId}; -use beacon_chain::ExecutionPayloadError; use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, }; +use beacon_chain::{CountUnrealized, ExecutionPayloadError}; use lighthouse_network::PeerAction; use slog::{debug, error, info, warn}; use std::sync::Arc; @@ -21,7 +21,7 @@ use types::{Epoch, Hash256, SignedBeaconBlock}; #[derive(Clone, Debug, PartialEq)] pub enum ChainSegmentProcessId { /// Processing Id of a range syncing batch. - RangeBatchId(ChainId, Epoch), + RangeBatchId(ChainId, Epoch, CountUnrealized), /// Processing ID for a backfill syncing batch. BackSyncBatchId(Epoch), /// Processing Id of the parent lookup of a block. @@ -89,7 +89,7 @@ impl<T: BeaconChainTypes> Worker<T> { } }; let slot = block.slot(); - let result = self.chain.process_block(block).await; + let result = self.chain.process_block(block, CountUnrealized::True).await; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); @@ -133,12 +133,15 @@ impl<T: BeaconChainTypes> Worker<T> { ) { let result = match sync_type { // this a request from the range sync - ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => { + ChainSegmentProcessId::RangeBatchId(chain_id, epoch, count_unrealized) => { let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64()); let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); let sent_blocks = downloaded_blocks.len(); - match self.process_blocks(downloaded_blocks.iter()).await { + match self + .process_blocks(downloaded_blocks.iter(), count_unrealized) + .await + { (_, Ok(_)) => { debug!(self.log, "Batch processed"; "batch_epoch" => epoch, @@ -207,7 +210,10 @@ impl<T: BeaconChainTypes> Worker<T> { ); // parent blocks are ordered from highest slot to lowest, so we need to process in // reverse - match 
self.process_blocks(downloaded_blocks.iter().rev()).await { + match self + .process_blocks(downloaded_blocks.iter().rev(), CountUnrealized::True) + .await + { (imported_blocks, Err(e)) => { debug!(self.log, "Parent lookup failed"; "error" => %e.message); BatchProcessResult::Failed { @@ -231,9 +237,14 @@ impl<T: BeaconChainTypes> Worker<T> { async fn process_blocks<'a>( &self, downloaded_blocks: impl Iterator<Item = &'a Arc<SignedBeaconBlock<T::EthSpec>>>, + count_unrealized: CountUnrealized, ) -> (usize, Result<(), ChainSegmentFailed>) { let blocks: Vec<Arc<_>> = downloaded_blocks.cloned().collect(); - match self.chain.process_chain_segment(blocks).await { + match self + .chain + .process_chain_segment(blocks, count_unrealized) + .await + { ChainSegmentResult::Successful { imported_blocks } => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL); if imported_blocks > 0 { diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index d0919406b2..fe27a33c5c 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -532,7 +532,7 @@ impl<T: BeaconChainTypes> SyncManager<T> { .parent_block_processed(chain_hash, result, &mut self.network), }, SyncMessage::BatchProcessed { sync_type, result } => match sync_type { - ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => { + ChainSegmentProcessId::RangeBatchId(chain_id, epoch, _) => { self.range_sync.handle_block_process_result( &mut self.network, chain_id, diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index ef5ba23e66..caa08165a9 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -2,7 +2,7 @@ use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use crate::beacon_processor::{ChainSegmentProcessId, FailureMode}; 
use crate::sync::{manager::Id, network_context::SyncNetworkContext, BatchProcessResult}; -use beacon_chain::BeaconChainTypes; +use beacon_chain::{BeaconChainTypes, CountUnrealized}; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; use rand::seq::SliceRandom; @@ -100,6 +100,8 @@ pub struct SyncingChain<T: BeaconChainTypes> { /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. beacon_processor_send: Sender<BeaconWorkEvent<T>>, + is_finalized_segment: bool, + /// The chain's log. log: slog::Logger, } @@ -126,6 +128,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { target_head_root: Hash256, peer_id: PeerId, beacon_processor_send: Sender<BeaconWorkEvent<T>>, + is_finalized_segment: bool, log: &slog::Logger, ) -> Self { let mut peers = FnvHashMap::default(); @@ -148,6 +151,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { current_processing_batch: None, validated_batches: 0, beacon_processor_send, + is_finalized_segment, log: log.new(o!("chain" => id)), } } @@ -302,7 +306,12 @@ impl<T: BeaconChainTypes> SyncingChain<T> { // for removing chains and checking completion is in the callback. 
let blocks = batch.start_processing()?; - let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id); + let count_unrealized = if self.is_finalized_segment { + CountUnrealized::False + } else { + CountUnrealized::True + }; + let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id, count_unrealized); self.current_processing_batch = Some(batch_id); if let Err(e) = self diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 7ddfc3f70a..e76adff3af 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -472,10 +472,10 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> { network: &mut SyncNetworkContext<T::EthSpec>, ) { let id = SyncingChain::<T>::id(&target_head_root, &target_head_slot); - let collection = if let RangeSyncType::Finalized = sync_type { - &mut self.finalized_chains + let (collection, is_finalized) = if let RangeSyncType::Finalized = sync_type { + (&mut self.finalized_chains, true) } else { - &mut self.head_chains + (&mut self.head_chains, false) }; match collection.entry(id) { Entry::Occupied(mut entry) => { @@ -501,6 +501,7 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> { target_head_root, peer, beacon_processor_send.clone(), + is_finalized, &self.log, ); debug_assert_eq!(new_chain.get_id(), id); diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 964873a949..b36f154ae8 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -708,4 +708,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("250") .takes_value(true) ) + .arg( + Arg::with_name("count-unrealized") + .long("count-unrealized") + .hidden(true) + .help("**EXPERIMENTAL** Enables an alternative, potentially more performant FFG \ + vote tracking method.") + .takes_value(false) + ) } diff --git a/beacon_node/src/config.rs 
b/beacon_node/src/config.rs index c91bd711e5..fb0cbe0c92 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -630,6 +630,10 @@ pub fn get_config<E: EthSpec>( client_config.chain.fork_choice_before_proposal_timeout_ms = timeout; } + if cli_args.is_present("count-unrealized") { + client_config.chain.count_unrealized = true; + } + Ok(client_config) } diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 5551f1f44d..235550ddd7 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(9); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(10); // All the keys that get stored under the `BeaconMeta` column. // diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 429ab1b8c5..b2570092e6 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -8,6 +8,7 @@ edition = "2021" [dependencies] types = { path = "../types" } +state_processing = { path = "../state_processing" } proto_array = { path = "../proto_array" } eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 984eeaada5..c3a88433f2 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,6 +1,7 @@ use crate::{ForkChoiceStore, InvalidationOperation}; use proto_array::{Block as ProtoBlock, ExecutionStatus, ProtoArrayForkChoice}; use ssz_derive::{Decode, Encode}; +use state_processing::per_epoch_processing; use std::cmp::Ordering; use std::marker::PhantomData; use std::time::Duration; @@ -51,6 +52,9 @@ pub enum Error<T> { MissingFinalizedBlock { finalized_checkpoint: Checkpoint, }, + 
UnrealizedVoteProcessing(state_processing::EpochProcessingError), + ParticipationCacheBuild(BeaconStateError), + ValidatorStatuses(BeaconStateError), } impl<T> From<InvalidAttestation> for Error<T> { @@ -59,6 +63,12 @@ impl<T> From<InvalidAttestation> for Error<T> { } } +impl<T> From<state_processing::EpochProcessingError> for Error<T> { + fn from(e: state_processing::EpochProcessingError) -> Self { + Error::UnrealizedVoteProcessing(e) + } +} + #[derive(Debug)] pub enum InvalidBlock { UnknownParent(Hash256), @@ -114,6 +124,66 @@ impl<T> From<String> for Error<T> { } } +/// Indicates whether the unrealized justification of a block should be calculated and tracked. +/// If a block has been finalized, this can be set to false. This is useful when syncing finalized +/// portions of the chain. Otherwise this should always be set to true. +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum CountUnrealized { + True, + False, +} + +impl CountUnrealized { + pub fn is_true(&self) -> bool { + matches!(self, CountUnrealized::True) + } + + pub fn and(&self, other: CountUnrealized) -> CountUnrealized { + if self.is_true() && other.is_true() { + CountUnrealized::True + } else { + CountUnrealized::False + } + } +} + +impl From<bool> for CountUnrealized { + fn from(count_unrealized: bool) -> Self { + if count_unrealized { + CountUnrealized::True + } else { + CountUnrealized::False + } + } +} + +#[derive(Copy, Clone)] +enum UpdateJustifiedCheckpointSlots { + OnTick { + current_slot: Slot, + }, + OnBlock { + state_slot: Slot, + current_slot: Slot, + }, +} + +impl UpdateJustifiedCheckpointSlots { + fn current_slot(&self) -> Slot { + match self { + UpdateJustifiedCheckpointSlots::OnTick { current_slot } => *current_slot, + UpdateJustifiedCheckpointSlots::OnBlock { current_slot, .. } => *current_slot, + } + } + + fn state_slot(&self) -> Option<Slot> { + match self { + UpdateJustifiedCheckpointSlots::OnTick { .. } => None, + UpdateJustifiedCheckpointSlots::OnBlock { state_slot, .. 
} => Some(*state_slot), + } + } +} + /// Indicates if a block has been verified by an execution payload. /// /// There is no variant for "invalid", since such a block should never be added to fork choice. @@ -162,51 +232,6 @@ fn compute_start_slot_at_epoch<E: EthSpec>(epoch: Epoch) -> Slot { epoch.start_slot(E::slots_per_epoch()) } -/// Called whenever the current time increases. -/// -/// ## Specification -/// -/// Equivalent to: -/// -/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#on_tick -fn on_tick<T, E>(store: &mut T, time: Slot) -> Result<(), Error<T::Error>> -where - T: ForkChoiceStore<E>, - E: EthSpec, -{ - let previous_slot = store.get_current_slot(); - - if time > previous_slot + 1 { - return Err(Error::InconsistentOnTick { - previous_slot, - time, - }); - } - - // Update store time. - store.set_current_slot(time); - - let current_slot = store.get_current_slot(); - - // Reset proposer boost if this is a new slot. - if current_slot > previous_slot { - store.set_proposer_boost_root(Hash256::zero()); - } - - // Not a new epoch, return. - if !(current_slot > previous_slot && compute_slots_since_epoch_start::<E>(current_slot) == 0) { - return Ok(()); - } - - if store.best_justified_checkpoint().epoch > store.justified_checkpoint().epoch { - store - .set_justified_checkpoint(*store.best_justified_checkpoint()) - .map_err(Error::ForkChoiceStoreError)?; - } - - Ok(()) -} - /// Used for queuing attestations from the current slot. Only contains the minimum necessary /// information about the attestation. #[derive(Clone, PartialEq, Encode, Decode)] @@ -356,7 +381,7 @@ where // If the current slot is not provided, use the value that was last provided to the store. 
let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot()); - let proto_array = ProtoArrayForkChoice::new( + let proto_array = ProtoArrayForkChoice::new::<E>( finalized_block_slot, finalized_block_state_root, *fc_store.justified_checkpoint(), @@ -473,7 +498,7 @@ where current_slot: Slot, spec: &ChainSpec, ) -> Result<Hash256, Error<T::Error>> { - self.update_time(current_slot)?; + self.update_time(current_slot, spec)?; let store = &mut self.fc_store; @@ -482,6 +507,7 @@ where *store.finalized_checkpoint(), store.justified_balances(), store.proposer_boost_root(), + current_slot, spec, )?; @@ -539,13 +565,11 @@ where /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#should_update_justified_checkpoint fn should_update_justified_checkpoint( &mut self, - current_slot: Slot, - state: &BeaconState<E>, + new_justified_checkpoint: Checkpoint, + slots: UpdateJustifiedCheckpointSlots, spec: &ChainSpec, ) -> Result<bool, Error<T::Error>> { - self.update_time(current_slot)?; - - let new_justified_checkpoint = &state.current_justified_checkpoint(); + self.update_time(slots.current_slot(), spec)?; if compute_slots_since_epoch_start::<E>(self.fc_store.get_current_slot()) < spec.safe_slots_to_update_justified @@ -557,11 +581,13 @@ where compute_start_slot_at_epoch::<E>(self.fc_store.justified_checkpoint().epoch); // This sanity check is not in the spec, but the invariant is implied. 
- if justified_slot >= state.slot() { - return Err(Error::AttemptToRevertJustification { - store: justified_slot, - state: state.slot(), - }); + if let Some(state_slot) = slots.state_slot() { + if justified_slot >= state_slot { + return Err(Error::AttemptToRevertJustification { + store: justified_slot, + state: state_slot, + }); + } } // We know that the slot for `new_justified_checkpoint.root` is not greater than @@ -629,15 +655,15 @@ where state: &BeaconState<E>, payload_verification_status: PayloadVerificationStatus, spec: &ChainSpec, + count_unrealized: CountUnrealized, ) -> Result<(), Error<T::Error>> { - let current_slot = self.update_time(current_slot)?; + let current_slot = self.update_time(current_slot, spec)?; // Parent block must be known. - if !self.proto_array.contains_block(&block.parent_root()) { - return Err(Error::InvalidBlock(InvalidBlock::UnknownParent( - block.parent_root(), - ))); - } + let parent_block = self + .proto_array + .get_block(&block.parent_root()) + .ok_or_else(|| Error::InvalidBlock(InvalidBlock::UnknownParent(block.parent_root())))?; // Blocks cannot be in the future. If they are, their consideration must be delayed until // the are in the past. @@ -686,29 +712,110 @@ where self.fc_store.set_proposer_boost_root(block_root); } - // Update justified checkpoint. - if state.current_justified_checkpoint().epoch > self.fc_store.justified_checkpoint().epoch { - if state.current_justified_checkpoint().epoch - > self.fc_store.best_justified_checkpoint().epoch + let update_justified_checkpoint_slots = UpdateJustifiedCheckpointSlots::OnBlock { + state_slot: state.slot(), + current_slot, + }; + + // Update store with checkpoints if necessary + self.update_checkpoints( + state.current_justified_checkpoint(), + state.finalized_checkpoint(), + update_justified_checkpoint_slots, + spec, + )?; + + // Update unrealized justified/finalized checkpoints. 
+ let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if count_unrealized + .is_true() + { + let block_epoch = block.slot().epoch(E::slots_per_epoch()); + + // If the parent checkpoints are already at the same epoch as the block being imported, + // it's impossible for the unrealized checkpoints to differ from the parent's. This + // holds true because: + // + // 1. A child block cannot have lower FFG checkpoints than its parent. + // 2. A block in epoch `N` cannot contain attestations which would justify an epoch higher than `N`. + // 3. A block in epoch `N` cannot contain attestations which would finalize an epoch higher than `N - 1`. + // + // This is an optimization. It should reduce the amount of times we run + // `process_justification_and_finalization` by approximately 1/3rd when the chain is + // performing optimally. + let parent_checkpoints = parent_block + .unrealized_justified_checkpoint + .zip(parent_block.unrealized_finalized_checkpoint) + .filter(|(parent_justified, parent_finalized)| { + parent_justified.epoch == block_epoch + && parent_finalized.epoch + 1 >= block_epoch + }); + + let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = + if let Some((parent_justified, parent_finalized)) = parent_checkpoints { + (parent_justified, parent_finalized) + } else { + let justification_and_finalization_state = match block { + BeaconBlockRef::Merge(_) | BeaconBlockRef::Altair(_) => { + let participation_cache = + per_epoch_processing::altair::ParticipationCache::new(state, spec) + .map_err(Error::ParticipationCacheBuild)?; + per_epoch_processing::altair::process_justification_and_finalization( + state, + &participation_cache, + )? 
+ } + BeaconBlockRef::Base(_) => { + let mut validator_statuses = + per_epoch_processing::base::ValidatorStatuses::new(state, spec) + .map_err(Error::ValidatorStatuses)?; + validator_statuses + .process_attestations(state) + .map_err(Error::ValidatorStatuses)?; + per_epoch_processing::base::process_justification_and_finalization( + state, + &validator_statuses.total_balances, + spec, + )? + } + }; + + ( + justification_and_finalization_state.current_justified_checkpoint(), + justification_and_finalization_state.finalized_checkpoint(), + ) + }; + + // Update best known unrealized justified & finalized checkpoints + if unrealized_justified_checkpoint.epoch + > self.fc_store.unrealized_justified_checkpoint().epoch { self.fc_store - .set_best_justified_checkpoint(state.current_justified_checkpoint()); + .set_unrealized_justified_checkpoint(unrealized_justified_checkpoint); } - if self.should_update_justified_checkpoint(current_slot, state, spec)? { + if unrealized_finalized_checkpoint.epoch + > self.fc_store.unrealized_finalized_checkpoint().epoch + { self.fc_store - .set_justified_checkpoint(state.current_justified_checkpoint()) - .map_err(Error::UnableToSetJustifiedCheckpoint)?; + .set_unrealized_finalized_checkpoint(unrealized_finalized_checkpoint); } - } - // Update finalized checkpoint. 
- if state.finalized_checkpoint().epoch > self.fc_store.finalized_checkpoint().epoch { - self.fc_store - .set_finalized_checkpoint(state.finalized_checkpoint()); - self.fc_store - .set_justified_checkpoint(state.current_justified_checkpoint()) - .map_err(Error::UnableToSetJustifiedCheckpoint)?; - } + // If block is from past epochs, try to update store's justified & finalized checkpoints right away + if block.slot().epoch(E::slots_per_epoch()) < current_slot.epoch(E::slots_per_epoch()) { + self.update_checkpoints( + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + update_justified_checkpoint_slots, + spec, + )?; + } + + ( + Some(unrealized_justified_checkpoint), + Some(unrealized_finalized_checkpoint), + ) + } else { + (None, None) + }; let target_slot = block .slot() @@ -757,32 +864,68 @@ where // This does not apply a vote to the block, it just makes fork choice aware of the block so // it can still be identified as the head even if it doesn't have any votes. - self.proto_array.process_block(ProtoBlock { - slot: block.slot(), - root: block_root, - parent_root: Some(block.parent_root()), - target_root, - current_epoch_shuffling_id: AttestationShufflingId::new( - block_root, - state, - RelativeEpoch::Current, - ) - .map_err(Error::BeaconStateError)?, - next_epoch_shuffling_id: AttestationShufflingId::new( - block_root, - state, - RelativeEpoch::Next, - ) - .map_err(Error::BeaconStateError)?, - state_root: block.state_root(), - justified_checkpoint: state.current_justified_checkpoint(), - finalized_checkpoint: state.finalized_checkpoint(), - execution_status, - })?; + self.proto_array.process_block::<E>( + ProtoBlock { + slot: block.slot(), + root: block_root, + parent_root: Some(block.parent_root()), + target_root, + current_epoch_shuffling_id: AttestationShufflingId::new( + block_root, + state, + RelativeEpoch::Current, + ) + .map_err(Error::BeaconStateError)?, + next_epoch_shuffling_id: AttestationShufflingId::new( + block_root, + state, + 
RelativeEpoch::Next, + ) + .map_err(Error::BeaconStateError)?, + state_root: block.state_root(), + justified_checkpoint: state.current_justified_checkpoint(), + finalized_checkpoint: state.finalized_checkpoint(), + execution_status, + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + }, + current_slot, + )?; Ok(()) } + /// Update checkpoints in store if necessary + fn update_checkpoints( + &mut self, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, + slots: UpdateJustifiedCheckpointSlots, + spec: &ChainSpec, + ) -> Result<(), Error<T::Error>> { + // Update justified checkpoint. + if justified_checkpoint.epoch > self.fc_store.justified_checkpoint().epoch { + if justified_checkpoint.epoch > self.fc_store.best_justified_checkpoint().epoch { + self.fc_store + .set_best_justified_checkpoint(justified_checkpoint); + } + if self.should_update_justified_checkpoint(justified_checkpoint, slots, spec)? { + self.fc_store + .set_justified_checkpoint(justified_checkpoint) + .map_err(Error::UnableToSetJustifiedCheckpoint)?; + } + } + + // Update finalized checkpoint. + if finalized_checkpoint.epoch > self.fc_store.finalized_checkpoint().epoch { + self.fc_store.set_finalized_checkpoint(finalized_checkpoint); + self.fc_store + .set_justified_checkpoint(justified_checkpoint) + .map_err(Error::UnableToSetJustifiedCheckpoint)?; + } + Ok(()) + } + /// Validates the `epoch` against the current time according to the fork choice store. /// /// ## Specification @@ -920,9 +1063,10 @@ where current_slot: Slot, attestation: &IndexedAttestation<E>, is_from_block: AttestationFromBlock, + spec: &ChainSpec, ) -> Result<(), Error<T::Error>> { // Ensure the store is up-to-date. - self.update_time(current_slot)?; + self.update_time(current_slot, spec)?; // Ignore any attestations to the zero hash. // @@ -967,12 +1111,16 @@ where /// Call `on_tick` for all slots between `fc_store.get_current_slot()` and the provided /// `current_slot`. 
Returns the value of `self.fc_store.get_current_slot`. - pub fn update_time(&mut self, current_slot: Slot) -> Result<Slot, Error<T::Error>> { + pub fn update_time( + &mut self, + current_slot: Slot, + spec: &ChainSpec, + ) -> Result<Slot, Error<T::Error>> { while self.fc_store.get_current_slot() < current_slot { let previous_slot = self.fc_store.get_current_slot(); // Note: we are relying upon `on_tick` to update `fc_store.time` to ensure we don't // get stuck in a loop. - on_tick(&mut self.fc_store, previous_slot + 1)? + self.on_tick(previous_slot + 1, spec)? } // Process any attestations that might now be eligible. @@ -981,6 +1129,63 @@ where Ok(self.fc_store.get_current_slot()) } + /// Called whenever the current time increases. + /// + /// ## Specification + /// + /// Equivalent to: + /// + /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#on_tick + fn on_tick(&mut self, time: Slot, spec: &ChainSpec) -> Result<(), Error<T::Error>> { + let store = &mut self.fc_store; + let previous_slot = store.get_current_slot(); + + if time > previous_slot + 1 { + return Err(Error::InconsistentOnTick { + previous_slot, + time, + }); + } + + // Update store time. + store.set_current_slot(time); + + let current_slot = store.get_current_slot(); + + // Reset proposer boost if this is a new slot. + if current_slot > previous_slot { + store.set_proposer_boost_root(Hash256::zero()); + } + + // Not a new epoch, return. 
+ if !(current_slot > previous_slot + && compute_slots_since_epoch_start::<E>(current_slot) == 0) + { + return Ok(()); + } + + if store.best_justified_checkpoint().epoch > store.justified_checkpoint().epoch { + let store = &self.fc_store; + if self.is_descendant_of_finalized(store.best_justified_checkpoint().root) { + let store = &mut self.fc_store; + store + .set_justified_checkpoint(*store.best_justified_checkpoint()) + .map_err(Error::ForkChoiceStoreError)?; + } + } + + // Update store.justified_checkpoint if a better unrealized justified checkpoint is known + let unrealized_justified_checkpoint = *self.fc_store.unrealized_justified_checkpoint(); + let unrealized_finalized_checkpoint = *self.fc_store.unrealized_finalized_checkpoint(); + self.update_checkpoints( + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + UpdateJustifiedCheckpointSlots::OnTick { current_slot }, + spec, + )?; + Ok(()) + } + /// Processes and removes from the queue any queued attestations which may now be eligible for /// processing due to the slot clock incrementing. fn process_attestation_queue(&mut self) -> Result<(), Error<T::Error>> { @@ -1158,6 +1363,14 @@ where *self.fc_store.best_justified_checkpoint() } + pub fn unrealized_justified_checkpoint(&self) -> Checkpoint { + *self.fc_store.unrealized_justified_checkpoint() + } + + pub fn unrealized_finalized_checkpoint(&self) -> Checkpoint { + *self.fc_store.unrealized_finalized_checkpoint() + } + /// Returns the latest message for a given validator, if any. /// /// Returns `(block_root, block_slot)`. diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs index 6df0cbc2c2..a7085b024a 100644 --- a/consensus/fork_choice/src/fork_choice_store.rs +++ b/consensus/fork_choice/src/fork_choice_store.rs @@ -50,6 +50,12 @@ pub trait ForkChoiceStore<T: EthSpec>: Sized { /// Returns the `finalized_checkpoint`. 
fn finalized_checkpoint(&self) -> &Checkpoint; + /// Returns the `unrealized_justified_checkpoint`. + fn unrealized_justified_checkpoint(&self) -> &Checkpoint; + + /// Returns the `unrealized_finalized_checkpoint`. + fn unrealized_finalized_checkpoint(&self) -> &Checkpoint; + /// Returns the `proposer_boost_root`. fn proposer_boost_root(&self) -> Hash256; @@ -62,6 +68,12 @@ pub trait ForkChoiceStore<T: EthSpec>: Sized { /// Sets the `best_justified_checkpoint`. fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint); + /// Sets the `unrealized_justified_checkpoint`. + fn set_unrealized_justified_checkpoint(&mut self, checkpoint: Checkpoint); + + /// Sets the `unrealized_finalized_checkpoint`. + fn set_unrealized_finalized_checkpoint(&mut self, checkpoint: Checkpoint); + /// Sets the proposer boost root. fn set_proposer_boost_root(&mut self, proposer_boost_root: Hash256); } diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 6f79b488dd..6cb2010f1a 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -2,9 +2,9 @@ mod fork_choice; mod fork_choice_store; pub use crate::fork_choice::{ - AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, - InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, - QueuedAttestation, + AttestationFromBlock, CountUnrealized, Error, ForkChoice, ForkChoiceView, + ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, + PersistedForkChoice, QueuedAttestation, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation}; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 2d10319cf0..850f7c4a12 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -12,7 +12,8 @@ use beacon_chain::{ StateSkipConfig, WhenSlotSkipped, }; 
use fork_choice::{ - ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, + CountUnrealized, ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, + QueuedAttestation, }; use store::MemoryStore; use types::{ @@ -150,7 +151,7 @@ impl ForkChoiceTest { .chain .canonical_head .fork_choice_write_lock() - .update_time(self.harness.chain.slot().unwrap()) + .update_time(self.harness.chain.slot().unwrap(), &self.harness.spec) .unwrap(); func( self.harness @@ -292,6 +293,7 @@ impl ForkChoiceTest { &state, PayloadVerificationStatus::Verified, &self.harness.chain.spec, + CountUnrealized::True, ) .unwrap(); self @@ -334,6 +336,7 @@ impl ForkChoiceTest { &state, PayloadVerificationStatus::Verified, &self.harness.chain.spec, + CountUnrealized::True, ) .err() .expect("on_block did not return an error"); diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 2be46cc590..0cfa3a194f 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -78,7 +78,7 @@ impl ForkChoiceTestDefinition { let junk_shuffling_id = AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero()); - let mut fork_choice = ProtoArrayForkChoice::new( + let mut fork_choice = ProtoArrayForkChoice::new::<MainnetEthSpec>( self.finalized_block_slot, Hash256::zero(), self.justified_checkpoint, @@ -103,6 +103,7 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, &justified_state_balances, Hash256::zero(), + Slot::new(0), &spec, ) .unwrap_or_else(|e| { @@ -129,6 +130,7 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, &justified_state_balances, proposer_boost_root, + Slot::new(0), &spec, ) .unwrap_or_else(|e| { @@ -152,6 +154,7 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, &justified_state_balances, Hash256::zero(), + Slot::new(0), &spec, ); @@ -190,13 +193,17 @@ 
impl ForkChoiceTestDefinition { execution_status: ExecutionStatus::Optimistic( ExecutionBlockHash::from_root(root), ), + unrealized_justified_checkpoint: None, + unrealized_finalized_checkpoint: None, }; - fork_choice.process_block(block).unwrap_or_else(|e| { - panic!( - "process_block op at index {} returned error: {:?}", - op_index, e - ) - }); + fork_choice + .process_block::<MainnetEthSpec>(block, slot) + .unwrap_or_else(|e| { + panic!( + "process_block op at index {} returned error: {:?}", + op_index, e + ) + }); check_bytes_round_trip(&fork_choice); } Operation::ProcessAttestation { diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index f3ee4ca48f..85a15fb60e 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -97,6 +97,10 @@ pub struct ProtoNode { /// Indicates if an execution node has marked this block as valid. Also contains the execution /// block hash. pub execution_status: ExecutionStatus, + #[ssz(with = "four_byte_option_checkpoint")] + pub unrealized_justified_checkpoint: Option<Checkpoint>, + #[ssz(with = "four_byte_option_checkpoint")] + pub unrealized_finalized_checkpoint: Option<Checkpoint>, } #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] @@ -140,6 +144,7 @@ impl ProtoArray { /// - Compare the current node with the parents best-child, updating it if the current node /// should become the best child. /// - If required, update the parents best-descendant with the current node or its best-descendant. 
+ #[allow(clippy::too_many_arguments)] pub fn apply_score_changes<E: EthSpec>( &mut self, mut deltas: Vec<i64>, @@ -147,6 +152,7 @@ impl ProtoArray { finalized_checkpoint: Checkpoint, new_balances: &[u64], proposer_boost_root: Hash256, + current_slot: Slot, spec: &ChainSpec, ) -> Result<(), Error> { if deltas.len() != self.indices.len() { @@ -280,7 +286,11 @@ impl ProtoArray { // If the node has a parent, try to update its best-child and best-descendant. if let Some(parent_index) = node.parent { - self.maybe_update_best_child_and_descendant(parent_index, node_index)?; + self.maybe_update_best_child_and_descendant::<E>( + parent_index, + node_index, + current_slot, + )?; } } @@ -290,7 +300,7 @@ impl ProtoArray { /// Register a block with the fork choice. /// /// It is only sane to supply a `None` parent for the genesis block. - pub fn on_block(&mut self, block: Block) -> Result<(), Error> { + pub fn on_block<E: EthSpec>(&mut self, block: Block, current_slot: Slot) -> Result<(), Error> { // If the block is already known, simply ignore it. 
if self.indices.contains_key(&block.root) { return Ok(()); @@ -314,6 +324,8 @@ impl ProtoArray { best_child: None, best_descendant: None, execution_status: block.execution_status, + unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, }; // If the parent has an invalid execution status, return an error before adding the block to @@ -335,7 +347,11 @@ impl ProtoArray { self.nodes.push(node.clone()); if let Some(parent_index) = node.parent { - self.maybe_update_best_child_and_descendant(parent_index, node_index)?; + self.maybe_update_best_child_and_descendant::<E>( + parent_index, + node_index, + current_slot, + )?; if matches!(block.execution_status, ExecutionStatus::Valid(_)) { self.propagate_execution_payload_validation_by_index(parent_index)?; @@ -604,7 +620,11 @@ impl ProtoArray { /// been called without a subsequent `Self::apply_score_changes` call. This is because /// `on_new_block` does not attempt to walk backwards through the tree and update the /// best-child/best-descendant links. - pub fn find_head(&self, justified_root: &Hash256) -> Result<Hash256, Error> { + pub fn find_head<E: EthSpec>( + &self, + justified_root: &Hash256, + current_slot: Slot, + ) -> Result<Hash256, Error> { let justified_index = self .indices .get(justified_root) @@ -637,7 +657,7 @@ impl ProtoArray { .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; // Perform a sanity check that the node is indeed valid to be the head. - if !self.node_is_viable_for_head(best_node) { + if !self.node_is_viable_for_head::<E>(best_node, current_slot) { return Err(Error::InvalidBestNode(Box::new(InvalidBestNodeInfo { start_root: *justified_root, justified_checkpoint: self.justified_checkpoint, @@ -733,10 +753,11 @@ impl ProtoArray { /// best-descendant. /// - The child is not the best child but becomes the best child. /// - The child is not the best child and does not become the best child. 
- fn maybe_update_best_child_and_descendant( + fn maybe_update_best_child_and_descendant<E: EthSpec>( &mut self, parent_index: usize, child_index: usize, + current_slot: Slot, ) -> Result<(), Error> { let child = self .nodes @@ -748,7 +769,8 @@ impl ProtoArray { .get(parent_index) .ok_or(Error::InvalidNodeIndex(parent_index))?; - let child_leads_to_viable_head = self.node_leads_to_viable_head(child)?; + let child_leads_to_viable_head = + self.node_leads_to_viable_head::<E>(child, current_slot)?; // These three variables are aliases to the three options that we may set the // `parent.best_child` and `parent.best_descendant` to. @@ -761,54 +783,54 @@ impl ProtoArray { ); let no_change = (parent.best_child, parent.best_descendant); - let (new_best_child, new_best_descendant) = if let Some(best_child_index) = - parent.best_child - { - if best_child_index == child_index && !child_leads_to_viable_head { - // If the child is already the best-child of the parent but it's not viable for - // the head, remove it. - change_to_none - } else if best_child_index == child_index { - // If the child is the best-child already, set it again to ensure that the - // best-descendant of the parent is updated. - change_to_child - } else { - let best_child = self - .nodes - .get(best_child_index) - .ok_or(Error::InvalidBestDescendant(best_child_index))?; - - let best_child_leads_to_viable_head = self.node_leads_to_viable_head(best_child)?; - - if child_leads_to_viable_head && !best_child_leads_to_viable_head { - // The child leads to a viable head, but the current best-child doesn't. + let (new_best_child, new_best_descendant) = + if let Some(best_child_index) = parent.best_child { + if best_child_index == child_index && !child_leads_to_viable_head { + // If the child is already the best-child of the parent but it's not viable for + // the head, remove it. 
+ change_to_none + } else if best_child_index == child_index { + // If the child is the best-child already, set it again to ensure that the + // best-descendant of the parent is updated. change_to_child - } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { - // The best child leads to a viable head, but the child doesn't. - no_change - } else if child.weight == best_child.weight { - // Tie-breaker of equal weights by root. - if child.root >= best_child.root { - change_to_child - } else { - no_change - } } else { - // Choose the winner by weight. - if child.weight >= best_child.weight { + let best_child = self + .nodes + .get(best_child_index) + .ok_or(Error::InvalidBestDescendant(best_child_index))?; + + let best_child_leads_to_viable_head = + self.node_leads_to_viable_head::<E>(best_child, current_slot)?; + + if child_leads_to_viable_head && !best_child_leads_to_viable_head { + // The child leads to a viable head, but the current best-child doesn't. change_to_child - } else { + } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { + // The best child leads to a viable head, but the child doesn't. no_change + } else if child.weight == best_child.weight { + // Tie-breaker of equal weights by root. + if child.root >= best_child.root { + change_to_child + } else { + no_change + } + } else { + // Choose the winner by weight. + if child.weight >= best_child.weight { + change_to_child + } else { + no_change + } } } - } - } else if child_leads_to_viable_head { - // There is no current best-child and the child is viable. - change_to_child - } else { - // There is no current best-child but the child is not viable. - no_change - }; + } else if child_leads_to_viable_head { + // There is no current best-child and the child is viable. + change_to_child + } else { + // There is no current best-child but the child is not viable. 
+ no_change + }; let parent = self .nodes @@ -823,7 +845,11 @@ impl ProtoArray { /// Indicates if the node itself is viable for the head, or if it's best descendant is viable /// for the head. - fn node_leads_to_viable_head(&self, node: &ProtoNode) -> Result<bool, Error> { + fn node_leads_to_viable_head<E: EthSpec>( + &self, + node: &ProtoNode, + current_slot: Slot, + ) -> Result<bool, Error> { let best_descendant_is_viable_for_head = if let Some(best_descendant_index) = node.best_descendant { let best_descendant = self @@ -831,12 +857,13 @@ impl ProtoArray { .get(best_descendant_index) .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; - self.node_is_viable_for_head(best_descendant) + self.node_is_viable_for_head::<E>(best_descendant, current_slot) } else { false }; - Ok(best_descendant_is_viable_for_head || self.node_is_viable_for_head(node)) + Ok(best_descendant_is_viable_for_head + || self.node_is_viable_for_head::<E>(node, current_slot)) } /// This is the equivalent to the `filter_block_tree` function in the eth2 spec: @@ -845,18 +872,43 @@ impl ProtoArray { /// /// Any node that has a different finalized or justified epoch should not be viable for the /// head. 
- fn node_is_viable_for_head(&self, node: &ProtoNode) -> bool { + fn node_is_viable_for_head<E: EthSpec>(&self, node: &ProtoNode, current_slot: Slot) -> bool { if node.execution_status.is_invalid() { return false; } - if let (Some(node_justified_checkpoint), Some(node_finalized_checkpoint)) = + let checkpoint_match_predicate = + |node_justified_checkpoint: Checkpoint, node_finalized_checkpoint: Checkpoint| { + let correct_justified = node_justified_checkpoint == self.justified_checkpoint + || self.justified_checkpoint.epoch == Epoch::new(0); + let correct_finalized = node_finalized_checkpoint == self.finalized_checkpoint + || self.finalized_checkpoint.epoch == Epoch::new(0); + correct_justified && correct_finalized + }; + + if let ( + Some(unrealized_justified_checkpoint), + Some(unrealized_finalized_checkpoint), + Some(justified_checkpoint), + Some(finalized_checkpoint), + ) = ( + node.unrealized_justified_checkpoint, + node.unrealized_finalized_checkpoint, + node.justified_checkpoint, + node.finalized_checkpoint, + ) { + if node.slot.epoch(E::slots_per_epoch()) < current_slot.epoch(E::slots_per_epoch()) { + checkpoint_match_predicate( + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + ) + } else { + checkpoint_match_predicate(justified_checkpoint, finalized_checkpoint) + } + } else if let (Some(justified_checkpoint), Some(finalized_checkpoint)) = (node.justified_checkpoint, node.finalized_checkpoint) { - (node_justified_checkpoint == self.justified_checkpoint - || self.justified_checkpoint.epoch == Epoch::new(0)) - && (node_finalized_checkpoint == self.finalized_checkpoint - || self.finalized_checkpoint.epoch == Epoch::new(0)) + checkpoint_match_predicate(justified_checkpoint, finalized_checkpoint) } else { false } diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 88bf7840c2..568cfa9640 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ 
b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -124,6 +124,8 @@ pub struct Block { /// Indicates if an execution node has marked this block as valid. Also contains the execution /// block hash. pub execution_status: ExecutionStatus, + pub unrealized_justified_checkpoint: Option<Checkpoint>, + pub unrealized_finalized_checkpoint: Option<Checkpoint>, } /// A Vec-wrapper which will grow to match any request. @@ -162,7 +164,7 @@ pub struct ProtoArrayForkChoice { impl ProtoArrayForkChoice { #[allow(clippy::too_many_arguments)] - pub fn new( + pub fn new<E: EthSpec>( finalized_block_slot: Slot, finalized_block_state_root: Hash256, justified_checkpoint: Checkpoint, @@ -193,10 +195,12 @@ impl ProtoArrayForkChoice { justified_checkpoint, finalized_checkpoint, execution_status, + unrealized_justified_checkpoint: Some(justified_checkpoint), + unrealized_finalized_checkpoint: Some(finalized_checkpoint), }; proto_array - .on_block(block) + .on_block::<E>(block, finalized_block_slot) .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?; Ok(Self { @@ -242,13 +246,17 @@ impl ProtoArrayForkChoice { Ok(()) } - pub fn process_block(&mut self, block: Block) -> Result<(), String> { + pub fn process_block<E: EthSpec>( + &mut self, + block: Block, + current_slot: Slot, + ) -> Result<(), String> { if block.parent_root.is_none() { return Err("Missing parent root".to_string()); } self.proto_array - .on_block(block) + .on_block::<E>(block, current_slot) .map_err(|e| format!("process_block_error: {:?}", e)) } @@ -258,6 +266,7 @@ impl ProtoArrayForkChoice { finalized_checkpoint: Checkpoint, justified_state_balances: &[u64], proposer_boost_root: Hash256, + current_slot: Slot, spec: &ChainSpec, ) -> Result<Hash256, String> { let old_balances = &mut self.balances; @@ -279,6 +288,7 @@ impl ProtoArrayForkChoice { finalized_checkpoint, new_balances, proposer_boost_root, + current_slot, spec, ) .map_err(|e| format!("find_head apply_score_changes failed: {:?}", 
e))?; @@ -286,7 +296,7 @@ impl ProtoArrayForkChoice { *old_balances = new_balances.to_vec(); self.proto_array - .find_head(&justified_checkpoint.root) + .find_head::<E>(&justified_checkpoint.root, current_slot) .map_err(|e| format!("find_head failed: {:?}", e)) } @@ -341,6 +351,8 @@ impl ProtoArrayForkChoice { justified_checkpoint, finalized_checkpoint, execution_status: block.execution_status, + unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, }) } else { None @@ -485,6 +497,7 @@ fn compute_deltas( #[cfg(test)] mod test_compute_deltas { use super::*; + use types::MainnetEthSpec; /// Gives a hash that is not the zero hash (unless i is `usize::max_value)`. fn hash_from_index(i: usize) -> Hash256 { @@ -510,7 +523,7 @@ mod test_compute_deltas { root: finalized_root, }; - let mut fc = ProtoArrayForkChoice::new( + let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>( genesis_slot, state_root, genesis_checkpoint, @@ -523,34 +536,44 @@ mod test_compute_deltas { // Add block that is a finalized descendant. 
fc.proto_array - .on_block(Block { - slot: genesis_slot + 1, - root: finalized_desc, - parent_root: Some(finalized_root), - state_root, - target_root: finalized_root, - current_epoch_shuffling_id: junk_shuffling_id.clone(), - next_epoch_shuffling_id: junk_shuffling_id.clone(), - justified_checkpoint: genesis_checkpoint, - finalized_checkpoint: genesis_checkpoint, - execution_status, - }) + .on_block::<MainnetEthSpec>( + Block { + slot: genesis_slot + 1, + root: finalized_desc, + parent_root: Some(finalized_root), + state_root, + target_root: finalized_root, + current_epoch_shuffling_id: junk_shuffling_id.clone(), + next_epoch_shuffling_id: junk_shuffling_id.clone(), + justified_checkpoint: genesis_checkpoint, + finalized_checkpoint: genesis_checkpoint, + execution_status, + unrealized_justified_checkpoint: Some(genesis_checkpoint), + unrealized_finalized_checkpoint: Some(genesis_checkpoint), + }, + genesis_slot + 1, + ) .unwrap(); // Add block that is *not* a finalized descendant. fc.proto_array - .on_block(Block { - slot: genesis_slot + 1, - root: not_finalized_desc, - parent_root: None, - state_root, - target_root: finalized_root, - current_epoch_shuffling_id: junk_shuffling_id.clone(), - next_epoch_shuffling_id: junk_shuffling_id, - justified_checkpoint: genesis_checkpoint, - finalized_checkpoint: genesis_checkpoint, - execution_status, - }) + .on_block::<MainnetEthSpec>( + Block { + slot: genesis_slot + 1, + root: not_finalized_desc, + parent_root: None, + state_root, + target_root: finalized_root, + current_epoch_shuffling_id: junk_shuffling_id.clone(), + next_epoch_shuffling_id: junk_shuffling_id, + justified_checkpoint: genesis_checkpoint, + finalized_checkpoint: genesis_checkpoint, + execution_status, + unrealized_justified_checkpoint: None, + unrealized_finalized_checkpoint: None, + }, + genesis_slot + 1, + ) .unwrap(); assert!(!fc.is_descendant(unknown, unknown)); diff --git a/consensus/state_processing/src/per_epoch_processing.rs 
b/consensus/state_processing/src/per_epoch_processing.rs index d813dc42fa..cb90c67b56 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -2,6 +2,7 @@ pub use epoch_processing_summary::EpochProcessingSummary; use errors::EpochProcessingError as Error; +pub use justification_and_finalization_state::JustificationAndFinalizationState; pub use registry_updates::process_registry_updates; use safe_arith::SafeArith; pub use slashings::process_slashings; @@ -14,6 +15,7 @@ pub mod effective_balance_updates; pub mod epoch_processing_summary; pub mod errors; pub mod historical_roots_update; +pub mod justification_and_finalization_state; pub mod registry_updates; pub mod resets; pub mod slashings; diff --git a/consensus/state_processing/src/per_epoch_processing/altair.rs b/consensus/state_processing/src/per_epoch_processing/altair.rs index 1011abe28f..d5df2fc975 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair.rs @@ -33,7 +33,9 @@ pub fn process_epoch<T: EthSpec>( let sync_committee = state.current_sync_committee()?.clone(); // Justification and finalization. 
- process_justification_and_finalization(state, &participation_cache)?; + let justification_and_finalization_state = + process_justification_and_finalization(state, &participation_cache)?; + justification_and_finalization_state.apply_changes_to_state(state); process_inactivity_updates(state, &participation_cache, spec)?; diff --git a/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs index f47d9c0e68..1f17cf56e0 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs @@ -1,17 +1,21 @@ use super::ParticipationCache; -use crate::per_epoch_processing::weigh_justification_and_finalization; use crate::per_epoch_processing::Error; +use crate::per_epoch_processing::{ + weigh_justification_and_finalization, JustificationAndFinalizationState, +}; use safe_arith::SafeArith; use types::consts::altair::TIMELY_TARGET_FLAG_INDEX; use types::{BeaconState, EthSpec}; /// Update the justified and finalized checkpoints for matching target attestations. pub fn process_justification_and_finalization<T: EthSpec>( - state: &mut BeaconState<T>, + state: &BeaconState<T>, participation_cache: &ParticipationCache, -) -> Result<(), Error> { +) -> Result<JustificationAndFinalizationState<T>, Error> { + let justification_and_finalization_state = JustificationAndFinalizationState::new(state); + if state.current_epoch() <= T::genesis_epoch().safe_add(1)? 
{ - return Ok(()); + return Ok(justification_and_finalization_state); } let previous_epoch = state.previous_epoch(); @@ -24,7 +28,7 @@ pub fn process_justification_and_finalization<T: EthSpec>( let previous_target_balance = previous_indices.total_balance()?; let current_target_balance = current_indices.total_balance()?; weigh_justification_and_finalization( - state, + justification_and_finalization_state, total_active_balance, previous_target_balance, current_target_balance, diff --git a/consensus/state_processing/src/per_epoch_processing/base.rs b/consensus/state_processing/src/per_epoch_processing/base.rs index 4ae2207ff2..cb7e7d4b30 100644 --- a/consensus/state_processing/src/per_epoch_processing/base.rs +++ b/consensus/state_processing/src/per_epoch_processing/base.rs @@ -31,7 +31,9 @@ pub fn process_epoch<T: EthSpec>( validator_statuses.process_attestations(state)?; // Justification and finalization. - process_justification_and_finalization(state, &validator_statuses.total_balances, spec)?; + let justification_and_finalization_state = + process_justification_and_finalization(state, &validator_statuses.total_balances, spec)?; + justification_and_finalization_state.apply_changes_to_state(state); // Rewards and Penalties. 
process_rewards_and_penalties(state, &mut validator_statuses, spec)?; diff --git a/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs index 89fb506eec..9792b54507 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs @@ -1,21 +1,25 @@ use crate::per_epoch_processing::base::TotalBalances; -use crate::per_epoch_processing::weigh_justification_and_finalization; use crate::per_epoch_processing::Error; +use crate::per_epoch_processing::{ + weigh_justification_and_finalization, JustificationAndFinalizationState, +}; use safe_arith::SafeArith; use types::{BeaconState, ChainSpec, EthSpec}; /// Update the justified and finalized checkpoints for matching target attestations. pub fn process_justification_and_finalization<T: EthSpec>( - state: &mut BeaconState<T>, + state: &BeaconState<T>, total_balances: &TotalBalances, _spec: &ChainSpec, -) -> Result<(), Error> { +) -> Result<JustificationAndFinalizationState<T>, Error> { + let justification_and_finalization_state = JustificationAndFinalizationState::new(state); + if state.current_epoch() <= T::genesis_epoch().safe_add(1)? 
{ - return Ok(()); + return Ok(justification_and_finalization_state); } weigh_justification_and_finalization( - state, + justification_and_finalization_state, total_balances.current_epoch(), total_balances.previous_epoch_target_attesters(), total_balances.current_epoch_target_attesters(), diff --git a/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs b/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs new file mode 100644 index 0000000000..d8a641f464 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs @@ -0,0 +1,115 @@ +use types::{BeaconState, BeaconStateError, BitVector, Checkpoint, Epoch, EthSpec, Hash256}; + +/// This is a subset of the `BeaconState` which is used to compute justification and finality +/// without modifying the `BeaconState`. +/// +/// A `JustificationAndFinalizationState` can be created from a `BeaconState` to compute +/// justification/finality changes and then applied to a `BeaconState` to enshrine those changes. +#[must_use = "this value must be applied to a state or explicitly dropped"] +pub struct JustificationAndFinalizationState<T: EthSpec> { + /* + * Immutable fields. + */ + previous_epoch: Epoch, + previous_epoch_target_root: Result<Hash256, BeaconStateError>, + current_epoch: Epoch, + current_epoch_target_root: Result<Hash256, BeaconStateError>, + /* + * Mutable fields. 
+ */ + previous_justified_checkpoint: Checkpoint, + current_justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, + justification_bits: BitVector<T::JustificationBitsLength>, +} + +impl<T: EthSpec> JustificationAndFinalizationState<T> { + pub fn new(state: &BeaconState<T>) -> Self { + let previous_epoch = state.previous_epoch(); + let current_epoch = state.current_epoch(); + Self { + previous_epoch, + previous_epoch_target_root: state.get_block_root_at_epoch(previous_epoch).copied(), + current_epoch, + current_epoch_target_root: state.get_block_root_at_epoch(current_epoch).copied(), + previous_justified_checkpoint: state.previous_justified_checkpoint(), + current_justified_checkpoint: state.current_justified_checkpoint(), + finalized_checkpoint: state.finalized_checkpoint(), + justification_bits: state.justification_bits().clone(), + } + } + + pub fn apply_changes_to_state(self, state: &mut BeaconState<T>) { + let Self { + /* + * Immutable fields do not need to be used. + */ + previous_epoch: _, + previous_epoch_target_root: _, + current_epoch: _, + current_epoch_target_root: _, + /* + * Mutable fields *must* be used. 
+ */ + previous_justified_checkpoint, + current_justified_checkpoint, + finalized_checkpoint, + justification_bits, + } = self; + + *state.previous_justified_checkpoint_mut() = previous_justified_checkpoint; + *state.current_justified_checkpoint_mut() = current_justified_checkpoint; + *state.finalized_checkpoint_mut() = finalized_checkpoint; + *state.justification_bits_mut() = justification_bits; + } + + pub fn previous_epoch(&self) -> Epoch { + self.previous_epoch + } + + pub fn current_epoch(&self) -> Epoch { + self.current_epoch + } + + pub fn get_block_root_at_epoch(&self, epoch: Epoch) -> Result<Hash256, BeaconStateError> { + if epoch == self.previous_epoch { + self.previous_epoch_target_root.clone() + } else if epoch == self.current_epoch { + self.current_epoch_target_root.clone() + } else { + Err(BeaconStateError::SlotOutOfBounds) + } + } + + pub fn previous_justified_checkpoint(&self) -> Checkpoint { + self.previous_justified_checkpoint + } + + pub fn previous_justified_checkpoint_mut(&mut self) -> &mut Checkpoint { + &mut self.previous_justified_checkpoint + } + + pub fn current_justified_checkpoint_mut(&mut self) -> &mut Checkpoint { + &mut self.current_justified_checkpoint + } + + pub fn current_justified_checkpoint(&self) -> Checkpoint { + self.current_justified_checkpoint + } + + pub fn finalized_checkpoint(&self) -> Checkpoint { + self.finalized_checkpoint + } + + pub fn finalized_checkpoint_mut(&mut self) -> &mut Checkpoint { + &mut self.finalized_checkpoint + } + + pub fn justification_bits(&self) -> &BitVector<T::JustificationBitsLength> { + &self.justification_bits + } + + pub fn justification_bits_mut(&mut self) -> &mut BitVector<T::JustificationBitsLength> { + &mut self.justification_bits + } +} diff --git a/consensus/state_processing/src/per_epoch_processing/weigh_justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/weigh_justification_and_finalization.rs index 6e90ee8f37..96f6a8ef14 100644 --- 
a/consensus/state_processing/src/per_epoch_processing/weigh_justification_and_finalization.rs +++ b/consensus/state_processing/src/per_epoch_processing/weigh_justification_and_finalization.rs @@ -1,16 +1,16 @@ -use crate::per_epoch_processing::Error; +use crate::per_epoch_processing::{Error, JustificationAndFinalizationState}; use safe_arith::SafeArith; use std::ops::Range; -use types::{BeaconState, Checkpoint, EthSpec}; +use types::{Checkpoint, EthSpec}; /// Update the justified and finalized checkpoints for matching target attestations. #[allow(clippy::if_same_then_else)] // For readability and consistency with spec. pub fn weigh_justification_and_finalization<T: EthSpec>( - state: &mut BeaconState<T>, + mut state: JustificationAndFinalizationState<T>, total_active_balance: u64, previous_target_balance: u64, current_target_balance: u64, -) -> Result<(), Error> { +) -> Result<JustificationAndFinalizationState<T>, Error> { let previous_epoch = state.previous_epoch(); let current_epoch = state.current_epoch(); @@ -24,7 +24,7 @@ pub fn weigh_justification_and_finalization<T: EthSpec>( if previous_target_balance.safe_mul(3)? >= total_active_balance.safe_mul(2)? { *state.current_justified_checkpoint_mut() = Checkpoint { epoch: previous_epoch, - root: *state.get_block_root_at_epoch(previous_epoch)?, + root: state.get_block_root_at_epoch(previous_epoch)?, }; state.justification_bits_mut().set(1, true)?; } @@ -32,7 +32,7 @@ pub fn weigh_justification_and_finalization<T: EthSpec>( if current_target_balance.safe_mul(3)? >= total_active_balance.safe_mul(2)? 
{ *state.current_justified_checkpoint_mut() = Checkpoint { epoch: current_epoch, - root: *state.get_block_root_at_epoch(current_epoch)?, + root: state.get_block_root_at_epoch(current_epoch)?, }; state.justification_bits_mut().set(0, true)?; } @@ -66,5 +66,5 @@ pub fn weigh_justification_and_finalization<T: EthSpec>( *state.finalized_checkpoint_mut() = old_current_justified_checkpoint; } - Ok(()) + Ok(state) } diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index 55135a8a26..43396dedc0 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -129,6 +129,7 @@ macro_rules! impl_test_random_for_u8_array { }; } +impl_test_random_for_u8_array!(3); impl_test_random_for_u8_array!(4); impl_test_random_for_u8_array!(32); impl_test_random_for_u8_array!(48); diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 6cc0e5959b..ddf0cdc8cb 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -37,3 +37,4 @@ web3 = { version = "0.18.0", default-features = false, features = ["http-tls", " eth1_test_rig = { path = "../testing/eth1_test_rig" } sensitive_url = { path = "../common/sensitive_url" } eth2 = { path = "../common/eth2" } +snap = "1.0.1" diff --git a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs index 3f272780db..5d988ee181 100644 --- a/lcli/src/parse_ssz.rs +++ b/lcli/src/parse_ssz.rs @@ -1,7 +1,9 @@ use clap::ArgMatches; use clap_utils::parse_required; use serde::Serialize; +use snap::raw::Decoder; use ssz::Decode; +use std::fs; use std::fs::File; use std::io::Read; use std::str::FromStr; @@ -29,11 +31,18 @@ pub fn run_parse_ssz<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> { let filename = matches.value_of("ssz-file").ok_or("No file supplied")?; let format = parse_required(matches, "format")?; - let mut bytes = vec![]; - let mut file = - File::open(filename).map_err(|e| format!("Unable to open {}: {}", filename, e))?; - file.read_to_end(&mut bytes) - 
.map_err(|e| format!("Unable to read {}: {}", filename, e))?; + let bytes = if filename.ends_with("ssz_snappy") { + let bytes = fs::read(filename).unwrap(); + let mut decoder = Decoder::new(); + decoder.decompress_vec(&bytes).unwrap() + } else { + let mut bytes = vec![]; + let mut file = + File::open(filename).map_err(|e| format!("Unable to open {}: {}", filename, e))?; + file.read_to_end(&mut bytes) + .map_err(|e| format!("Unable to read {}: {}", filename, e))?; + bytes + }; info!("Using {} spec", T::spec_name()); info!("Type: {:?}", type_str); diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 08722c8e46..7546c96a78 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -88,17 +88,23 @@ impl<E: EthSpec> EpochTransition<E> for JustificationAndFinalization { BeaconState::Base(_) => { let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; validator_statuses.process_attestations(state)?; - base::process_justification_and_finalization( - state, - &validator_statuses.total_balances, - spec, - ) + let justification_and_finalization_state = + base::process_justification_and_finalization( + state, + &validator_statuses.total_balances, + spec, + )?; + justification_and_finalization_state.apply_changes_to_state(state); + Ok(()) } BeaconState::Altair(_) | BeaconState::Merge(_) => { - altair::process_justification_and_finalization( - state, - &altair::ParticipationCache::new(state, spec).unwrap(), - ) + let justification_and_finalization_state = + altair::process_justification_and_finalization( + state, + &altair::ParticipationCache::new(state, spec).unwrap(), + )?; + justification_and_finalization_state.apply_changes_to_state(state); + Ok(()) } } } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 4f9f4dacad..4d90bb161f 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ 
b/testing/ef_tests/src/cases/fork_choice.rs @@ -7,7 +7,7 @@ use beacon_chain::{ obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, }, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainTypes, CachedHead, + BeaconChainTypes, CachedHead, CountUnrealized, }; use serde_derive::Deserialize; use ssz_derive::Decode; @@ -16,8 +16,8 @@ use std::future::Future; use std::sync::Arc; use std::time::Duration; use types::{ - Attestation, BeaconBlock, BeaconState, Checkpoint, EthSpec, ExecutionBlockHash, ForkName, - Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, + Attestation, AttesterSlashing, BeaconBlock, BeaconState, Checkpoint, EthSpec, + ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, }; #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] @@ -45,17 +45,20 @@ pub struct Checks { justified_checkpoint_root: Option<Hash256>, finalized_checkpoint: Option<Checkpoint>, best_justified_checkpoint: Option<Checkpoint>, + u_justified_checkpoint: Option<Checkpoint>, + u_finalized_checkpoint: Option<Checkpoint>, proposer_boost_root: Option<Hash256>, } #[derive(Debug, Clone, Deserialize)] #[serde(untagged, deny_unknown_fields)] -pub enum Step<B, A, P> { +pub enum Step<B, A, P, S> { Tick { tick: u64 }, ValidBlock { block: B }, MaybeValidBlock { block: B, valid: bool }, Attestation { attestation: A }, PowBlock { pow_block: P }, + AttesterSlashing { attester_slashing: S }, Checks { checks: Box<Checks> }, } @@ -71,16 +74,13 @@ pub struct ForkChoiceTest<E: EthSpec> { pub description: String, pub anchor_state: BeaconState<E>, pub anchor_block: BeaconBlock<E>, - pub steps: Vec<Step<SignedBeaconBlock<E>, Attestation<E>, PowBlock>>, + #[allow(clippy::type_complexity)] + pub steps: Vec<Step<SignedBeaconBlock<E>, Attestation<E>, PowBlock, AttesterSlashing<E>>>, } -/// Spec for fork choice tests, with proposer boosting enabled. 
-/// -/// This function can be deleted once `ChainSpec::mainnet` enables proposer boosting by default. +/// Spec to be used for fork choice tests. pub fn fork_choice_spec<E: EthSpec>(fork_name: ForkName) -> ChainSpec { - let mut spec = testing_spec::<E>(fork_name); - spec.proposer_score_boost = Some(70); - spec + testing_spec::<E>(fork_name) } impl<E: EthSpec> LoadCase for ForkChoiceTest<E> { @@ -93,7 +93,8 @@ impl<E: EthSpec> LoadCase for ForkChoiceTest<E> { .expect("path must be valid OsStr") .to_string(); let spec = &fork_choice_spec::<E>(fork_name); - let steps: Vec<Step<String, String, String>> = yaml_decode_file(&path.join("steps.yaml"))?; + let steps: Vec<Step<String, String, String, String>> = + yaml_decode_file(&path.join("steps.yaml"))?; // Resolve the object names in `steps.yaml` into actual decoded block/attestation objects. let steps = steps .into_iter() @@ -119,6 +120,10 @@ impl<E: EthSpec> LoadCase for ForkChoiceTest<E> { ssz_decode_file(&path.join(format!("{}.ssz_snappy", pow_block))) .map(|pow_block| Step::PowBlock { pow_block }) } + Step::AttesterSlashing { attester_slashing } => { + ssz_decode_file(&path.join(format!("{}.ssz_snappy", attester_slashing))) + .map(|attester_slashing| Step::AttesterSlashing { attester_slashing }) + } Step::Checks { checks } => Ok(Step::Checks { checks }), }) .collect::<Result<_, _>>()?; @@ -159,7 +164,10 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> { // TODO(merge): re-enable this test before production. // This test is skipped until we can do retrospective confirmations of the terminal // block after an optimistic sync. 
- if self.description == "block_lookup_failed" { + if self.description == "block_lookup_failed" + //TODO(sean): enable once we implement equivocation logic (https://github.com/sigp/lighthouse/issues/3241) + || self.description == "discard_equivocations" + { return Err(Error::SkippedKnownFailure); }; @@ -172,6 +180,10 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> { } Step::Attestation { attestation } => tester.process_attestation(attestation)?, Step::PowBlock { pow_block } => tester.process_pow_block(pow_block), + //TODO(sean): enable once we implement equivocation logic (https://github.com/sigp/lighthouse/issues/3241) + Step::AttesterSlashing { + attester_slashing: _, + } => (), Step::Checks { checks } => { let Checks { head, @@ -181,6 +193,8 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> { justified_checkpoint_root, finalized_checkpoint, best_justified_checkpoint, + u_justified_checkpoint, + u_finalized_checkpoint, proposer_boost_root, } = checks.as_ref(); @@ -214,6 +228,14 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> { .check_best_justified_checkpoint(*expected_best_justified_checkpoint)?; } + if let Some(expected_u_justified_checkpoint) = u_justified_checkpoint { + tester.check_u_justified_checkpoint(*expected_u_justified_checkpoint)?; + } + + if let Some(expected_u_finalized_checkpoint) = u_finalized_checkpoint { + tester.check_u_finalized_checkpoint(*expected_u_finalized_checkpoint)?; + } + if let Some(expected_proposer_boost_root) = proposer_boost_root { tester.check_expected_proposer_boost_root(*expected_proposer_boost_root)?; } @@ -319,14 +341,18 @@ impl<E: EthSpec> Tester<E> { .chain .canonical_head .fork_choice_write_lock() - .update_time(slot) + .update_time(slot, &self.spec) .unwrap(); } pub fn process_block(&self, block: SignedBeaconBlock<E>, valid: bool) -> Result<(), Error> { let block_root = block.canonical_root(); let block = Arc::new(block); - let result = self.block_on_dangerous(self.harness.chain.process_block(block.clone()))?; + let result = 
self.block_on_dangerous( + self.harness + .chain + .process_block(block.clone(), CountUnrealized::True), + )?; if result.is_ok() != valid { return Err(Error::DidntFail(format!( "block with root {} was valid={} whilst test expects valid={}. result: {:?}", @@ -384,6 +410,7 @@ impl<E: EthSpec> Tester<E> { &state, PayloadVerificationStatus::Irrelevant, &self.harness.chain.spec, + self.harness.chain.config.count_unrealized.into(), ); if result.is_ok() { @@ -520,6 +547,40 @@ impl<E: EthSpec> Tester<E> { ) } + pub fn check_u_justified_checkpoint( + &self, + expected_checkpoint: Checkpoint, + ) -> Result<(), Error> { + let u_justified_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .unrealized_justified_checkpoint(); + check_equal( + "u_justified_checkpoint", + u_justified_checkpoint, + expected_checkpoint, + ) + } + + pub fn check_u_finalized_checkpoint( + &self, + expected_checkpoint: Checkpoint, + ) -> Result<(), Error> { + let u_finalized_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .unrealized_finalized_checkpoint(); + check_equal( + "u_finalized_checkpoint", + u_finalized_checkpoint, + expected_checkpoint, + ) + } + pub fn check_expected_proposer_boost_root( &self, expected_proposer_boost_root: Hash256, From f7354abe0f7c60ab01f8a77a25f6c312197f15d5 Mon Sep 17 00:00:00 2001 From: ethDreamer <mark@sigmaprime.io> Date: Tue, 26 Jul 2022 02:17:21 +0000 Subject: [PATCH 086/184] Fix Block Cache Range Math for Faster Syncing (#3358) ## Issue Addressed While messing with the deposit snapshot stuff, I had my proxy running and noticed the beacon node wasn't syncing the block cache continuously. There were long periods where it did nothing. I believe this was caused by a logical error introduced in #3234 that dealt with an issue that arose while syncing the block cache on Ropsten. 
The problem is that when the block cache is initially syncing, it will trigger the logic that detects the cache is far behind the execution chain in time. This will trigger a batch syncing mechanism which is intended to sync further ahead than the chain would normally. But the batch syncing is actually slower than the range this function usually estimates (in this scenario). ## Proposed Changes I believe I've fixed this function by taking the end of the range to be the maximum of (batch syncing range, usual range). I've also renamed and restructured some things a bit. It's equivalent logic but I think it's more clear what's going on. --- beacon_node/eth1/src/service.rs | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 36a637d2ae..6f40015fac 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -908,11 +908,12 @@ impl Service { /// Returns the range of new block numbers to be considered for the given head type. 
fn relevant_new_block_numbers( &self, - remote_highest_block: u64, + remote_highest_block_number: u64, remote_highest_block_timestamp: Option<u64>, head_type: HeadType, ) -> Result<Option<RangeInclusive<u64>>, SingleEndpointError> { let follow_distance = self.cache_follow_distance(); + let latest_cached_block = self.latest_cached_block(); let next_required_block = match head_type { HeadType::Deposit => self .deposits() @@ -920,18 +921,14 @@ impl Service { .last_processed_block .map(|n| n + 1) .unwrap_or_else(|| self.config().deposit_contract_deploy_block), - HeadType::BlockCache => self - .inner - .block_cache - .read() - .highest_block_number() - .map(|n| n + 1) + HeadType::BlockCache => latest_cached_block + .as_ref() + .map(|block| block.number + 1) .unwrap_or_else(|| self.config().lowest_cached_block_number), }; - let latest_cached_block = self.latest_cached_block(); relevant_block_range( - remote_highest_block, + remote_highest_block_number, remote_highest_block_timestamp, next_required_block, follow_distance, @@ -1293,9 +1290,12 @@ fn relevant_block_range( let lagging = latest_cached_block.timestamp + cache_follow_distance * spec.seconds_per_eth1_block < remote_highest_block_timestamp; - let end_block = std::cmp::min( - remote_highest_block_number.saturating_sub(CATCHUP_MIN_FOLLOW_DISTANCE), - next_required_block + CATCHUP_BATCH_SIZE, + let end_block = std::cmp::max( + std::cmp::min( + remote_highest_block_number.saturating_sub(CATCHUP_MIN_FOLLOW_DISTANCE), + next_required_block + CATCHUP_BATCH_SIZE, + ), + remote_highest_block_number.saturating_sub(cache_follow_distance), ); if lagging && next_required_block <= end_block { return Ok(Some(next_required_block..=end_block)); From b82e2dfc51bc5b981d6fbe93c9aad6fdcaa73f4a Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Tue, 26 Jul 2022 02:17:22 +0000 Subject: [PATCH 087/184] Add merge transition docs (#3361) ## Issue Addressed NA ## Proposed Changes Add some documentation about migrating 
pre-merge Lighthouse to post-merge Lighthouse. ## Additional Info NA --- book/src/SUMMARY.md | 1 + book/src/merge-migration.md | 101 ++++++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+) create mode 100644 book/src/merge-migration.md diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index e2a2eb37eb..c3e99d7a86 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -3,6 +3,7 @@ * [Introduction](./intro.md) * [Become a Validator](./mainnet-validator.md) * [Become a Testnet Validator](./testnet-validator.md) +* [Merge Migration](./merge-migration.md) * [Installation](./installation.md) * [System Requirements](./system-requirements.md) * [Pre-Built Binaries](./installation-binaries.md) diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md new file mode 100644 index 0000000000..6ed6a9977a --- /dev/null +++ b/book/src/merge-migration.md @@ -0,0 +1,101 @@ +# Merge Migration + +This document provides detail for users who have been running a Lighthouse node *before* the merge +and are now preparing their node for the merge transition. + +## "Pre-Merge" and "Post-Merge" + +As of [v2.4.0](https://github.com/sigp/lighthouse/releases/tag/v2.4.0) Lighthouse can be considered +to have two modes: + +- "Pre-merge": `--execution-endpoint` flag *is not* provided. +- "Post-merge": `--execution-endpoint` flag *is* provided. + +A "pre-merge" node, by definition, will fail to transition through the merge. Such a node *must* be +upgraded before the Bellatrix upgrade. 
+ +## Migration + +Let us look at an example of the command line arguments for a pre-merge production staking BN: + +```bash +lighthouse \ + --network mainnet \ + beacon_node \ + --http \ + --eth1-endpoints http://localhost:8545,https://TOKEN@eth2-beacon-mainnet.infura.io +``` + +Converting the above to a post-merge configuration would render: + +```bash +lighthouse \ + --network mainnet \ + beacon_node \ + --http \ + --execution-endpoint http://localhost:8551 + --execution-jwt ~/.ethereum/geth/jwtsecret +``` + +The changes here are: + +1. Remove `--eth1-endpoints` + - The endpoint at `localhost` can be retained, it is our local execution engine. Once it is + upgraded to a merge-compatible release it will be used in the post-merge environment. + - The `infura.io` endpoint will be abandoned, Infura and most other third-party node providers + *are not* compatible with post-merge BNs. +2. Add the `--execution-endpoint` flag. + - We have reused the node at `localhost`, however we've switched to the authenticated engine API + port `8551`. All execution engines will have a specific port for this API, however it might + not be `8551`, see their documentation for details. +3. Add the `--execution-jwt` flag. + - This is the path to a file containing a 32-byte secret for authenticating the BN with the + execution engine. In this example our execution engine is Geth, so we've chosen the default + location for Geth. Your execution engine might have a different path. It is critical that both + the BN and execution engine reference a file with the same value, otherwise they'll fail to + communicate. + +Note that the `--network` and `--http` flags haven't changed. The only changes required for the +merge are ensuring that `--execution-endpoint` and `--execution-jwt` flags are provided! In fact, +you can even leave the `--eth1-endpoints` flag there, it will be ignored. 
This is not recommended as +a deprecation warning will be logged and Lighthouse *may* remove these flags in the future. + +There are no changes required for the validator client, apart from ensure it has been updated to the +same version as the beacon node. Check the version with `lighthouse --version`. + +## The relationship between `--eth1-endpoints` and `--execution-endpoint` + +Pre-merge users will be familiar with the `--eth1-endpoints` flag. This provides a list of Ethereum +"eth1" nodes (e.g., Geth, Nethermind, etc). Each beacon node (BN) can have multiple eth1 endpoints +and each eth1 endpoint can have many BNs connection (many-to-many relationship). The eth1 node +provides a source of truth for the [deposit +contract](https://ethereum.org/en/staking/deposit-contract/) and beacon chain proposers include this +information in beacon blocks in order to on-board new validators. BNs exclusively use the `eth` +namespace on the eth1 [JSON-RPC API](https://ethereum.org/en/developers/docs/apis/json-rpc/) to +achieve this. + +To progress through the Bellatrix upgrade nodes will need a *new* connection to an "eth1" node; +`--execution-endpoint`. This connection has a few different properties. Firstly, the term "eth1 +node" has been deprecated and replaced with "execution engine". Whilst "eth1 node" and "execution +engine" still refer to the same projects (Geth, Nethermind, etc) the former refers to the pre-merge +versions and the latter refers to post-merge versions. Secondly, there is a strict one-to-one +relationship between Lighthouse and the execution engine; only one Lighthouse node can connect to +one execution engine. Thirdly, it is impossible to fully verify the post-merge chain without an +execution engine. It *was* possible to verify the pre-merge chain without an eth1 node, it was just +impossible to reliably *propose* blocks without it. 
+ +Since an execution engine is a hard requirement in the post-merge chain and the execution engine +contains the transaction history of the Ethereum chain, there is no longer a need for the +`--eth1-endpoints` flag for information about the deposit contract. The `--execution-endpoint` can +be used for all such queries. Therefore we can say that where `--execution-endpoint` is included +`--eth1-endpoints` should be omitted. + +## What about multiple execution endpoints? + +Since an execution engine can only have one connected BN, the value of having multiple execution +engines connected to the same BN is very low. An execution engine cannot be shared between BNs to +reduce costs. + +Whilst having multiple execution engines connected to a single BN might be useful for advanced +testing scenarios, Lighthouse (and other consensus clients) have decided to support *only one* +execution endpoint. Such scenarios could be resolved with a custom-made HTTP proxy. From 904dd6252447d8162b6059e83de25424060dfe34 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Tue, 26 Jul 2022 02:17:24 +0000 Subject: [PATCH 088/184] Strict fee recipient (#3363) ## Issue Addressed Resolves #3267 Resolves #3156 ## Proposed Changes - Move the log for fee recipient checks from proposer cache insertion into block proposal so we are directly checking what we get from the EE - Only log when there is a discrepancy with the local EE, not when using the builder API. In the `builder-api` branch there is an `info` log when there is a discrepancy, I think it is more likely there will be a difference in fee recipient with the builder api because proposer payments might be made via a transaction in the block. Not really sure what patterns will become commong. 
- Upgrade the log from a `warn` to an `error` - not actually sure which we want, but I think this is worth an error because the local EE with default transaction ordering I think should pretty much always use the provided fee recipient - add a `strict-fee-recipient` flag to the VC so we only sign blocks with matching fee recipients. Falls back from the builder API to the local API if there is a discrepancy . Co-authored-by: realbigsean <sean@sigmaprime.io> --- beacon_node/execution_layer/src/lib.rs | 30 +++++++++++--------------- book/src/suggested-fee-recipient.md | 12 ++++++++++- consensus/types/src/payload.rs | 9 ++++++++ lighthouse/tests/validator_client.rs | 13 +++++++++++ validator_client/src/block_service.rs | 23 ++++++++++++++++++++ validator_client/src/cli.rs | 13 +++++++++++ validator_client/src/config.rs | 8 +++++++ validator_client/src/lib.rs | 1 + 8 files changed, 91 insertions(+), 18 deletions(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index e89e9ba814..5b82018749 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -450,23 +450,6 @@ impl<T: EthSpec> ExecutionLayer<T> { if let Some(preparation_data_entry) = self.proposer_preparation_data().await.get(&proposer_index) { - if let Some(suggested_fee_recipient) = self.inner.suggested_fee_recipient { - if preparation_data_entry.preparation_data.fee_recipient != suggested_fee_recipient - { - warn!( - self.log(), - "Inconsistent fee recipient"; - "msg" => "The fee recipient returned from the Execution Engine differs \ - from the suggested_fee_recipient set on the beacon node. This could \ - indicate that fees are being diverted to another address. 
Please \ - ensure that the value of suggested_fee_recipient is set correctly and \ - that the Execution Engine is trusted.", - "proposer_index" => ?proposer_index, - "fee_recipient" => ?preparation_data_entry.preparation_data.fee_recipient, - "suggested_fee_recipient" => ?suggested_fee_recipient, - ) - } - } // The values provided via the API have first priority. preparation_data_entry.preparation_data.fee_recipient } else if let Some(address) = self.inner.suggested_fee_recipient { @@ -689,6 +672,19 @@ impl<T: EthSpec> ExecutionLayer<T> { .get_payload_v1::<T>(payload_id) .await .map(|full_payload| { + if full_payload.fee_recipient != suggested_fee_recipient { + error!( + self.log(), + "Inconsistent fee recipient"; + "msg" => "The fee recipient returned from the Execution Engine differs \ + from the suggested_fee_recipient set on the beacon node. This could \ + indicate that fees are being diverted to another address. Please \ + ensure that the value of suggested_fee_recipient is set correctly and \ + that the Execution Engine is trusted.", + "fee_recipient" => ?full_payload.fee_recipient, + "suggested_fee_recipient" => ?suggested_fee_recipient, + ); + } if f(self, &full_payload).is_some() { warn!( self.log(), diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md index 5c77081c39..35338549e9 100644 --- a/book/src/suggested-fee-recipient.md +++ b/book/src/suggested-fee-recipient.md @@ -10,7 +10,8 @@ coinbase and the recipient of other fees or rewards. There is no guarantee that an execution node will use the `suggested_fee_recipient` to collect fees, it may use any address it chooses. It is assumed that an honest execution node *will* use the -`suggested_fee_recipient`, but users should note this trust assumption. +`suggested_fee_recipient`, but users should note this trust assumption. Check out the +[strict fee recipient](#strict-fee-recipient) section for how to mitigate this assumption. 
The `suggested_fee_recipient` can be provided to the VC, who will transmit it to the BN. The BN also has a choice regarding the fee recipient it passes to the execution node, creating another @@ -61,6 +62,15 @@ validators where a `suggested_fee_recipient` is not loaded from another method. The `--suggested-fee-recipient` can be provided to the BN to act as a default value when the validator client does not transmit a `suggested_fee_recipient` to the BN. +## Strict Fee Recipient + +If the flag `--strict-fee-recipient` is set in the validator client, Lighthouse will refuse to sign any block whose +`fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. This applies to both the normal +block proposal flow, as well as block proposals through the builder API. Proposals through the builder API are more likely +to have a discrepancy in `fee_recipient` so you should be aware of how your connected relay sends proposer payments before +using this flag. If this flag is used, a fee recipient mismatch in the builder API flow will result in a fallback to the +local execution engine for payload construction, where a strict fee recipient check will still be applied. 
+ ## Setting the fee recipient dynamically using the keymanager API When the [validator client API](api-vc.md) is enabled, the diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index a21eeb63c2..4a8552d249 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -44,6 +44,7 @@ pub trait ExecPayload<T: EthSpec>: fn block_number(&self) -> u64; fn timestamp(&self) -> u64; fn block_hash(&self) -> ExecutionBlockHash; + fn fee_recipient(&self) -> Address; } impl<T: EthSpec> ExecPayload<T> for FullPayload<T> { @@ -74,6 +75,10 @@ impl<T: EthSpec> ExecPayload<T> for FullPayload<T> { fn block_hash(&self) -> ExecutionBlockHash { self.execution_payload.block_hash } + + fn fee_recipient(&self) -> Address { + self.execution_payload.fee_recipient + } } impl<T: EthSpec> ExecPayload<T> for BlindedPayload<T> { @@ -104,6 +109,10 @@ impl<T: EthSpec> ExecPayload<T> for BlindedPayload<T> { fn block_hash(&self) -> ExecutionBlockHash { self.execution_payload_header.block_hash } + + fn fee_recipient(&self) -> Address { + self.execution_payload_header.fee_recipient + } } #[derive(Debug, Clone, TestRandom, Serialize, Deserialize, Derivative)] diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 4ff5434687..98b159e996 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -388,3 +388,16 @@ fn no_doppelganger_protection_flag() { .run() .with_config(|config| assert!(!config.enable_doppelganger_protection)); } +#[test] +fn strict_fee_recipient_flag() { + CommandLineTest::new() + .flag("strict-fee-recipient", None) + .run() + .with_config(|config| assert!(config.strict_fee_recipient)); +} +#[test] +fn no_strict_fee_recipient_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(!config.strict_fee_recipient)); +} diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 2ba81eac7a..649f240645 100644 --- 
a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -45,6 +45,7 @@ pub struct BlockServiceBuilder<T, E: EthSpec> { graffiti: Option<Graffiti>, graffiti_file: Option<GraffitiFile>, private_tx_proposals: bool, + strict_fee_recipient: bool, } impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { @@ -57,6 +58,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { graffiti: None, graffiti_file: None, private_tx_proposals: false, + strict_fee_recipient: false, } } @@ -95,6 +97,11 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { self } + pub fn strict_fee_recipient(mut self, strict_fee_recipient: bool) -> Self { + self.strict_fee_recipient = strict_fee_recipient; + self + } + pub fn build(self) -> Result<BlockService<T, E>, String> { Ok(BlockService { inner: Arc::new(Inner { @@ -113,6 +120,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { graffiti: self.graffiti, graffiti_file: self.graffiti_file, private_tx_proposals: self.private_tx_proposals, + strict_fee_recipient: self.strict_fee_recipient, }), }) } @@ -127,6 +135,7 @@ pub struct Inner<T, E: EthSpec> { graffiti: Option<Graffiti>, graffiti_file: Option<GraffitiFile>, private_tx_proposals: bool, + strict_fee_recipient: bool, } /// Attempts to produce attestations for any block producer(s) at the start of the epoch. @@ -328,6 +337,9 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { let self_ref = &self; let proposer_index = self.validator_store.validator_index(&validator_pubkey); let validator_pubkey_ref = &validator_pubkey; + let fee_recipient = self.validator_store.get_fee_recipient(&validator_pubkey); + + let strict_fee_recipient = self.strict_fee_recipient; // Request block from first responsive beacon node. 
let block = self .beacon_nodes @@ -372,6 +384,17 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { }; drop(get_timer); + // Ensure the correctness of the execution payload's fee recipient. + if strict_fee_recipient { + if let Ok(execution_payload) = block.body().execution_payload() { + if Some(execution_payload.fee_recipient()) != fee_recipient { + return Err(BlockError::Recoverable( + "Incorrect fee recipient used by builder".to_string(), + )); + } + } + } + if proposer_index != Some(block.proposer_index()) { return Err(BlockError::Recoverable( "Proposer index does not match block proposer. Beacon chain re-orged" diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 414be2d90f..1f8b7b08ba 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -258,4 +258,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { execution payload construction during proposals.") .takes_value(false), ) + .arg( + Arg::with_name("strict-fee-recipient") + .long("strict-fee-recipient") + .help("If this flag is set, Lighthouse will refuse to sign any block whose \ + `fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. \ + This applies to both the normal block proposal flow, as well as block proposals \ + through the builder API. Proposals through the builder API are more likely to have a \ + discrepancy in `fee_recipient` so you should be aware of how your connected relay \ + sends proposer payments before using this flag. 
If this flag is used, a fee recipient \ + mismatch in the builder API flow will result in a fallback to the local execution engine \ + for payload construction, where a strict fee recipient check will still be applied.") + .takes_value(false), + ) } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index ddbe7f3630..725414b1b9 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -56,6 +56,9 @@ pub struct Config { /// A list of custom certificates that the validator client will additionally use when /// connecting to a beacon node over SSL/TLS. pub beacon_nodes_tls_certs: Option<Vec<PathBuf>>, + /// Enabling this will make sure the validator client never signs a block whose `fee_recipient` + /// does not match the `suggested_fee_recipient`. + pub strict_fee_recipient: bool, } impl Default for Config { @@ -89,6 +92,7 @@ impl Default for Config { enable_doppelganger_protection: false, beacon_nodes_tls_certs: None, private_tx_proposals: false, + strict_fee_recipient: false, } } } @@ -300,6 +304,10 @@ impl Config { config.private_tx_proposals = true; } + if cli_args.is_present("strict-fee-recipient") { + config.strict_fee_recipient = true; + } + Ok(config) } } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index b78b072cf8..1baa9f6bb2 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -414,6 +414,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> { .graffiti(config.graffiti) .graffiti_file(config.graffiti_file.clone()) .private_tx_proposals(config.private_tx_proposals) + .strict_fee_recipient(config.strict_fee_recipient) .build()?; let attestation_service = AttestationServiceBuilder::new() From d316305411cce7cd4df95abbcc575c9e2070f896 Mon Sep 17 00:00:00 2001 From: Mac L <mjladson@pm.me> Date: Tue, 26 Jul 2022 08:50:16 +0000 Subject: [PATCH 089/184] Add `is_optimistic` to `eth/v1/node/syncing` response (#3374) ## Issue Addressed As specified in the [Beacon Chain 
API specs](https://github.com/ethereum/beacon-APIs/blob/master/apis/node/syncing.yaml#L32-L35) we should return `is_optimistic` as part of the response to a query for the `eth/v1/node/syncing` endpoint. ## Proposed Changes Compute the optimistic status of the head and add it to the `SyncingData` response. --- beacon_node/http_api/src/lib.rs | 5 +++++ beacon_node/http_api/tests/tests.rs | 1 + common/eth2/src/types.rs | 1 + 3 files changed, 7 insertions(+) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 31ae7486e6..c1980bee3d 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1852,8 +1852,13 @@ pub fn serve<T: BeaconChainTypes>( // Taking advantage of saturating subtraction on slot. let sync_distance = current_slot - head_slot; + let is_optimistic = chain + .is_optimistic_head() + .map_err(warp_utils::reject::beacon_chain_error)?; + let syncing_data = api_types::SyncingData { is_syncing: network_globals.sync_state.read().is_syncing(), + is_optimistic, head_slot, sync_distance, }; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 37c267fd46..b4c29cae42 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1281,6 +1281,7 @@ impl ApiTester { let expected = SyncingData { is_syncing: false, + is_optimistic: false, head_slot, sync_distance, }; diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index c78e2c6919..3e480e0827 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -552,6 +552,7 @@ pub struct VersionData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SyncingData { pub is_syncing: bool, + pub is_optimistic: bool, pub head_slot: Slot, pub sync_distance: Slot, } From 44fae52cd7eb0b41908d2f8fb288cf859f5c5b7e Mon Sep 17 00:00:00 2001 From: Mac L <mjladson@pm.me> Date: Wed, 27 Jul 2022 00:51:05 +0000 Subject: [PATCH 090/184] Refuse to sign sync committee 
messages when head is optimistic (#3191) ## Issue Addressed Resolves #3151 ## Proposed Changes When fetching duties for sync committee contributions, check the value of `execution_optimistic` of the head block from the BN and refuse to sign any sync committee messages `if execution_optimistic == true`. ## Additional Info - Is backwards compatible with older BNs - Finding a way to add test coverage for this would be prudent. Open to suggestions. --- beacon_node/beacon_chain/src/beacon_chain.rs | 35 +++++++++++++++++-- beacon_node/beacon_chain/src/errors.rs | 3 ++ beacon_node/http_api/src/lib.rs | 6 ++++ .../src/sync_committee_service.rs | 34 ++++++++++++++---- 4 files changed, 70 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b9f9727e4c..2f35253058 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1380,10 +1380,41 @@ impl<T: BeaconChainTypes> BeaconChain<T> { pub fn get_aggregated_sync_committee_contribution( &self, sync_contribution_data: &SyncContributionData, - ) -> Option<SyncCommitteeContribution<T::EthSpec>> { - self.naive_sync_aggregation_pool + ) -> Result<Option<SyncCommitteeContribution<T::EthSpec>>, Error> { + if let Some(contribution) = self + .naive_sync_aggregation_pool .read() .get(sync_contribution_data) + { + self.filter_optimistic_sync_committee_contribution(contribution) + .map(Option::Some) + } else { + Ok(None) + } + } + + fn filter_optimistic_sync_committee_contribution( + &self, + contribution: SyncCommitteeContribution<T::EthSpec>, + ) -> Result<SyncCommitteeContribution<T::EthSpec>, Error> { + let beacon_block_root = contribution.beacon_block_root; + match self + .canonical_head + .fork_choice_read_lock() + .get_block_execution_status(&beacon_block_root) + { + // The contribution references a block that is not in fork choice, it must be + // pre-finalization. 
+ None => Err(Error::SyncContributionDataReferencesFinalizedBlock { beacon_block_root }), + // The contribution references a fully valid `beacon_block_root`. + Some(execution_status) if execution_status.is_valid_or_irrelevant() => Ok(contribution), + // The contribution references a block that has not been verified by an EL (i.e. it + // is optimistic or invalid). Don't return the block, return an error instead. + Some(execution_status) => Err(Error::HeadBlockNotFullyVerified { + beacon_block_root, + execution_status, + }), + } } /// Produce an unaggregated `Attestation` that is valid for the given `slot` and `index`. diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index d3337dfafe..189cb3fdea 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -184,6 +184,9 @@ pub enum BeaconChainError { CannotAttestToFinalizedBlock { beacon_block_root: Hash256, }, + SyncContributionDataReferencesFinalizedBlock { + beacon_block_root: Hash256, + }, RuntimeShutdown, TokioJoin(tokio::task::JoinError), ProcessInvalidExecutionPayload(JoinError), diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index c1980bee3d..3284f874f9 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2324,6 +2324,12 @@ pub fn serve<T: BeaconChainTypes>( blocking_json_task(move || { chain .get_aggregated_sync_committee_contribution(&sync_committee_data) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "unable to fetch sync contribution: {:?}", + e + )) + })? 
.map(api_types::GenericResponse::from) .ok_or_else(|| { warp_utils::reject::custom_not_found( diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs index 105bf7d27f..73d0066f20 100644 --- a/validator_client/src/sync_committee_service.rs +++ b/validator_client/src/sync_committee_service.rs @@ -4,7 +4,7 @@ use environment::RuntimeContext; use eth2::types::BlockId; use futures::future::join_all; use futures::future::FutureExt; -use slog::{crit, debug, error, info, trace}; +use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::collections::HashMap; use std::ops::Deref; @@ -174,17 +174,39 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> { return Ok(()); } - // Fetch block root for `SyncCommitteeContribution`. - let block_root = self + // Fetch `block_root` and `execution_optimistic` for `SyncCommitteeContribution`. + let response = self .beacon_nodes .first_success(RequireSynced::Yes, |beacon_node| async move { beacon_node.get_beacon_blocks_root(BlockId::Head).await }) .await .map_err(|e| e.to_string())? - .ok_or_else(|| format!("No block root found for slot {}", slot))? - .data - .root; + .ok_or_else(|| format!("No block root found for slot {}", slot))?; + + let block_root = response.data.root; + if let Some(execution_optimistic) = response.execution_optimistic { + if execution_optimistic { + warn!( + log, + "Refusing to sign sync committee messages for optimistic head block"; + "slot" => slot, + ); + return Ok(()); + } + } else if let Some(bellatrix_fork_epoch) = self.duties_service.spec.bellatrix_fork_epoch { + // If the slot is post Bellatrix, do not sign messages when we cannot verify the + // optimistic status of the head block. 
+ if slot.epoch(E::slots_per_epoch()) > bellatrix_fork_epoch { + warn!( + log, + "Refusing to sign sync committee messages for a head block with an unknown \ + optimistic status"; + "slot" => slot, + ); + return Ok(()); + } + } // Spawn one task to publish all of the sync committee signatures. let validator_duties = slot_duties.duties; From 0f62d900fe72d113bda5f3c421dd7612d66e3dda Mon Sep 17 00:00:00 2001 From: Justin Traglia <jtraglia@pm.me> Date: Wed, 27 Jul 2022 00:51:06 +0000 Subject: [PATCH 091/184] Fix some typos (#3376) ## Proposed Changes This PR fixes various minor typos in the project. --- beacon_node/beacon_chain/src/early_attester_cache.rs | 2 +- .../lighthouse_network/src/behaviour/gossip_cache.rs | 12 ++++++------ beacon_node/network/src/service.rs | 4 ++-- book/src/contributing.md | 2 +- book/src/docker.md | 2 +- book/src/installation.md | 2 +- book/src/mainnet-validator.md | 2 +- book/src/setup.md | 2 +- book/src/slasher.md | 6 +++--- book/src/slashing-protection.md | 2 +- book/src/validator-import-launchpad.md | 2 +- .../built_in_network_configs/kiln/config.yaml | 2 +- .../src/per_block_processing/tests.rs | 4 ++-- scripts/local_testnet/vars.env | 2 +- scripts/tests/vars.env | 2 +- validator_client/src/lib.rs | 2 +- 16 files changed, 25 insertions(+), 25 deletions(-) diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index 62b584968f..1ddbe13241 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -86,7 +86,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> { /// /// - There is a cache `item` present. /// - If `request_slot` is in the same epoch as `item.epoch`. - /// - If `request_index` does not exceed `item.comittee_count`. + /// - If `request_index` does not exceed `item.committee_count`. 
pub fn try_attest( &self, request_slot: Slot, diff --git a/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs b/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs index 93687e555b..4842605f7a 100644 --- a/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs @@ -30,9 +30,9 @@ pub struct GossipCache { proposer_slashing: Option<Duration>, /// Timeout for attester slashings. attester_slashing: Option<Duration>, - /// Timeout for aggregated sync commitee signatures. + /// Timeout for aggregated sync committee signatures. signed_contribution_and_proof: Option<Duration>, - /// Timeout for sync commitee messages. + /// Timeout for sync committee messages. sync_committee_message: Option<Duration>, } @@ -51,9 +51,9 @@ pub struct GossipCacheBuilder { proposer_slashing: Option<Duration>, /// Timeout for attester slashings. attester_slashing: Option<Duration>, - /// Timeout for aggregated sync commitee signatures. + /// Timeout for aggregated sync committee signatures. signed_contribution_and_proof: Option<Duration>, - /// Timeout for sync commitee messages. + /// Timeout for sync committee messages. sync_committee_message: Option<Duration>, } @@ -101,13 +101,13 @@ impl GossipCacheBuilder { self } - /// Timeout for aggregated sync commitee signatures. + /// Timeout for aggregated sync committee signatures. pub fn signed_contribution_and_proof_timeout(mut self, timeout: Duration) -> Self { self.signed_contribution_and_proof = Some(timeout); self } - /// Timeout for sync commitee messages. + /// Timeout for sync committee messages. 
pub fn sync_committee_message_timeout(mut self, timeout: Duration) -> Self { self.sync_committee_message = Some(timeout); self diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index c21183608a..9e3302af24 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -362,7 +362,7 @@ impl<T: BeaconChainTypes> NetworkService<T> { Some(msg) = self.attestation_service.next() => self.on_attestation_service_msg(msg), // process any sync committee service events - Some(msg) = self.sync_committee_service.next() => self.on_sync_commitee_service_message(msg), + Some(msg) = self.sync_committee_service.next() => self.on_sync_committee_service_message(msg), event = self.libp2p.next_event() => self.on_libp2p_event(event, &mut shutdown_sender).await, @@ -774,7 +774,7 @@ impl<T: BeaconChainTypes> NetworkService<T> { } } - fn on_sync_commitee_service_message(&mut self, msg: SubnetServiceMessage) { + fn on_sync_committee_service_message(&mut self, msg: SubnetServiceMessage) { match msg { SubnetServiceMessage::Subscribe(subnet) => { for fork_digest in self.required_gossip_fork_digests() { diff --git a/book/src/contributing.md b/book/src/contributing.md index 9204ff8463..4b21d1ecf2 100644 --- a/book/src/contributing.md +++ b/book/src/contributing.md @@ -33,7 +33,7 @@ Lighthouse maintains two permanent branches: - [`stable`][stable]: Always points to the latest stable release. - This is ideal for most users. - [`unstable`][unstable]: Used for development, contains the latest PRs. - - Developers should base thier PRs on this branch. + - Developers should base their PRs on this branch. ## Ethereum consensus client diff --git a/book/src/docker.md b/book/src/docker.md index 9a0378f091..f22b8a2008 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -73,7 +73,7 @@ The `stability` is: The `arch` is: * `-amd64` for x86_64, e.g. Intel, AMD -* `-arm64` for aarch64, e.g. Rasperry Pi 4 +* `-arm64` for aarch64, e.g. 
Raspberry Pi 4 * empty for a multi-arch image (works on either `amd64` or `arm64` platforms) The `modernity` is: diff --git a/book/src/installation.md b/book/src/installation.md index 38fbe6b780..e222c401a2 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -14,7 +14,7 @@ The community maintains additional installation methods (currently only one). Additionally, there are two extra guides for specific uses: -- [Rapsberry Pi 4 guide](./pi.md). +- [Raspberry Pi 4 guide](./pi.md). - [Cross-compiling guide for developers](./cross-compiling.md). ## Minimum System Requirements diff --git a/book/src/mainnet-validator.md b/book/src/mainnet-validator.md index 0f91b8e272..41735f85bb 100644 --- a/book/src/mainnet-validator.md +++ b/book/src/mainnet-validator.md @@ -34,7 +34,7 @@ Remember, if you get stuck you can always reach out on our [Discord][discord]. > > **Please note**: the Lighthouse team does not take any responsibility for losses or damages -> occured through the use of Lighthouse. We have an experienced internal security team and have +> occurred through the use of Lighthouse. We have an experienced internal security team and have > undergone multiple third-party security-reviews, however the possibility of bugs or malicious > interference remains a real and constant threat. Validators should be prepared to lose some rewards > due to the actions of other actors on the consensus layer or software bugs. See the diff --git a/book/src/setup.md b/book/src/setup.md index dfff9290e6..e8c56623be 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -19,7 +19,7 @@ The additional requirements for developers are: ## Using `make` -Commands to run the test suite are avaiable via the `Makefile` in the +Commands to run the test suite are available via the `Makefile` in the project root for the benefit of CI/CD. 
We list some of these commands below so you can run them locally and avoid CI failures: diff --git a/book/src/slasher.md b/book/src/slasher.md index 05107238c3..889f9c6cbc 100644 --- a/book/src/slasher.md +++ b/book/src/slasher.md @@ -1,6 +1,6 @@ # Running a Slasher -Lighthouse includes a slasher for identifying slashable offences comitted by other validators and +Lighthouse includes a slasher for identifying slashable offences committed by other validators and including proof of those offences in blocks. Running a slasher is a good way to contribute to the health of the network, and doing so can earn @@ -69,7 +69,7 @@ The slasher uses MDBX as its backing store, which places a hard limit on the siz file. You can use the `--slasher-max-db-size` flag to set this limit. It can be adjusted after initialization if the limit is reached. -By default the limit is set to accomodate the default history length and around 300K validators but +By default the limit is set to accommodate the default history length and around 300K validators but you can set it lower if running with a reduced history length. The space required scales approximately linearly in validator count and history length, i.e. if you halve either you can halve the space required. @@ -134,7 +134,7 @@ the slot duration. ### Chunk Size and Validator Chunk Size * Flags: `--slasher-chunk-size EPOCHS`, `--slasher-validator-chunk-size NUM_VALIDATORS` -* Arguments: number of ecochs, number of validators +* Arguments: number of epochs, number of validators * Defaults: 16, 256 Adjusting these parameter should only be done in conjunction with reading in detail diff --git a/book/src/slashing-protection.md b/book/src/slashing-protection.md index 9ae6c102e3..a60c8e36dc 100644 --- a/book/src/slashing-protection.md +++ b/book/src/slashing-protection.md @@ -54,7 +54,7 @@ Examples where it is **ineffective** are: clients (e.g. 
Lighthouse and Prysm) running on the same machine, two Lighthouse instances using different datadirs, or two clients on completely different machines (e.g. one on a cloud server and one running locally). You are responsible for ensuring that your validator keys are never - running simultanously – the slashing protection DB **cannot protect you in this case**. + running simultaneously – the slashing protection DB **cannot protect you in this case**. * Importing keys from another client without also importing voting history. * If you use `--init-slashing-protection` to recreate a missing slashing protection database. diff --git a/book/src/validator-import-launchpad.md b/book/src/validator-import-launchpad.md index aee9ac7b96..9849b91b70 100644 --- a/book/src/validator-import-launchpad.md +++ b/book/src/validator-import-launchpad.md @@ -1,6 +1,6 @@ # Importing from the Ethereum Staking Launch pad -The [Staking Lauchpad](https://github.com/ethereum/eth2.0-deposit) is a website +The [Staking Launchpad](https://github.com/ethereum/eth2.0-deposit) is a website from the Ethereum Foundation which guides users how to use the [`eth2.0-deposit-cli`](https://github.com/ethereum/eth2.0-deposit-cli) command-line program to generate consensus validator keys. 
diff --git a/common/eth2_network_config/built_in_network_configs/kiln/config.yaml b/common/eth2_network_config/built_in_network_configs/kiln/config.yaml index 797c0672c3..5631c8a0bf 100644 --- a/common/eth2_network_config/built_in_network_configs/kiln/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/kiln/config.yaml @@ -6,7 +6,7 @@ PRESET_BASE: 'mainnet' MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 95000 # Mar 11th, 2022, 14:00 UTC MIN_GENESIS_TIME: 1647007200 -# Gensis fork +# Genesis fork GENESIS_FORK_VERSION: 0x70000069 # 300 seconds (5 min) GENESIS_DELAY: 300 diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 2daefdacad..2a84d1d2d2 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -394,7 +394,7 @@ async fn invalid_attestation_no_committee_for_index() { &spec, ); - // Expecting NoCommitee because we manually set the attestation's index to be invalid + // Expecting NoCommittee because we manually set the attestation's index to be invalid assert_eq!( result, Err(BlockProcessingError::AttestationInvalid { @@ -471,7 +471,7 @@ async fn invalid_attestation_bad_aggregation_bitfield_len() { &spec, ); - // Expecting InvalidBitfield because the size of the aggregation_bitfield is bigger than the commitee size. + // Expecting InvalidBitfield because the size of the aggregation_bitfield is bigger than the committee size. 
assert_eq!( result, Err(BlockProcessingError::BeaconStateError( diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index efb1046452..b6ea89794f 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -18,7 +18,7 @@ GENESIS_VALIDATOR_COUNT=80 # Number of beacon_node instances that you intend to run BN_COUNT=4 -# Number of valicator clients +# Number of validator clients VC_COUNT=$BN_COUNT # Number of seconds to delay to start genesis block. diff --git a/scripts/tests/vars.env b/scripts/tests/vars.env index d51fe2aef2..376fe3d8c5 100644 --- a/scripts/tests/vars.env +++ b/scripts/tests/vars.env @@ -18,7 +18,7 @@ GENESIS_VALIDATOR_COUNT=80 # Number of beacon_node instances that you intend to run BN_COUNT=4 -# Number of valicator clients +# Number of validator clients VC_COUNT=$BN_COUNT # Number of seconds to delay to start genesis block. diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 1baa9f6bb2..f10142d614 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -440,7 +440,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> { context.service_context("sync_committee".into()), ); - // Wait until genesis has occured. + // Wait until genesis has occurred. // // It seems most sensible to move this into the `start_service` function, but I'm caution // of making too many changes this close to genesis (<1 week). From e29765e1181b2ed6b632197a9e0abdd9f4a34682 Mon Sep 17 00:00:00 2001 From: Justin Traglia <jtraglia@pm.me> Date: Wed, 27 Jul 2022 00:51:07 +0000 Subject: [PATCH 092/184] Reformat tables and add borders (#3377) ## Proposed Changes This PR reformats Markdown tables and ensures all tables have borders. 
--- book/src/advanced_database.md | 10 +-- book/src/api-vc-endpoints.md | 132 ++++++++++++++-------------- book/src/installation-priorities.md | 8 +- book/src/suggested-fee-recipient.md | 46 +++++----- 4 files changed, 98 insertions(+), 98 deletions(-) diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index 178936cf61..397d9a28b5 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -23,11 +23,11 @@ states to slow down dramatically. A lower _slots per restore point_ value (SPRP) frequent restore points, while a higher SPRP corresponds to less frequent. The table below shows some example values. -| Use Case | SPRP | Yearly Disk Usage | Load Historical State | -| ---------------------- | -------------- | ----------------- | --------------------- | -| Block explorer/analysis | 32 | 1.4 TB | 155 ms | -| Hobbyist (prev. default) | 2048 | 23.1 GB | 10.2 s | -| Validator only (default) | 8192 | 5.7 GB | 41 s | +| Use Case | SPRP | Yearly Disk Usage | Load Historical State | +|--------------------------|------|-------------------|-----------------------| +| Block explorer/analysis | 32 | 1.4 TB | 155 ms | +| Hobbyist (prev. default) | 2048 | 23.1 GB | 10.2 s | +| Validator only (default) | 8192 | 5.7 GB | 41 s | As you can see, it's a high-stakes trade-off! The relationships to disk usage and historical state load time are both linear – doubling SPRP halves disk usage and doubles load time. The minimum SPRP diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 69cd83db5c..9aedf6e249 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -24,12 +24,12 @@ Returns the software version and `git` commit hash for the Lighthouse binary. 
### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/version` -Method | GET -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/version` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Response Body @@ -47,12 +47,12 @@ Returns information regarding the health of the host machine. ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/health` -Method | GET -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/health` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | *Note: this endpoint is presently only available on Linux.* @@ -83,12 +83,12 @@ Returns the Ethereum proof-of-stake consensus specification loaded for this vali ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/spec` -Method | GET -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/spec` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Response Body @@ -168,12 +168,12 @@ file may be read by a local user with access rights. 
### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/auth` -Method | GET -Required Headers | - -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------| +| Path | `/lighthouse/auth` | +| Method | GET | +| Required Headers | - | +| Typical Responses | 200 | ### Example Path @@ -195,12 +195,12 @@ Lists all validators managed by this validator client. ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators` -Method | GET -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Response Body @@ -232,12 +232,12 @@ Get a validator by their `voting_pubkey`. ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators/:voting_pubkey` -Method | GET -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200, 400 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/:voting_pubkey` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200, 400 | ### Example Path @@ -262,12 +262,12 @@ Update some values for the validator with `voting_pubkey`. 
### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators/:voting_pubkey` -Method | PATCH -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200, 400 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/:voting_pubkey` | +| Method | PATCH | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200, 400 | ### Example Path @@ -301,12 +301,12 @@ Validators are generated from the mnemonic according to ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators` -Method | POST -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators` | +| Method | POST | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Request Body @@ -359,12 +359,12 @@ Import a keystore into the validator client. ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators/keystore` -Method | POST -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/keystore` | +| Method | POST | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Request Body @@ -433,12 +433,12 @@ generated with the path `m/12381/3600/i/42`. 
### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators/mnemonic` -Method | POST -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/mnemonic` | +| Method | POST | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Request Body @@ -479,12 +479,12 @@ Create any number of new validators, all of which will refer to a ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators/web3signer` -Method | POST -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200, 400 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/web3signer` | +| Method | POST | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200, 400 | ### Example Request Body diff --git a/book/src/installation-priorities.md b/book/src/installation-priorities.md index 69d871c396..0008e327b7 100644 --- a/book/src/installation-priorities.md +++ b/book/src/installation-priorities.md @@ -4,10 +4,10 @@ When publishing releases, Lighthouse will include an "Update Priority" section i The "Update Priority" section will include a table which may appear like so: -|User Class |Beacon Node | Validator Client| ---- | --- | --- -|Staking Users| Medium Priority | Low Priority | -|Non-Staking Users| Low Priority|---| +| User Class | Beacon Node | Validator Client | +|-------------------|-----------------|------------------| +| Staking Users | Medium Priority | Low Priority | +| Non-Staking Users | Low Priority | --- | To understand this table, the following terms are important: diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md index 
35338549e9..c401abfb7a 100644 --- a/book/src/suggested-fee-recipient.md +++ b/book/src/suggested-fee-recipient.md @@ -10,7 +10,7 @@ coinbase and the recipient of other fees or rewards. There is no guarantee that an execution node will use the `suggested_fee_recipient` to collect fees, it may use any address it chooses. It is assumed that an honest execution node *will* use the -`suggested_fee_recipient`, but users should note this trust assumption. Check out the +`suggested_fee_recipient`, but users should note this trust assumption. Check out the [strict fee recipient](#strict-fee-recipient) section for how to mitigate this assumption. The `suggested_fee_recipient` can be provided to the VC, who will transmit it to the BN. The BN also @@ -64,10 +64,10 @@ validator client does not transmit a `suggested_fee_recipient` to the BN. ## Strict Fee Recipient -If the flag `--strict-fee-recipient` is set in the validator client, Lighthouse will refuse to sign any block whose -`fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. This applies to both the normal -block proposal flow, as well as block proposals through the builder API. Proposals through the builder API are more likely -to have a discrepancy in `fee_recipient` so you should be aware of how your connected relay sends proposer payments before +If the flag `--strict-fee-recipient` is set in the validator client, Lighthouse will refuse to sign any block whose +`fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. This applies to both the normal +block proposal flow, as well as block proposals through the builder API. Proposals through the builder API are more likely +to have a discrepancy in `fee_recipient` so you should be aware of how your connected relay sends proposer payments before using this flag. 
If this flag is used, a fee recipient mismatch in the builder API flow will result in a fallback to the local execution engine for payload construction, where a strict fee recipient check will still be applied. @@ -79,12 +79,12 @@ for setting the fee recipient dynamically for a given public key. When used, the will be saved in `validator_definitions.yml` so that it persists across restarts of the validator client. -| Property | Specification | -| --- | --- | -Path | `/eth/v1/validator/{pubkey}/feerecipient` -Method | POST -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 202, 404 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/eth/v1/validator/{pubkey}/feerecipient` | +| Method | POST | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 202, 404 | #### Example Request Body ```json @@ -114,12 +114,12 @@ null The same path with a `GET` request can be used to query the fee recipient for a given public key at any time. -| Property | Specification | -| --- | --- | -Path | `/eth/v1/validator/{pubkey}/feerecipient` -Method | GET -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200, 404 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/eth/v1/validator/{pubkey}/feerecipient` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200, 404 | ```bash DATADIR=$HOME/.lighthouse/mainnet @@ -146,12 +146,12 @@ curl -X GET \ The same path with a `DELETE` request can be used to remove the fee recipient for a given public key at any time. This is useful if you want the fee recipient to fall back to the validator client (or beacon node) default. 
-| Property | Specification | -| --- | --- | -Path | `/eth/v1/validator/{pubkey}/feerecipient` -Method | DELETE -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 204, 404 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/eth/v1/validator/{pubkey}/feerecipient` | +| Method | DELETE | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 204, 404 | ```bash DATADIR=$HOME/.lighthouse/mainnet From 947ad9f14a2802e974bf62270c29a092fa894350 Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Wed, 27 Jul 2022 00:51:08 +0000 Subject: [PATCH 093/184] Allow syncing or accepted in integration test (#3378) ## Issue Addressed Unblock CI for this failure: https://github.com/sigp/lighthouse/runs/7529551988 The root cause is a disagreement between the test and Nethermind over whether the appropriate status for a payload with an unknown parent is SYNCING or ACCEPTED. According to the spec, SYNCING is correct so we should update the test to expect this correct behaviour. However Geth still returns `ACCEPTED`, so for now we allow either. 
--- testing/execution_engine_integration/src/test_rig.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 7dac2010b6..7126268c37 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -488,7 +488,11 @@ impl<E: GenericExecutionEngine> TestRig<E> { .notify_new_payload(&second_payload) .await .unwrap(); - assert_eq!(status, PayloadStatus::Accepted); + // TODO: we should remove the `Accepted` status here once Geth fixes it + assert!(matches!( + status, + PayloadStatus::Syncing | PayloadStatus::Accepted + )); /* * Execution Engine B: From f3439116da0a958b7806181479595db6c20bb255 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay <pawandhananjay@gmail.com> Date: Wed, 27 Jul 2022 03:20:00 +0000 Subject: [PATCH 094/184] Return ResourceUnavailable if we are unable to reconstruct execution payloads (#3365) ## Issue Addressed Resolves #3351 ## Proposed Changes Returns a `ResourceUnavailable` rpc error if we are unable to serve full payloads to blocks by root and range requests because the execution layer is not synced. ## Additional Info This PR also changes the penalties such that a `ResourceUnavailable` error is only penalized if it is an outgoing request. If we are syncing and aren't getting full block responses, then we don't have use for the peer. However, this might not be true for the incoming request case. We let the peer decide in this case if we are still useful or if we should be banned. cc @divagant-martian please let me know if i'm missing something here. 
--- .../src/peer_manager/mod.rs | 10 +++- .../beacon_processor/worker/rpc_methods.rs | 57 ++++++++++++++++--- 2 files changed, 58 insertions(+), 9 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 4b2b81060f..55b3884454 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -481,7 +481,15 @@ impl<TSpec: EthSpec> PeerManager<TSpec> { // implement a new sync type which tracks these peers and prevents the sync // algorithms from requesting blocks from them (at least for a set period of // time, multiple failures would then lead to a ban). - PeerAction::Fatal + + match direction { + // If the blocks request was initiated by us, then we have no use of this + // peer and so we ban it. + ConnectionDirection::Outgoing => PeerAction::Fatal, + // If the blocks request was initiated by the peer, then we let the peer decide if + // it wants to continue talking to us, we do not ban the peer. 
+ ConnectionDirection::Incoming => return, + } } RPCResponseErrorCode::ServerError => PeerAction::MidToleranceError, RPCResponseErrorCode::InvalidRequest => PeerAction::LowToleranceError, diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 87d4da2c6d..8ca9c35e47 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -135,6 +135,7 @@ impl<T: BeaconChainTypes> Worker<T> { executor.spawn( async move { let mut send_block_count = 0; + let mut send_response = true; for root in request.block_roots.iter() { match self .chain @@ -157,6 +158,23 @@ impl<T: BeaconChainTypes> Worker<T> { "request_root" => ?root ); } + Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { + debug!( + self.log, + "Failed to fetch execution payload for blocks by root request"; + "block_root" => ?root, + "reason" => "execution layer not synced", + ); + // send the stream terminator + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Execution layer not synced".into(), + request_id, + ); + send_response = false; + break; + } Err(e) => { debug!( self.log, @@ -173,11 +191,13 @@ impl<T: BeaconChainTypes> Worker<T> { "Received BlocksByRoot Request"; "peer" => %peer_id, "requested" => request.block_roots.len(), - "returned" => send_block_count + "returned" => %send_block_count ); // send stream termination - self.send_response(peer_id, Response::BlocksByRoot(None), request_id); + if send_response { + self.send_response(peer_id, Response::BlocksByRoot(None), request_id); + } drop(send_on_drop); }, "load_blocks_by_root_blocks", @@ -255,6 +275,7 @@ impl<T: BeaconChainTypes> Worker<T> { executor.spawn( async move { let mut blocks_sent = 0; + let mut send_response = true; for root in block_roots { match self.chain.get_block(&root).await { @@ -280,6 +301,23 @@ impl<T: 
BeaconChainTypes> Worker<T> { ); break; } + Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { + debug!( + self.log, + "Failed to fetch execution payload for blocks by range request"; + "block_root" => ?root, + "reason" => "execution layer not synced", + ); + // send the stream terminator + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Execution layer not synced".into(), + request_id, + ); + send_response = false; + break; + } Err(e) => { error!( self.log, @@ -320,12 +358,15 @@ impl<T: BeaconChainTypes> Worker<T> { ); } - // send the stream terminator - self.send_network_message(NetworkMessage::SendResponse { - peer_id, - response: Response::BlocksByRange(None), - id: request_id, - }); + if send_response { + // send the stream terminator + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::BlocksByRange(None), + id: request_id, + }); + } + drop(send_on_drop); }, "load_blocks_by_range_blocks", From cf3bcca96979ba1aca3ab70a22cd20936cc282d2 Mon Sep 17 00:00:00 2001 From: Philip White <philip@mailworks.org> Date: Wed, 27 Jul 2022 03:20:01 +0000 Subject: [PATCH 095/184] Allow setting web3signer version through environment (#3368) ## Issue Addressed #3369 ## Proposed Changes The goal is to make it possible to build Lighthouse without network access, so builds can be reproducible. This parallels the existing functionality in `common/deposit_contract/build.rs`, which allows specifying a filename through the environment to avoid downloading it. In this case, by specifying the version and making it available on the filesystem, the existing logic will avoid a network download. 
--- .gitignore | 3 +++ testing/web3signer_tests/build.rs | 2 ++ 2 files changed, 5 insertions(+) diff --git a/.gitignore b/.gitignore index 9830ef39be..ae9f83c46d 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,6 @@ perf.data* /bin genesis.ssz /clippy.toml + +# IntelliJ +/*.iml diff --git a/testing/web3signer_tests/build.rs b/testing/web3signer_tests/build.rs index ac34b5197f..f62dff0b6f 100644 --- a/testing/web3signer_tests/build.rs +++ b/testing/web3signer_tests/build.rs @@ -29,6 +29,8 @@ pub async fn download_binary(dest_dir: PathBuf) { let version = if let Some(version) = FIXED_VERSION_STRING { version.to_string() + } else if let Ok(env_version) = env::var("LIGHTHOUSE_WEB3SIGNER_VERSION") { + env_version } else { // Get the latest release of the web3 signer repo. let latest_response: Value = client From 5bdba157e1cca36171796ab5985bf6eec65db45b Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Thu, 28 Jul 2022 07:40:03 +0000 Subject: [PATCH 096/184] Fix antithesis docker builds (#3380) ## Issue Addressed The antithesis Docker builds started failing once we made our MSRV later than 1.58. It seems like it was because there is a new "LLVM pass manager" used by rust by default in more recent versions. Adding a new flag disables usage of the new pass manager and allows builds to pass. This adds a single flag to the antithesis `Dockerfile.libvoidstar`: `RUSTFLAGS="-Znew-llvm-pass-manager=no"`. But this flag requires us to use `nightly` so it also adds that, pinning to an arbitrary recent date.
Co-authored-by: realbigsean <sean@sigmaprime.io> --- testing/antithesis/Dockerfile.libvoidstar | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar index 81a1beea4a..32e2d5648d 100644 --- a/testing/antithesis/Dockerfile.libvoidstar +++ b/testing/antithesis/Dockerfile.libvoidstar @@ -2,8 +2,9 @@ FROM rust:1.62.1-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse -# build lighthouse directly with a cargo build command, bypassing the makefile -RUN cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Cpasses=sancov -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse +# Build lighthouse directly with a cargo build command, bypassing the Makefile. +# We have to use nightly in order to disable the new LLVM pass manager. 
+RUN rustup default nightly-2022-07-26 && cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Znew-llvm-pass-manager=no -Cpasses=sancov -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse # build lcli binary directly with cargo install command, bypassing the makefile RUN cargo install --path /lighthouse/lcli --force --locked From efb360cc6dc534bbcd9d8097d0c7d172157a59c8 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Thu, 28 Jul 2022 07:40:05 +0000 Subject: [PATCH 097/184] Downgrade Geth to v1.10.20 in EE integration tests (#3382) ## Issue Addressed NA ## Proposed Changes The execution integration tests have started failing since Geth updated to v1.10.21. More details here: https://github.com/ethereum/go-ethereum/issues/25427#issuecomment-1197552755 This PR pins our version at v1.10.20. 
## Additional Info NA --- testing/execution_engine_integration/src/geth.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 8c751ed651..ae5210b2a3 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -7,7 +7,7 @@ use std::{env, fs::File}; use tempfile::TempDir; use unused_port::unused_tcp_port; -const GETH_BRANCH: &str = "master"; +// const GETH_BRANCH: &str = "master"; const GETH_REPO_URL: &str = "https://github.com/ethereum/go-ethereum"; pub fn build_result(repo_dir: &Path) -> Output { @@ -26,8 +26,13 @@ pub fn build(execution_clients_dir: &Path) { build_utils::clone_repo(execution_clients_dir, GETH_REPO_URL).unwrap(); } + // TODO: this should be set back to the latest release once the following issue is resolved: + // + // - https://github.com/ethereum/go-ethereum/issues/25427 + // // Get the latest tag on the branch - let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); + // let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); + let last_release = "v1.10.20"; build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap(); // Build geth From d04fde3ba9b35c77f747304096ed38e73d521f5c Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Thu, 28 Jul 2022 09:43:41 +0000 Subject: [PATCH 098/184] Remove equivocating validators from fork choice (#3371) ## Issue Addressed Closes https://github.com/sigp/lighthouse/issues/3241 Closes https://github.com/sigp/lighthouse/issues/3242 ## Proposed Changes * [x] Implement logic to remove equivocating validators from fork choice per https://github.com/ethereum/consensus-specs/pull/2845 * [x] Update tests to v1.2.0-rc.1. The new test which exercises `equivocating_indices` is passing. 
* [x] Pull in some SSZ abstractions from the `tree-states` branch that make implementing Vec-compatible encoding for types like `BTreeSet` and `BTreeMap`. * [x] Implement schema upgrades and downgrades for the database (new schema version is V11). * [x] Apply attester slashings from blocks to fork choice ## Additional Info * This PR doesn't need the `BTreeMap` impl, but `tree-states` does, and I don't think there's any harm in keeping it. But I could also be convinced to drop it. Blocked on #3322. --- Cargo.lock | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 16 +- .../src/beacon_fork_choice_store.rs | 27 ++- .../beacon_chain/src/persisted_fork_choice.rs | 11 +- beacon_node/beacon_chain/src/schema_change.rs | 39 +++- .../src/schema_change/migration_schema_v11.rs | 77 +++++++ beacon_node/store/src/metadata.rs | 2 +- consensus/fork_choice/src/fork_choice.rs | 57 ++--- .../fork_choice/src/fork_choice_store.rs | 7 + .../src/fork_choice_test_definition.rs | 5 + .../src/proto_array_fork_choice.rs | 194 ++++++++++++++++-- consensus/ssz/Cargo.toml | 3 +- consensus/ssz/src/decode.rs | 1 + consensus/ssz/src/decode/impls.rs | 127 ++++++++---- consensus/ssz/src/decode/try_from_iter.rs | 96 +++++++++ consensus/ssz/src/encode/impls.rs | 120 ++++++++--- consensus/ssz/src/lib.rs | 4 +- consensus/ssz/tests/tests.rs | 48 +++++ consensus/ssz_types/src/variable_list.rs | 3 +- testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 2 + .../ef_tests/src/cases/epoch_processing.rs | 3 +- testing/ef_tests/src/cases/fork_choice.rs | 43 ++-- testing/ef_tests/src/handler.rs | 2 +- testing/ef_tests/tests/tests.rs | 3 +- 25 files changed, 742 insertions(+), 151 deletions(-) create mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs create mode 100644 consensus/ssz/src/decode/try_from_iter.rs diff --git a/Cargo.lock b/Cargo.lock index adffa23f57..e06b5f55ad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1726,6 +1726,7 @@ version = "0.4.1" 
dependencies = [ "eth2_ssz_derive", "ethereum-types 0.12.1", + "itertools", "smallvec", ] diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 2f35253058..a9e26e4875 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2095,11 +2095,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> { )?) } - /// Accept some attester slashing and queue it for inclusion in an appropriate block. + /// Accept a verified attester slashing and: + /// + /// 1. Apply it to fork choice. + /// 2. Add it to the op pool. pub fn import_attester_slashing( &self, attester_slashing: SigVerifiedOp<AttesterSlashing<T::EthSpec>>, ) { + // Add to fork choice. + self.canonical_head + .fork_choice_write_lock() + .on_attester_slashing(attester_slashing.as_inner()); + + // Add to the op pool (if we have the ability to propose blocks). if self.eth1_chain.is_some() { self.op_pool.insert_attester_slashing( attester_slashing, @@ -2717,6 +2726,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> { .process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), &state); let validator_monitor = self.validator_monitor.read(); + // Register each attester slashing in the block with fork choice. + for attester_slashing in block.body().attester_slashings() { + fork_choice.on_attester_slashing(attester_slashing); + } + // Register each attestation in the block with the fork choice service. 
for attestation in block.body().attestations() { let _fork_choice_attestation_timer = diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 0d65b8aa62..4f6003fda1 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -8,6 +8,7 @@ use crate::{metrics, BeaconSnapshot}; use derivative::Derivative; use fork_choice::ForkChoiceStore; use ssz_derive::{Decode, Encode}; +use std::collections::BTreeSet; use std::marker::PhantomData; use std::sync::Arc; use store::{Error as StoreError, HotColdDB, ItemStore}; @@ -158,6 +159,7 @@ pub struct BeaconForkChoiceStore<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore< unrealized_justified_checkpoint: Checkpoint, unrealized_finalized_checkpoint: Checkpoint, proposer_boost_root: Hash256, + equivocating_indices: BTreeSet<u64>, _phantom: PhantomData<E>, } @@ -206,6 +208,7 @@ where unrealized_justified_checkpoint: justified_checkpoint, unrealized_finalized_checkpoint: finalized_checkpoint, proposer_boost_root: Hash256::zero(), + equivocating_indices: BTreeSet::new(), _phantom: PhantomData, } } @@ -223,6 +226,7 @@ where unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, proposer_boost_root: self.proposer_boost_root, + equivocating_indices: self.equivocating_indices.clone(), } } @@ -242,6 +246,7 @@ where unrealized_justified_checkpoint: persisted.unrealized_justified_checkpoint, unrealized_finalized_checkpoint: persisted.unrealized_finalized_checkpoint, proposer_boost_root: persisted.proposer_boost_root, + equivocating_indices: persisted.equivocating_indices, _phantom: PhantomData, }) } @@ -350,30 +355,40 @@ where fn set_proposer_boost_root(&mut self, proposer_boost_root: Hash256) { self.proposer_boost_root = proposer_boost_root; } + + fn equivocating_indices(&self) -> &BTreeSet<u64> { + 
&self.equivocating_indices + } + + fn extend_equivocating_indices(&mut self, indices: impl IntoIterator<Item = u64>) { + self.equivocating_indices.extend(indices); + } } /// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database. #[superstruct( - variants(V1, V7, V8, V10), + variants(V1, V7, V8, V10, V11), variant_attributes(derive(Encode, Decode)), no_enum )] pub struct PersistedForkChoiceStore { #[superstruct(only(V1, V7))] pub balances_cache: BalancesCacheV1, - #[superstruct(only(V8, V10))] + #[superstruct(only(V8, V10, V11))] pub balances_cache: BalancesCacheV8, pub time: Slot, pub finalized_checkpoint: Checkpoint, pub justified_checkpoint: Checkpoint, pub justified_balances: Vec<u64>, pub best_justified_checkpoint: Checkpoint, - #[superstruct(only(V10))] + #[superstruct(only(V10, V11))] pub unrealized_justified_checkpoint: Checkpoint, - #[superstruct(only(V10))] + #[superstruct(only(V10, V11))] pub unrealized_finalized_checkpoint: Checkpoint, - #[superstruct(only(V7, V8, V10))] + #[superstruct(only(V7, V8, V10, V11))] pub proposer_boost_root: Hash256, + #[superstruct(only(V11))] + pub equivocating_indices: BTreeSet<u64>, } -pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV10; +pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV11; diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index eb5078df2c..a60dacdc7c 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -1,6 +1,6 @@ use crate::beacon_fork_choice_store::{ - PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV7, - PersistedForkChoiceStoreV8, + PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV11, + PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8, }; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -8,10 +8,10 @@ use 
store::{DBColumn, Error, StoreItem}; use superstruct::superstruct; // If adding a new version you should update this type alias and fix the breakages. -pub type PersistedForkChoice = PersistedForkChoiceV10; +pub type PersistedForkChoice = PersistedForkChoiceV11; #[superstruct( - variants(V1, V7, V8, V10), + variants(V1, V7, V8, V10, V11), variant_attributes(derive(Encode, Decode)), no_enum )] @@ -25,6 +25,8 @@ pub struct PersistedForkChoice { pub fork_choice_store: PersistedForkChoiceStoreV8, #[superstruct(only(V10))] pub fork_choice_store: PersistedForkChoiceStoreV10, + #[superstruct(only(V11))] + pub fork_choice_store: PersistedForkChoiceStoreV11, } macro_rules! impl_store_item { @@ -49,3 +51,4 @@ impl_store_item!(PersistedForkChoiceV1); impl_store_item!(PersistedForkChoiceV7); impl_store_item!(PersistedForkChoiceV8); impl_store_item!(PersistedForkChoiceV10); +impl_store_item!(PersistedForkChoiceV11); diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 411ef947d9..b6c70b5435 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,5 +1,6 @@ //! Utilities for managing database schema changes. 
mod migration_schema_v10; +mod migration_schema_v11; mod migration_schema_v6; mod migration_schema_v7; mod migration_schema_v8; @@ -8,7 +9,8 @@ mod types; use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; use crate::persisted_fork_choice::{ - PersistedForkChoiceV1, PersistedForkChoiceV10, PersistedForkChoiceV7, PersistedForkChoiceV8, + PersistedForkChoiceV1, PersistedForkChoiceV10, PersistedForkChoiceV11, PersistedForkChoiceV7, + PersistedForkChoiceV8, }; use crate::types::ChainSpec; use slog::{warn, Logger}; @@ -36,6 +38,12 @@ pub fn migrate_schema<T: BeaconChainTypes>( migrate_schema::<T>(db.clone(), datadir, from, next, log.clone(), spec)?; migrate_schema::<T>(db, datadir, next, to, log, spec) } + // Downgrade across multiple versions by recursively migrating one step at a time. + (_, _) if to.as_u64() + 1 < from.as_u64() => { + let next = SchemaVersion(from.as_u64() - 1); + migrate_schema::<T>(db.clone(), datadir, from, next, log.clone(), spec)?; + migrate_schema::<T>(db, datadir, next, to, log, spec) + } // // Migrations from before SchemaVersion(5) are deprecated. @@ -159,6 +167,35 @@ pub fn migrate_schema<T: BeaconChainTypes>( Ok(()) } + // Upgrade from v10 to v11 adding support for equivocating indices to fork choice. + (SchemaVersion(10), SchemaVersion(11)) => { + let mut ops = vec![]; + let fork_choice_opt = db.get_item::<PersistedForkChoiceV10>(&FORK_CHOICE_DB_KEY)?; + if let Some(fork_choice) = fork_choice_opt { + let updated_fork_choice = migration_schema_v11::update_fork_choice(fork_choice); + + ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } + // Downgrade from v11 to v10 removing support for equivocating indices from fork choice. 
+ (SchemaVersion(11), SchemaVersion(10)) => { + let mut ops = vec![]; + let fork_choice_opt = db.get_item::<PersistedForkChoiceV11>(&FORK_CHOICE_DB_KEY)?; + if let Some(fork_choice) = fork_choice_opt { + let updated_fork_choice = + migration_schema_v11::downgrade_fork_choice(fork_choice, log); + + ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs new file mode 100644 index 0000000000..dde80a5cac --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs @@ -0,0 +1,77 @@ +use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV11}; +use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV11}; +use slog::{warn, Logger}; +use std::collections::BTreeSet; + +/// Add the equivocating indices field. 
+pub fn update_fork_choice(fork_choice_v10: PersistedForkChoiceV10) -> PersistedForkChoiceV11 { + let PersistedForkChoiceStoreV10 { + balances_cache, + time, + finalized_checkpoint, + justified_checkpoint, + justified_balances, + best_justified_checkpoint, + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + proposer_boost_root, + } = fork_choice_v10.fork_choice_store; + + PersistedForkChoiceV11 { + fork_choice: fork_choice_v10.fork_choice, + fork_choice_store: PersistedForkChoiceStoreV11 { + balances_cache, + time, + finalized_checkpoint, + justified_checkpoint, + justified_balances, + best_justified_checkpoint, + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + proposer_boost_root, + equivocating_indices: BTreeSet::new(), + }, + } +} + +pub fn downgrade_fork_choice( + fork_choice_v11: PersistedForkChoiceV11, + log: Logger, +) -> PersistedForkChoiceV10 { + let PersistedForkChoiceStoreV11 { + balances_cache, + time, + finalized_checkpoint, + justified_checkpoint, + justified_balances, + best_justified_checkpoint, + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + proposer_boost_root, + equivocating_indices, + } = fork_choice_v11.fork_choice_store; + + if !equivocating_indices.is_empty() { + warn!( + log, + "Deleting slashed validators from fork choice store"; + "count" => equivocating_indices.len(), + "message" => "this may make your node more susceptible to following the wrong chain", + ); + } + + PersistedForkChoiceV10 { + fork_choice: fork_choice_v11.fork_choice, + fork_choice_store: PersistedForkChoiceStoreV10 { + balances_cache, + time, + finalized_checkpoint, + justified_checkpoint, + justified_balances, + best_justified_checkpoint, + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + proposer_boost_root, + }, + } +} diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 235550ddd7..d72dbcd23d 100644 --- a/beacon_node/store/src/metadata.rs +++ 
b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(10); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(11); // All the keys that get stored under the `BeaconMeta` column. // diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index c3a88433f2..a31d8ade6b 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,19 +1,23 @@ use crate::{ForkChoiceStore, InvalidationOperation}; use proto_array::{Block as ProtoBlock, ExecutionStatus, ProtoArrayForkChoice}; use ssz_derive::{Decode, Encode}; -use state_processing::per_epoch_processing; +use state_processing::{ + per_block_processing::errors::AttesterSlashingValidationError, per_epoch_processing, +}; use std::cmp::Ordering; +use std::collections::BTreeSet; use std::marker::PhantomData; use std::time::Duration; use types::{ - consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, BeaconBlockRef, BeaconState, - BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, - Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, + consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, AttesterSlashing, BeaconBlockRef, + BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, + ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, }; #[derive(Debug)] pub enum Error<T> { InvalidAttestation(InvalidAttestation), + InvalidAttesterSlashing(AttesterSlashingValidationError), InvalidBlock(InvalidBlock), ProtoArrayError(String), InvalidProtoArrayBytes(String), @@ -63,6 +67,12 @@ impl<T> From<InvalidAttestation> for Error<T> { } } +impl<T> From<AttesterSlashingValidationError> for Error<T> { + fn from(e: AttesterSlashingValidationError) -> Self { + 
Error::InvalidAttesterSlashing(e) + } +} + impl<T> From<state_processing::EpochProcessingError> for Error<T> { fn from(e: state_processing::EpochProcessingError) -> Self { Error::UnrealizedVoteProcessing(e) @@ -413,26 +423,6 @@ where Ok(fork_choice) } - /* - /// Instantiates `Self` from some existing components. - /// - /// This is useful if the existing components have been loaded from disk after a process - /// restart. - pub fn from_components( - fc_store: T, - proto_array: ProtoArrayForkChoice, - queued_attestations: Vec<QueuedAttestation>, - ) -> Self { - Self { - fc_store, - proto_array, - queued_attestations, - forkchoice_update_parameters: None, - _phantom: PhantomData, - } - } - */ - /// Returns cached information that can be used to issue a `forkchoiceUpdated` message to an /// execution engine. /// @@ -507,6 +497,7 @@ where *store.finalized_checkpoint(), store.justified_balances(), store.proposer_boost_root(), + store.equivocating_indices(), current_slot, spec, )?; @@ -1109,6 +1100,22 @@ where Ok(()) } + /// Apply an attester slashing to fork choice. + /// + /// We assume that the attester slashing provided to this function has already been verified. + pub fn on_attester_slashing(&mut self, slashing: &AttesterSlashing<E>) { + let attesting_indices_set = |att: &IndexedAttestation<E>| { + att.attesting_indices + .iter() + .copied() + .collect::<BTreeSet<_>>() + }; + let att1_indices = attesting_indices_set(&slashing.attestation_1); + let att2_indices = attesting_indices_set(&slashing.attestation_2); + self.fc_store + .extend_equivocating_indices(att1_indices.intersection(&att2_indices).copied()); + } + /// Call `on_tick` for all slots between `fc_store.get_current_slot()` and the provided /// `current_slot`. Returns the value of `self.fc_store.get_current_slot`. pub fn update_time( @@ -1325,8 +1332,6 @@ where // If the parent block has execution enabled, always import the block. // - // TODO(bellatrix): this condition has not yet been merged into the spec. 
- // // See: // // https://github.com/ethereum/consensus-specs/pull/2844 diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs index a7085b024a..6a4616e9f3 100644 --- a/consensus/fork_choice/src/fork_choice_store.rs +++ b/consensus/fork_choice/src/fork_choice_store.rs @@ -1,3 +1,4 @@ +use std::collections::BTreeSet; use types::{BeaconBlockRef, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot}; /// Approximates the `Store` in "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice": @@ -76,4 +77,10 @@ pub trait ForkChoiceStore<T: EthSpec>: Sized { /// Sets the proposer boost root. fn set_proposer_boost_root(&mut self, proposer_boost_root: Hash256); + + /// Gets the equivocating indices. + fn equivocating_indices(&self) -> &BTreeSet<u64>; + + /// Adds to the set of equivocating indices. + fn extend_equivocating_indices(&mut self, indices: impl IntoIterator<Item = u64>); } diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 0cfa3a194f..fcb1b94d6f 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -6,6 +6,7 @@ mod votes; use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; use crate::InvalidationOperation; use serde_derive::{Deserialize, Serialize}; +use std::collections::BTreeSet; use types::{ AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, MainnetEthSpec, Slot, @@ -88,6 +89,7 @@ impl ForkChoiceTestDefinition { ExecutionStatus::Optimistic(ExecutionBlockHash::zero()), ) .expect("should create fork choice struct"); + let equivocating_indices = BTreeSet::new(); for (op_index, op) in self.operations.into_iter().enumerate() { match op.clone() { @@ -103,6 +105,7 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, &justified_state_balances, Hash256::zero(), + &equivocating_indices, 
Slot::new(0), &spec, ) @@ -130,6 +133,7 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, &justified_state_balances, proposer_boost_root, + &equivocating_indices, Slot::new(0), &spec, ) @@ -154,6 +158,7 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, &justified_state_balances, Hash256::zero(), + &equivocating_indices, Slot::new(0), &spec, ); diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 568cfa9640..4767919f70 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -4,7 +4,7 @@ use crate::ssz_container::SszContainer; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::collections::HashMap; +use std::collections::{BTreeSet, HashMap}; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, @@ -260,12 +260,14 @@ impl ProtoArrayForkChoice { .map_err(|e| format!("process_block_error: {:?}", e)) } + #[allow(clippy::too_many_arguments)] pub fn find_head<E: EthSpec>( &mut self, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint, justified_state_balances: &[u64], proposer_boost_root: Hash256, + equivocating_indices: &BTreeSet<u64>, current_slot: Slot, spec: &ChainSpec, ) -> Result<Hash256, String> { @@ -278,6 +280,7 @@ impl ProtoArrayForkChoice { &mut self.votes, old_balances, new_balances, + equivocating_indices, ) .map_err(|e| format!("find_head compute_deltas failed: {:?}", e))?; @@ -439,6 +442,7 @@ fn compute_deltas( votes: &mut ElasticList<VoteTracker>, old_balances: &[u64], new_balances: &[u64], + equivocating_indices: &BTreeSet<u64>, ) -> Result<Vec<i64>, Error> { let mut deltas = vec![0_i64; indices.len()]; @@ -449,6 +453,38 @@ fn compute_deltas( continue; } + // Handle newly slashed validators by deducting their weight from their current vote. 
We + // determine if they are newly slashed by checking whether their `vote.current_root` is + // non-zero. After applying the deduction a single time we set their `current_root` to zero + // and never update it again (thus preventing repeat deductions). + // + // Even if they make new attestations which are processed by `process_attestation` these + // will only update their `vote.next_root`. + if equivocating_indices.contains(&(val_index as u64)) { + // First time we've processed this slashing in fork choice: + // + // 1. Add a negative delta for their `current_root`. + // 2. Set their `current_root` (permanently) to zero. + if !vote.current_root.is_zero() { + let old_balance = old_balances.get(val_index).copied().unwrap_or(0); + + if let Some(current_delta_index) = indices.get(&vote.current_root).copied() { + let delta = deltas + .get(current_delta_index) + .ok_or(Error::InvalidNodeDelta(current_delta_index))? + .checked_sub(old_balance as i64) + .ok_or(Error::DeltaOverflow(current_delta_index))?; + + // Array access safe due to check on previous line. + deltas[current_delta_index] = delta; + } + + vote.current_root = Hash256::zero(); + } + // We've handled this slashed validator, continue without applying an ordinary delta. + continue; + } + // If the validator was not included in the _old_ balances (i.e., it did not exist yet) // then say its balance was zero. 
let old_balance = old_balances.get(val_index).copied().unwrap_or(0); @@ -605,6 +641,7 @@ mod test_compute_deltas { let mut votes = ElasticList::default(); let mut old_balances = vec![]; let mut new_balances = vec![]; + let equivocating_indices = BTreeSet::new(); for i in 0..validator_count { indices.insert(hash_from_index(i), i); @@ -617,8 +654,14 @@ mod test_compute_deltas { new_balances.push(0); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!( deltas.len(), @@ -649,6 +692,7 @@ mod test_compute_deltas { let mut votes = ElasticList::default(); let mut old_balances = vec![]; let mut new_balances = vec![]; + let equivocating_indices = BTreeSet::new(); for i in 0..validator_count { indices.insert(hash_from_index(i), i); @@ -661,8 +705,14 @@ mod test_compute_deltas { new_balances.push(BALANCE); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!( deltas.len(), @@ -700,6 +750,7 @@ mod test_compute_deltas { let mut votes = ElasticList::default(); let mut old_balances = vec![]; let mut new_balances = vec![]; + let equivocating_indices = BTreeSet::new(); for i in 0..validator_count { indices.insert(hash_from_index(i), i); @@ -712,8 +763,14 @@ mod test_compute_deltas { new_balances.push(BALANCE); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!( deltas.len(), @@ -746,6 +803,7 
@@ mod test_compute_deltas { let mut votes = ElasticList::default(); let mut old_balances = vec![]; let mut new_balances = vec![]; + let equivocating_indices = BTreeSet::new(); for i in 0..validator_count { indices.insert(hash_from_index(i), i); @@ -758,8 +816,14 @@ mod test_compute_deltas { new_balances.push(BALANCE); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!( deltas.len(), @@ -797,6 +861,7 @@ mod test_compute_deltas { let mut indices = HashMap::new(); let mut votes = ElasticList::default(); + let equivocating_indices = BTreeSet::new(); // There is only one block. indices.insert(hash_from_index(1), 0); @@ -819,8 +884,14 @@ mod test_compute_deltas { next_epoch: Epoch::new(0), }); - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!(deltas.len(), 1, "deltas should have expected length"); @@ -849,6 +920,7 @@ mod test_compute_deltas { let mut votes = ElasticList::default(); let mut old_balances = vec![]; let mut new_balances = vec![]; + let equivocating_indices = BTreeSet::new(); for i in 0..validator_count { indices.insert(hash_from_index(i), i); @@ -861,8 +933,14 @@ mod test_compute_deltas { new_balances.push(NEW_BALANCE); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!( deltas.len(), @@ -902,6 +980,7 @@ mod test_compute_deltas { let mut indices = HashMap::new(); 
let mut votes = ElasticList::default(); + let equivocating_indices = BTreeSet::new(); // There are two blocks. indices.insert(hash_from_index(1), 0); @@ -921,8 +1000,14 @@ mod test_compute_deltas { }); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!(deltas.len(), 2, "deltas should have expected length"); @@ -951,6 +1036,7 @@ mod test_compute_deltas { let mut indices = HashMap::new(); let mut votes = ElasticList::default(); + let equivocating_indices = BTreeSet::new(); // There are two blocks. indices.insert(hash_from_index(1), 0); @@ -970,8 +1056,14 @@ mod test_compute_deltas { }); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!(deltas.len(), 2, "deltas should have expected length"); @@ -992,4 +1084,72 @@ mod test_compute_deltas { ); } } + + #[test] + fn validator_equivocates() { + const OLD_BALANCE: u64 = 42; + const NEW_BALANCE: u64 = 43; + + let mut indices = HashMap::new(); + let mut votes = ElasticList::default(); + + // There are two blocks. + indices.insert(hash_from_index(1), 0); + indices.insert(hash_from_index(2), 1); + + // There are two validators. + let old_balances = vec![OLD_BALANCE; 2]; + let new_balances = vec![NEW_BALANCE; 2]; + + // Both validator move votes from block 1 to block 2. + for _ in 0..2 { + votes.0.push(VoteTracker { + current_root: hash_from_index(1), + next_root: hash_from_index(2), + next_epoch: Epoch::new(0), + }); + } + + // Validator 0 is slashed. 
+ let equivocating_indices = BTreeSet::from_iter([0]); + + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); + + assert_eq!(deltas.len(), 2, "deltas should have expected length"); + + assert_eq!( + deltas[0], + -2 * OLD_BALANCE as i64, + "block 1 should have lost two old balances" + ); + assert_eq!( + deltas[1], NEW_BALANCE as i64, + "block 2 should have gained one balance" + ); + + // Validator 0's current root should have been reset. + assert_eq!(votes.0[0].current_root, Hash256::zero()); + assert_eq!(votes.0[0].next_root, hash_from_index(2)); + + // Validator 1's current root should have been updated. + assert_eq!(votes.0[1].current_root, hash_from_index(2)); + + // Re-computing the deltas should be a no-op (no repeat deduction for the slashed validator). + let deltas = compute_deltas( + &indices, + &mut votes, + &new_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); + assert_eq!(deltas, vec![0, 0]); + } } diff --git a/consensus/ssz/Cargo.toml b/consensus/ssz/Cargo.toml index 7ba3e0678c..a153c2efc1 100644 --- a/consensus/ssz/Cargo.toml +++ b/consensus/ssz/Cargo.toml @@ -14,7 +14,8 @@ eth2_ssz_derive = "0.3.0" [dependencies] ethereum-types = "0.12.1" -smallvec = "1.6.1" +smallvec = { version = "1.6.1", features = ["const_generics"] } +itertools = "0.10.3" [features] arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/ssz/src/decode.rs b/consensus/ssz/src/decode.rs index 604cc68d7b..10b3573b16 100644 --- a/consensus/ssz/src/decode.rs +++ b/consensus/ssz/src/decode.rs @@ -5,6 +5,7 @@ use std::cmp::Ordering; type SmallVec8<T> = SmallVec<[T; 8]>; pub mod impls; +pub mod try_from_iter; /// Returned when SSZ decoding fails. 
#[derive(Debug, PartialEq, Clone)] diff --git a/consensus/ssz/src/decode/impls.rs b/consensus/ssz/src/decode/impls.rs index 0e6b390830..d91ddabe02 100644 --- a/consensus/ssz/src/decode/impls.rs +++ b/consensus/ssz/src/decode/impls.rs @@ -1,7 +1,11 @@ use super::*; +use crate::decode::try_from_iter::{TryCollect, TryFromIter}; use core::num::NonZeroUsize; use ethereum_types::{H160, H256, U128, U256}; +use itertools::process_results; use smallvec::SmallVec; +use std::collections::{BTreeMap, BTreeSet}; +use std::iter::{self, FromIterator}; use std::sync::Arc; macro_rules! impl_decodable_for_uint { @@ -380,14 +384,14 @@ macro_rules! impl_for_vec { fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> { if bytes.is_empty() { - Ok(vec![].into()) + Ok(Self::from_iter(iter::empty())) } else if T::is_ssz_fixed_len() { bytes .chunks(T::ssz_fixed_len()) - .map(|chunk| T::from_ssz_bytes(chunk)) + .map(T::from_ssz_bytes) .collect() } else { - decode_list_of_variable_length_items(bytes, $max_len).map(|vec| vec.into()) + decode_list_of_variable_length_items(bytes, $max_len) } } } @@ -395,26 +399,73 @@ macro_rules! 
impl_for_vec { } impl_for_vec!(Vec<T>, None); -impl_for_vec!(SmallVec<[T; 1]>, Some(1)); -impl_for_vec!(SmallVec<[T; 2]>, Some(2)); -impl_for_vec!(SmallVec<[T; 3]>, Some(3)); -impl_for_vec!(SmallVec<[T; 4]>, Some(4)); -impl_for_vec!(SmallVec<[T; 5]>, Some(5)); -impl_for_vec!(SmallVec<[T; 6]>, Some(6)); -impl_for_vec!(SmallVec<[T; 7]>, Some(7)); -impl_for_vec!(SmallVec<[T; 8]>, Some(8)); +impl_for_vec!(SmallVec<[T; 1]>, None); +impl_for_vec!(SmallVec<[T; 2]>, None); +impl_for_vec!(SmallVec<[T; 3]>, None); +impl_for_vec!(SmallVec<[T; 4]>, None); +impl_for_vec!(SmallVec<[T; 5]>, None); +impl_for_vec!(SmallVec<[T; 6]>, None); +impl_for_vec!(SmallVec<[T; 7]>, None); +impl_for_vec!(SmallVec<[T; 8]>, None); + +impl<K, V> Decode for BTreeMap<K, V> +where + K: Decode + Ord, + V: Decode, +{ + fn is_ssz_fixed_len() -> bool { + false + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> { + if bytes.is_empty() { + Ok(Self::from_iter(iter::empty())) + } else if <(K, V)>::is_ssz_fixed_len() { + bytes + .chunks(<(K, V)>::ssz_fixed_len()) + .map(<(K, V)>::from_ssz_bytes) + .collect() + } else { + decode_list_of_variable_length_items(bytes, None) + } + } +} + +impl<T> Decode for BTreeSet<T> +where + T: Decode + Ord, +{ + fn is_ssz_fixed_len() -> bool { + false + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> { + if bytes.is_empty() { + Ok(Self::from_iter(iter::empty())) + } else if T::is_ssz_fixed_len() { + bytes + .chunks(T::ssz_fixed_len()) + .map(T::from_ssz_bytes) + .collect() + } else { + decode_list_of_variable_length_items(bytes, None) + } + } +} /// Decodes `bytes` as if it were a list of variable-length items. /// -/// The `ssz::SszDecoder` can also perform this functionality, however it it significantly faster -/// as it is optimized to read same-typed items whilst `ssz::SszDecoder` supports reading items of -/// differing types. 
-pub fn decode_list_of_variable_length_items<T: Decode>( +/// The `ssz::SszDecoder` can also perform this functionality, however this function is +/// significantly faster as it is optimized to read same-typed items whilst `ssz::SszDecoder` +/// supports reading items of differing types. +pub fn decode_list_of_variable_length_items<T: Decode, Container: TryFromIter<T>>( bytes: &[u8], max_len: Option<usize>, -) -> Result<Vec<T>, DecodeError> { +) -> Result<Container, DecodeError> { if bytes.is_empty() { - return Ok(vec![]); + return Container::try_from_iter(iter::empty()).map_err(|e| { + DecodeError::BytesInvalid(format!("Error trying to collect empty list: {:?}", e)) + }); } let first_offset = read_offset(bytes)?; @@ -433,35 +484,27 @@ pub fn decode_list_of_variable_length_items<T: Decode>( ))); } - // Only initialize the vec with a capacity if a maximum length is provided. - // - // We assume that if a max length is provided then the application is able to handle an - // allocation of this size. - let mut values = if max_len.is_some() { - Vec::with_capacity(num_items) - } else { - vec![] - }; - let mut offset = first_offset; - for i in 1..=num_items { - let slice_option = if i == num_items { - bytes.get(offset..) - } else { - let start = offset; + process_results( + (1..=num_items).map(|i| { + let slice_option = if i == num_items { + bytes.get(offset..) 
+ } else { + let start = offset; - let next_offset = read_offset(&bytes[(i * BYTES_PER_LENGTH_OFFSET)..])?; - offset = sanitize_offset(next_offset, Some(offset), bytes.len(), Some(first_offset))?; + let next_offset = read_offset(&bytes[(i * BYTES_PER_LENGTH_OFFSET)..])?; + offset = + sanitize_offset(next_offset, Some(offset), bytes.len(), Some(first_offset))?; - bytes.get(start..offset) - }; + bytes.get(start..offset) + }; - let slice = slice_option.ok_or(DecodeError::OutOfBoundsByte { i: offset })?; - - values.push(T::from_ssz_bytes(slice)?); - } - - Ok(values) + let slice = slice_option.ok_or(DecodeError::OutOfBoundsByte { i: offset })?; + T::from_ssz_bytes(slice) + }), + |iter| iter.try_collect(), + )? + .map_err(|e| DecodeError::BytesInvalid(format!("Error collecting into container: {:?}", e))) } #[cfg(test)] diff --git a/consensus/ssz/src/decode/try_from_iter.rs b/consensus/ssz/src/decode/try_from_iter.rs new file mode 100644 index 0000000000..22db02d4fc --- /dev/null +++ b/consensus/ssz/src/decode/try_from_iter.rs @@ -0,0 +1,96 @@ +use smallvec::SmallVec; +use std::collections::{BTreeMap, BTreeSet}; +use std::convert::Infallible; +use std::fmt::Debug; + +/// Partial variant of `std::iter::FromIterator`. +/// +/// This trait is implemented for types which can be constructed from an iterator of decoded SSZ +/// values, but which may refuse values once a length limit is reached. +pub trait TryFromIter<T>: Sized { + type Error: Debug; + + fn try_from_iter<I>(iter: I) -> Result<Self, Self::Error> + where + I: IntoIterator<Item = T>; +} + +// It would be nice to be able to do a blanket impl, e.g. +// +// `impl TryFromIter<T> for C where C: FromIterator<T>` +// +// However this runs into trait coherence issues due to the type parameter `T` on `TryFromIter`. +// +// E.g. 
If we added an impl downstream for `List<T, N>` then another crate downstream of that +// could legally add an impl of `FromIterator<Local> for List<Local, N>` which would create +// two conflicting implementations for `List<Local, N>`. Hence the `List<T, N>` impl is disallowed +// by the compiler in the presence of the blanket impl. That's obviously annoying, so we opt to +// abandon the blanket impl in favour of impls for selected types. +impl<T> TryFromIter<T> for Vec<T> { + type Error = Infallible; + + fn try_from_iter<I>(iter: I) -> Result<Self, Self::Error> + where + I: IntoIterator<Item = T>, + { + Ok(Self::from_iter(iter)) + } +} + +impl<T, const N: usize> TryFromIter<T> for SmallVec<[T; N]> { + type Error = Infallible; + + fn try_from_iter<I>(iter: I) -> Result<Self, Self::Error> + where + I: IntoIterator<Item = T>, + { + Ok(Self::from_iter(iter)) + } +} + +impl<K, V> TryFromIter<(K, V)> for BTreeMap<K, V> +where + K: Ord, +{ + type Error = Infallible; + + fn try_from_iter<I>(iter: I) -> Result<Self, Self::Error> + where + I: IntoIterator<Item = (K, V)>, + { + Ok(Self::from_iter(iter)) + } +} + +impl<T> TryFromIter<T> for BTreeSet<T> +where + T: Ord, +{ + type Error = Infallible; + + fn try_from_iter<I>(iter: I) -> Result<Self, Self::Error> + where + I: IntoIterator<Item = T>, + { + Ok(Self::from_iter(iter)) + } +} + +/// Partial variant of `collect`. 
+pub trait TryCollect: Iterator { + fn try_collect<C>(self) -> Result<C, C::Error> + where + C: TryFromIter<Self::Item>; +} + +impl<I> TryCollect for I +where + I: Iterator, +{ + fn try_collect<C>(self) -> Result<C, C::Error> + where + C: TryFromIter<Self::Item>, + { + C::try_from_iter(self) + } +} diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs index 5728685d01..cfd95ba40d 100644 --- a/consensus/ssz/src/encode/impls.rs +++ b/consensus/ssz/src/encode/impls.rs @@ -2,6 +2,7 @@ use super::*; use core::num::NonZeroUsize; use ethereum_types::{H160, H256, U128, U256}; use smallvec::SmallVec; +use std::collections::{BTreeMap, BTreeSet}; use std::sync::Arc; macro_rules! impl_encodable_for_uint { @@ -220,6 +221,65 @@ impl<T: Encode> Encode for Arc<T> { } } +// Encode transparently through references. +impl<'a, T: Encode> Encode for &'a T { + fn is_ssz_fixed_len() -> bool { + T::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + T::ssz_fixed_len() + } + + fn ssz_append(&self, buf: &mut Vec<u8>) { + T::ssz_append(self, buf) + } + + fn ssz_bytes_len(&self) -> usize { + T::ssz_bytes_len(self) + } +} + +/// Compute the encoded length of a vector-like sequence of `T`. +pub fn sequence_ssz_bytes_len<I, T>(iter: I) -> usize +where + I: Iterator<Item = T> + ExactSizeIterator, + T: Encode, +{ + // Compute length before doing any iteration. + let length = iter.len(); + if <T as Encode>::is_ssz_fixed_len() { + <T as Encode>::ssz_fixed_len() * length + } else { + let mut len = iter.map(|item| item.ssz_bytes_len()).sum(); + len += BYTES_PER_LENGTH_OFFSET * length; + len + } +} + +/// Encode a vector-like sequence of `T`. 
+pub fn sequence_ssz_append<I, T>(iter: I, buf: &mut Vec<u8>) +where + I: Iterator<Item = T> + ExactSizeIterator, + T: Encode, +{ + if T::is_ssz_fixed_len() { + buf.reserve(T::ssz_fixed_len() * iter.len()); + + for item in iter { + item.ssz_append(buf); + } + } else { + let mut encoder = SszEncoder::container(buf, iter.len() * BYTES_PER_LENGTH_OFFSET); + + for item in iter { + encoder.append(&item); + } + + encoder.finalize(); + } +} + macro_rules! impl_for_vec { ($type: ty) => { impl<T: Encode> Encode for $type { @@ -228,32 +288,11 @@ macro_rules! impl_for_vec { } fn ssz_bytes_len(&self) -> usize { - if <T as Encode>::is_ssz_fixed_len() { - <T as Encode>::ssz_fixed_len() * self.len() - } else { - let mut len = self.iter().map(|item| item.ssz_bytes_len()).sum(); - len += BYTES_PER_LENGTH_OFFSET * self.len(); - len - } + sequence_ssz_bytes_len(self.iter()) } fn ssz_append(&self, buf: &mut Vec<u8>) { - if T::is_ssz_fixed_len() { - buf.reserve(T::ssz_fixed_len() * self.len()); - - for item in self { - item.ssz_append(buf); - } - } else { - let mut encoder = - SszEncoder::container(buf, self.len() * BYTES_PER_LENGTH_OFFSET); - - for item in self { - encoder.append(item); - } - - encoder.finalize(); - } + sequence_ssz_append(self.iter(), buf) } } }; @@ -269,6 +308,41 @@ impl_for_vec!(SmallVec<[T; 6]>); impl_for_vec!(SmallVec<[T; 7]>); impl_for_vec!(SmallVec<[T; 8]>); +impl<K, V> Encode for BTreeMap<K, V> +where + K: Encode + Ord, + V: Encode, +{ + fn is_ssz_fixed_len() -> bool { + false + } + + fn ssz_bytes_len(&self) -> usize { + sequence_ssz_bytes_len(self.iter()) + } + + fn ssz_append(&self, buf: &mut Vec<u8>) { + sequence_ssz_append(self.iter(), buf) + } +} + +impl<T> Encode for BTreeSet<T> +where + T: Encode + Ord, +{ + fn is_ssz_fixed_len() -> bool { + false + } + + fn ssz_bytes_len(&self) -> usize { + sequence_ssz_bytes_len(self.iter()) + } + + fn ssz_append(&self, buf: &mut Vec<u8>) { + sequence_ssz_append(self.iter(), buf) + } +} + impl Encode for bool { fn 
is_ssz_fixed_len() -> bool { true diff --git a/consensus/ssz/src/lib.rs b/consensus/ssz/src/lib.rs index df00c514e2..e71157a3ee 100644 --- a/consensus/ssz/src/lib.rs +++ b/consensus/ssz/src/lib.rs @@ -40,8 +40,8 @@ pub mod legacy; mod union_selector; pub use decode::{ - impls::decode_list_of_variable_length_items, read_offset, split_union_bytes, Decode, - DecodeError, SszDecoder, SszDecoderBuilder, + impls::decode_list_of_variable_length_items, read_offset, split_union_bytes, + try_from_iter::TryFromIter, Decode, DecodeError, SszDecoder, SszDecoderBuilder, }; pub use encode::{encode_length, Encode, SszEncoder}; pub use union_selector::UnionSelector; diff --git a/consensus/ssz/tests/tests.rs b/consensus/ssz/tests/tests.rs index 7bd6252ad0..e41fc15dd4 100644 --- a/consensus/ssz/tests/tests.rs +++ b/consensus/ssz/tests/tests.rs @@ -4,6 +4,8 @@ use ssz_derive::{Decode, Encode}; mod round_trip { use super::*; + use std::collections::BTreeMap; + use std::iter::FromIterator; fn round_trip<T: Encode + Decode + std::fmt::Debug + PartialEq>(items: Vec<T>) { for item in items { @@ -321,6 +323,52 @@ mod round_trip { round_trip(vec); } + + #[test] + fn btree_map_fixed() { + let data = vec![ + BTreeMap::new(), + BTreeMap::from_iter(vec![(0u8, 0u16), (1, 2), (2, 4), (4, 6)]), + ]; + round_trip(data); + } + + #[test] + fn btree_map_variable_value() { + let data = vec![ + BTreeMap::new(), + BTreeMap::from_iter(vec![ + ( + 0u64, + ThreeVariableLen { + a: 1, + b: vec![3, 5, 7], + c: vec![], + d: vec![0, 0], + }, + ), + ( + 1, + ThreeVariableLen { + a: 99, + b: vec![1], + c: vec![2, 3, 4, 5, 6, 7, 8, 9, 10], + d: vec![4, 5, 6, 7, 8], + }, + ), + ( + 2, + ThreeVariableLen { + a: 0, + b: vec![], + c: vec![], + d: vec![], + }, + ), + ]), + ]; + round_trip(data); + } } mod derive_macro { diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs index 1414d12c8c..5acf74608a 100644 --- a/consensus/ssz_types/src/variable_list.rs +++ 
b/consensus/ssz_types/src/variable_list.rs @@ -255,7 +255,8 @@ where }) .map(Into::into) } else { - ssz::decode_list_of_variable_length_items(bytes, Some(max_len)).map(|vec| vec.into()) + ssz::decode_list_of_variable_length_items(bytes, Some(max_len)) + .map(|vec: Vec<_>| vec.into()) } } } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 13d8f631cc..b237bfb761 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.1.10 +TESTS_TAG := v1.2.0-rc.1 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 2eb4ce5407..87953a6141 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -33,6 +33,8 @@ excluded_paths = [ "tests/.*/.*/ssz_static/LightClientSnapshot", # Merkle-proof tests for light clients "tests/.*/.*/merkle/single_proof", + # Capella tests are disabled for now. + "tests/.*/capella", # One of the EF researchers likes to pack the tarballs on a Mac ".*\.DS_Store.*" ] diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 7546c96a78..0283d13da4 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -276,7 +276,8 @@ impl<E: EthSpec, T: EpochTransition<E>> Case for EpochProcessing<E, T> { && T::name() != "inactivity_updates" && T::name() != "participation_flag_updates" } - ForkName::Altair | ForkName::Merge => true, // TODO: revisit when tests are out + // No phase0 tests for Altair and later. 
+ ForkName::Altair | ForkName::Merge => T::name() != "participation_record_updates", } } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 4d90bb161f..7d90f2ee9a 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -52,13 +52,13 @@ pub struct Checks { #[derive(Debug, Clone, Deserialize)] #[serde(untagged, deny_unknown_fields)] -pub enum Step<B, A, P, S> { +pub enum Step<B, A, AS, P> { Tick { tick: u64 }, ValidBlock { block: B }, MaybeValidBlock { block: B, valid: bool }, Attestation { attestation: A }, + AttesterSlashing { attester_slashing: AS }, PowBlock { pow_block: P }, - AttesterSlashing { attester_slashing: S }, Checks { checks: Box<Checks> }, } @@ -75,12 +75,7 @@ pub struct ForkChoiceTest<E: EthSpec> { pub anchor_state: BeaconState<E>, pub anchor_block: BeaconBlock<E>, #[allow(clippy::type_complexity)] - pub steps: Vec<Step<SignedBeaconBlock<E>, Attestation<E>, PowBlock, AttesterSlashing<E>>>, -} - -/// Spec to be used for fork choice tests. -pub fn fork_choice_spec<E: EthSpec>(fork_name: ForkName) -> ChainSpec { - testing_spec::<E>(fork_name) + pub steps: Vec<Step<SignedBeaconBlock<E>, Attestation<E>, AttesterSlashing<E>, PowBlock>>, } impl<E: EthSpec> LoadCase for ForkChoiceTest<E> { @@ -92,7 +87,7 @@ impl<E: EthSpec> LoadCase for ForkChoiceTest<E> { .to_str() .expect("path must be valid OsStr") .to_string(); - let spec = &fork_choice_spec::<E>(fork_name); + let spec = &testing_spec::<E>(fork_name); let steps: Vec<Step<String, String, String, String>> = yaml_decode_file(&path.join("steps.yaml"))?; // Resolve the object names in `steps.yaml` into actual decoded block/attestation objects. 
@@ -116,14 +111,14 @@ impl<E: EthSpec> LoadCase for ForkChoiceTest<E> { ssz_decode_file(&path.join(format!("{}.ssz_snappy", attestation))) .map(|attestation| Step::Attestation { attestation }) } - Step::PowBlock { pow_block } => { - ssz_decode_file(&path.join(format!("{}.ssz_snappy", pow_block))) - .map(|pow_block| Step::PowBlock { pow_block }) - } Step::AttesterSlashing { attester_slashing } => { ssz_decode_file(&path.join(format!("{}.ssz_snappy", attester_slashing))) .map(|attester_slashing| Step::AttesterSlashing { attester_slashing }) } + Step::PowBlock { pow_block } => { + ssz_decode_file(&path.join(format!("{}.ssz_snappy", pow_block))) + .map(|pow_block| Step::PowBlock { pow_block }) + } Step::Checks { checks } => Ok(Step::Checks { checks }), }) .collect::<Result<_, _>>()?; @@ -159,15 +154,12 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> { } fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { - let tester = Tester::new(self, fork_choice_spec::<E>(fork_name))?; + let tester = Tester::new(self, testing_spec::<E>(fork_name))?; // TODO(merge): re-enable this test before production. // This test is skipped until we can do retrospective confirmations of the terminal // block after an optimistic sync. - if self.description == "block_lookup_failed" - //TODO(sean): enable once we implement equivocation logic (https://github.com/sigp/lighthouse/issues/3241) - || self.description == "discard_equivocations" - { + if self.description == "block_lookup_failed" { return Err(Error::SkippedKnownFailure); }; @@ -179,11 +171,10 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> { tester.process_block(block.clone(), *valid)? 
} Step::Attestation { attestation } => tester.process_attestation(attestation)?, + Step::AttesterSlashing { attester_slashing } => { + tester.process_attester_slashing(attester_slashing) + } Step::PowBlock { pow_block } => tester.process_pow_block(pow_block), - //TODO(sean): enable once we implement equivocation logic (https://github.com/sigp/lighthouse/issues/3241) - Step::AttesterSlashing { - attester_slashing: _, - } => (), Step::Checks { checks } => { let Checks { head, @@ -443,6 +434,14 @@ impl<E: EthSpec> Tester<E> { .map_err(|e| Error::InternalError(format!("attestation import failed with {:?}", e))) } + pub fn process_attester_slashing(&self, attester_slashing: &AttesterSlashing<E>) { + self.harness + .chain + .canonical_head + .fork_choice_write_lock() + .on_attester_slashing(attester_slashing) + } + pub fn process_pow_block(&self, pow_block: &PowBlock) { let el = self.harness.mock_execution_layer.as_ref().unwrap(); diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 25299bf577..13c0a8c54a 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -52,7 +52,7 @@ pub trait Handler { .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap_or(false)) }; let test_cases = fs::read_dir(&handler_path) - .expect("handler dir exists") + .unwrap_or_else(|e| panic!("handler dir {} exists: {:?}", handler_path.display(), e)) .filter_map(as_directory) .flat_map(|suite| fs::read_dir(suite.path()).expect("suite dir exists")) .filter_map(as_directory) diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index a36253f24e..91345fb669 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -377,8 +377,9 @@ fn epoch_processing_participation_record_updates() { #[test] fn epoch_processing_sync_committee_updates() { + // There are presently no mainnet tests, see: + // https://github.com/ethereum/consensus-spec-tests/issues/29 EpochProcessingHandler::<MinimalEthSpec, 
SyncCommitteeUpdates>::default().run(); - EpochProcessingHandler::<MainnetEthSpec, SyncCommitteeUpdates>::default().run(); } #[test] From 25f0e261cb37ca1945c7ec581da0c7cf09ad4c95 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Thu, 28 Jul 2022 13:57:09 +0000 Subject: [PATCH 099/184] Don't return errors when fork choice fails (#3370) ## Issue Addressed NA ## Proposed Changes There are scenarios where the only viable head will have an invalid execution payload, in this scenario the `get_head` function on `proto_array` will return an error. We must recover from this scenario by importing blocks from the network. This PR stops `BeaconChain::recompute_head` from returning an error so that we can't accidentally start down-scoring peers or aborting block import just because the current head has an invalid payload. ## Reviewer Notes The following changes are included: 1. Allow `fork_choice.get_head` to fail gracefully in `BeaconChain::process_block` when trying to update the `early_attester_cache`; simply don't add the block to the cache rather than aborting the entire process. 1. Don't return an error from `BeaconChain::recompute_head_at_current_slot` and `BeaconChain::recompute_head` to defensively prevent calling functions from aborting any process just because the fork choice function failed to run. - This should have practically no effect, since most callers were still continuing if recomputing the head failed. - The outlier is that the API will return 200 rather than a 500 when fork choice fails. 1. Add the `ProtoArrayForkChoice::set_all_blocks_to_optimistic` function to recover from the scenario where we've rebooted and the persisted fork choice has an invalid head. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 68 ++--- .../beacon_chain/src/canonical_head.rs | 46 ++- .../beacon_chain/src/state_advance_timer.rs | 9 +- beacon_node/beacon_chain/src/test_utils.rs | 18 +- .../beacon_chain/tests/block_verification.rs | 18 +- .../tests/payload_invalidation.rs | 261 +++++++++++++++++- beacon_node/beacon_chain/tests/store_tests.rs | 14 +- beacon_node/beacon_chain/tests/tests.rs | 6 +- beacon_node/http_api/src/lib.rs | 10 +- .../network/src/beacon_processor/tests.rs | 2 +- .../beacon_processor/worker/gossip_methods.rs | 16 +- .../beacon_processor/worker/sync_methods.rs | 24 +- consensus/fork_choice/src/fork_choice.rs | 12 +- consensus/proto_array/src/proto_array.rs | 2 +- .../src/proto_array_fork_choice.rs | 104 ++++++- testing/ef_tests/src/cases/fork_choice.rs | 3 +- 16 files changed, 466 insertions(+), 147 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index a9e26e4875..2e944f2939 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2805,32 +2805,38 @@ impl<T: BeaconChainTypes> BeaconChain<T> { if !payload_verification_status.is_optimistic() && block.slot() + EARLY_ATTESTER_CACHE_HISTORIC_SLOTS >= current_slot { - let new_head_root = fork_choice - .get_head(current_slot, &self.spec) - .map_err(BeaconChainError::from)?; - - if new_head_root == block_root { - if let Some(proto_block) = fork_choice.get_block(&block_root) { - if let Err(e) = self.early_attester_cache.add_head_block( - block_root, - signed_block.clone(), - proto_block, - &state, - &self.spec, - ) { + match fork_choice.get_head(current_slot, &self.spec) { + // This block became the head, add it to the early attester cache. 
+ Ok(new_head_root) if new_head_root == block_root => { + if let Some(proto_block) = fork_choice.get_block(&block_root) { + if let Err(e) = self.early_attester_cache.add_head_block( + block_root, + signed_block.clone(), + proto_block, + &state, + &self.spec, + ) { + warn!( + self.log, + "Early attester cache insert failed"; + "error" => ?e + ); + } + } else { warn!( self.log, - "Early attester cache insert failed"; - "error" => ?e + "Early attester block missing"; + "block_root" => ?block_root ); } - } else { - warn!( - self.log, - "Early attester block missing"; - "block_root" => ?block_root - ); } + // This block did not become the head, nothing to do. + Ok(_) => (), + Err(e) => error!( + self.log, + "Failed to compute head during block import"; + "error" => ?e + ), } } @@ -3608,16 +3614,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { // Run fork choice since it's possible that the payload invalidation might result in a new // head. - // - // Don't return early though, since invalidating the justified checkpoint might cause an - // error here. - if let Err(e) = self.recompute_head_at_current_slot().await { - crit!( - self.log, - "Failed to run fork choice routine"; - "error" => ?e, - ); - } + self.recompute_head_at_current_slot().await; // Obtain the justified root from fork choice. // @@ -4262,14 +4259,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { } // Run fork choice and signal to any waiting task that it has completed. - if let Err(e) = self.recompute_head_at_current_slot().await { - error!( - self.log, - "Fork choice error at slot start"; - "error" => ?e, - "slot" => slot, - ); - } + self.recompute_head_at_current_slot().await; // Send the notification regardless of fork choice success, this is a "best effort" // notification and we don't want block production to hit the timeout in case of error. 
diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index aff4deeaf9..c37f266824 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -434,9 +434,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// Execute the fork choice algorithm and enthrone the result as the canonical head. /// /// This method replaces the old `BeaconChain::fork_choice` method. - pub async fn recompute_head_at_current_slot(self: &Arc<Self>) -> Result<(), Error> { - let current_slot = self.slot()?; - self.recompute_head_at_slot(current_slot).await + pub async fn recompute_head_at_current_slot(self: &Arc<Self>) { + match self.slot() { + Ok(current_slot) => self.recompute_head_at_slot(current_slot).await, + Err(e) => error!( + self.log, + "No slot when recomputing head"; + "error" => ?e + ), + } } /// Execute the fork choice algorithm and enthrone the result as the canonical head. @@ -445,7 +451,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// different slot to the wall-clock can be useful for pushing fork choice into the next slot /// *just* before the start of the slot. This ensures that block production can use the correct /// head value without being delayed. - pub async fn recompute_head_at_slot(self: &Arc<Self>, current_slot: Slot) -> Result<(), Error> { + /// + /// This function purposefully does *not* return a `Result`. It's possible for fork choice to + /// fail to update if there is only one viable head and it has an invalid execution payload. In + /// such a case it's critical that the `BeaconChain` keeps importing blocks so that the + /// situation can be rectified. We avoid returning an error here so that calling functions + /// can't abort block import because an error is returned here. 
+ pub async fn recompute_head_at_slot(self: &Arc<Self>, current_slot: Slot) { metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); let _timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES); @@ -455,15 +467,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> { move || chain.recompute_head_at_slot_internal(current_slot), "recompute_head_internal", ) - .await? + .await { // Fork choice returned successfully and did not need to update the EL. - Ok(None) => Ok(()), + Ok(Ok(None)) => (), // Fork choice returned successfully and needed to update the EL. It has returned a // join-handle from when it spawned some async tasks. We should await those tasks. - Ok(Some(join_handle)) => match join_handle.await { + Ok(Ok(Some(join_handle))) => match join_handle.await { // The async task completed successfully. - Ok(Some(())) => Ok(()), + Ok(Some(())) => (), // The async task did not complete successfully since the runtime is shutting down. Ok(None) => { debug!( @@ -471,7 +483,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> { "Did not update EL fork choice"; "info" => "shutting down" ); - Err(Error::RuntimeShutdown) } // The async task did not complete successfully, tokio returned an error. Err(e) => { @@ -480,13 +491,24 @@ impl<T: BeaconChainTypes> BeaconChain<T> { "Did not update EL fork choice"; "error" => ?e ); - Err(Error::TokioJoin(e)) } }, // There was an error recomputing the head. - Err(e) => { + Ok(Err(e)) => { metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS); - Err(e) + error!( + self.log, + "Error whist recomputing head"; + "error" => ?e + ); + } + // There was an error spawning the task. 
+ Err(e) => { + error!( + self.log, + "Failed to spawn recompute head task"; + "error" => ?e + ); } } } diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 5abec98877..48c0f2f8a2 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -220,14 +220,7 @@ async fn state_advance_timer<T: BeaconChainTypes>( return; } - if let Err(e) = beacon_chain.recompute_head_at_slot(next_slot).await { - warn!( - log, - "Error updating fork choice for next slot"; - "error" => ?e, - "slot" => next_slot, - ); - } + beacon_chain.recompute_head_at_slot(next_slot).await; // Use a blocking task to avoid blocking the core executor whilst waiting for locks // in `ForkChoiceSignalTx`. diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 1297e7d78b..1f19465c08 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -211,6 +211,20 @@ impl<E: EthSpec> Builder<EphemeralHarnessType<E>> { self.store = Some(store); self.store_mutator(Box::new(mutator)) } + + /// Manually restore from a given `MemoryStore`. + pub fn resumed_ephemeral_store( + mut self, + store: Arc<HotColdDB<E, MemoryStore<E>, MemoryStore<E>>>, + ) -> Self { + let mutator = move |builder: BeaconChainBuilder<_>| { + builder + .resume_from_db() + .expect("should resume from database") + }; + self.store = Some(store); + self.store_mutator(Box::new(mutator)) + } } impl<E: EthSpec> Builder<DiskHarnessType<E>> { @@ -1376,7 +1390,7 @@ where .process_block(Arc::new(block), CountUnrealized::True) .await? .into(); - self.chain.recompute_head_at_current_slot().await?; + self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } @@ -1389,7 +1403,7 @@ where .process_block(Arc::new(block), CountUnrealized::True) .await? 
.into(); - self.chain.recompute_head_at_current_slot().await?; + self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 43dda7ab05..88d6914036 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -160,11 +160,7 @@ async fn chain_segment_full_segment() { .into_block_error() .expect("should import chain segment"); - harness - .chain - .recompute_head_at_current_slot() - .await - .expect("should run fork choice"); + harness.chain.recompute_head_at_current_slot().await; assert_eq!( harness.head_block_root(), @@ -194,11 +190,7 @@ async fn chain_segment_varying_chunk_size() { .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); } - harness - .chain - .recompute_head_at_current_slot() - .await - .expect("should run fork choice"); + harness.chain.recompute_head_at_current_slot().await; assert_eq!( harness.head_block_root(), @@ -729,11 +721,7 @@ async fn block_gossip_verification() { } // Recompute the head to ensure we cache the latest view of fork choice. 
- harness - .chain - .recompute_head_at_current_slot() - .await - .unwrap(); + harness.chain.recompute_head_at_current_slot().await; /* * This test ensures that: diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index f2ebb430d4..4107631378 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1,6 +1,7 @@ #![cfg(not(debug_assertions))] use beacon_chain::{ + canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, BeaconChainError, BlockError, ExecutionPayloadError, StateSkipConfig, WhenSlotSkipped, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, @@ -14,6 +15,7 @@ use fork_choice::{ }; use proto_array::{Error as ProtoArrayError, ExecutionStatus}; use slot_clock::SlotClock; +use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use task_executor::ShutdownReason; @@ -95,11 +97,15 @@ impl InvalidPayloadRig { } async fn recompute_head(&self) { - self.harness - .chain - .recompute_head_at_current_slot() - .await - .unwrap(); + self.harness.chain.recompute_head_at_current_slot().await; + } + + fn cached_head(&self) -> CachedHead<E> { + self.harness.chain.canonical_head.cached_head() + } + + fn canonical_head(&self) -> &CanonicalHead<EphemeralHarnessType<E>> { + &self.harness.chain.canonical_head } fn previous_forkchoice_update_params(&self) -> (ForkChoiceState, PayloadAttributes) { @@ -354,6 +360,19 @@ impl InvalidPayloadRig { .await .unwrap(); } + + fn assert_get_head_error_contains(&self, s: &str) { + match self + .harness + .chain + .canonical_head + .fork_choice_write_lock() + .get_head(self.harness.chain.slot().unwrap(), &self.harness.chain.spec) + { + Err(ForkChoiceError::ProtoArrayError(e)) if e.contains(s) => (), + other => panic!("expected {} error, got {:?}", s, other), + }; + } } /// Simple test of the different import types. 
@@ -1183,3 +1202,235 @@ async fn attesting_to_optimistic_head() { get_aggregated().unwrap(); get_aggregated_by_slot_and_root().unwrap(); } + +/// Helper for running tests where we generate a chain with an invalid head and then some +/// `fork_blocks` to recover it. +struct InvalidHeadSetup { + rig: InvalidPayloadRig, + fork_blocks: Vec<Arc<SignedBeaconBlock<E>>>, + invalid_head: CachedHead<E>, +} + +impl InvalidHeadSetup { + async fn new() -> InvalidHeadSetup { + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + + // Import blocks until the first time the chain finalizes. + while rig.cached_head().finalized_checkpoint().epoch == 0 { + rig.import_block(Payload::Syncing).await; + } + + let invalid_head = rig.cached_head(); + + // Invalidate the head block. + rig.invalidate_manually(invalid_head.head_block_root()) + .await; + assert!(rig + .canonical_head() + .head_execution_status() + .unwrap() + .is_invalid()); + + // Finding a new head should fail since the only possible head is not valid. + rig.assert_get_head_error_contains("InvalidBestNode"); + + // Build three "fork" blocks that conflict with the current canonical head. Don't apply them to + // the chain yet. 
+ let mut fork_blocks = vec![]; + let mut parent_state = rig + .harness + .chain + .state_at_slot( + invalid_head.head_slot() - 3, + StateSkipConfig::WithStateRoots, + ) + .unwrap(); + for _ in 0..3 { + let slot = parent_state.slot() + 1; + let (fork_block, post_state) = rig.harness.make_block(parent_state, slot).await; + parent_state = post_state; + fork_blocks.push(Arc::new(fork_block)) + } + + Self { + rig, + fork_blocks, + invalid_head, + } + } +} + +#[tokio::test] +async fn recover_from_invalid_head_by_importing_blocks() { + let InvalidHeadSetup { + rig, + fork_blocks, + invalid_head, + } = InvalidHeadSetup::new().await; + + // Import the first two blocks, they should not become the head. + for i in 0..2 { + if i == 0 { + // The first block should be `VALID` during import. + rig.harness + .mock_execution_layer + .as_ref() + .unwrap() + .server + .all_payloads_valid_on_new_payload(); + } else { + // All blocks after the first block should return `SYNCING`. + rig.harness + .mock_execution_layer + .as_ref() + .unwrap() + .server + .all_payloads_syncing_on_new_payload(true); + } + + rig.harness + .chain + .process_block(fork_blocks[i].clone(), CountUnrealized::True) + .await + .unwrap(); + rig.recompute_head().await; + rig.assert_get_head_error_contains("InvalidBestNode"); + let new_head = rig.cached_head(); + assert_eq!( + new_head.head_block_root(), + invalid_head.head_block_root(), + "the head should not change" + ); + } + + // Import the third block, it should become the head. 
+ rig.harness + .chain + .process_block(fork_blocks[2].clone(), CountUnrealized::True) + .await + .unwrap(); + rig.recompute_head().await; + let new_head = rig.cached_head(); + assert_eq!( + new_head.head_block_root(), + fork_blocks[2].canonical_root(), + "the third block should become the head" + ); + + let manual_get_head = rig + .harness + .chain + .canonical_head + .fork_choice_write_lock() + .get_head(rig.harness.chain.slot().unwrap(), &rig.harness.chain.spec) + .unwrap(); + assert_eq!(manual_get_head, new_head.head_block_root(),); +} + +#[tokio::test] +async fn recover_from_invalid_head_after_persist_and_reboot() { + let InvalidHeadSetup { + rig, + fork_blocks: _, + invalid_head, + } = InvalidHeadSetup::new().await; + + // Forcefully persist the head and fork choice. + rig.harness.chain.persist_head_and_fork_choice().unwrap(); + + let resumed = BeaconChainHarness::builder(MainnetEthSpec) + .default_spec() + .deterministic_keypairs(VALIDATOR_COUNT) + .resumed_ephemeral_store(rig.harness.chain.store.clone()) + .mock_execution_layer() + .build(); + + // Forget the original rig so we don't accidentally use it again. + drop(rig); + + let resumed_head = resumed.chain.canonical_head.cached_head(); + assert_eq!( + resumed_head.head_block_root(), + invalid_head.head_block_root(), + "the resumed harness should have the invalid block as the head" + ); + assert!( + resumed + .chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_block(&resumed_head.head_block_root()) + .unwrap(), + "the invalid block should have become optimistic" + ); +} + +#[tokio::test] +async fn weights_after_resetting_optimistic_status() { + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + rig.import_block(Payload::Valid).await; // Import a valid transition block. 
+ + let mut roots = vec![]; + for _ in 0..4 { + roots.push(rig.import_block(Payload::Syncing).await); + } + + rig.recompute_head().await; + let head = rig.cached_head(); + + let original_weights = rig + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .proto_array() + .iter_nodes(&head.head_block_root()) + .map(|node| (node.root, node.weight)) + .collect::<HashMap<_, _>>(); + + rig.invalidate_manually(roots[1]).await; + + rig.harness + .chain + .canonical_head + .fork_choice_write_lock() + .proto_array_mut() + .set_all_blocks_to_optimistic::<E>(&rig.harness.chain.spec) + .unwrap(); + + let new_weights = rig + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .proto_array() + .iter_nodes(&head.head_block_root()) + .map(|node| (node.root, node.weight)) + .collect::<HashMap<_, _>>(); + + assert_eq!(original_weights, new_weights); + + // Advance the current slot and run fork choice to remove proposer boost. + rig.harness + .set_current_slot(rig.harness.chain.slot().unwrap() + 1); + rig.recompute_head().await; + + assert_eq!( + rig.harness + .chain + .canonical_head + .fork_choice_read_lock() + .get_block_weight(&head.head_block_root()) + .unwrap(), + head.snapshot.beacon_state.validators()[0].effective_balance, + "proposer boost should be removed from the head block and the vote of a single validator applied" + ); + + // Import a length of chain to ensure the chain can be built atop. 
+ for _ in 0..E::slots_per_epoch() * 4 { + rig.import_block(Payload::Valid).await; + } +} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index b5b8152e8d..d9d5ca20d7 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2128,7 +2128,7 @@ async fn weak_subjectivity_sync() { .process_block(Arc::new(full_block), CountUnrealized::True) .await .unwrap(); - beacon_chain.recompute_head_at_current_slot().await.unwrap(); + beacon_chain.recompute_head_at_current_slot().await; // Check that the new block's state can be loaded correctly. let state_root = block.state_root(); @@ -2460,11 +2460,7 @@ async fn revert_minority_fork_on_resume() { .build(); // Head should now be just before the fork. - resumed_harness - .chain - .recompute_head_at_current_slot() - .await - .unwrap(); + resumed_harness.chain.recompute_head_at_current_slot().await; assert_eq!(resumed_harness.head_slot(), fork_slot - 1); // Head track should know the canonical head and the rogue head. @@ -2482,11 +2478,7 @@ async fn revert_minority_fork_on_resume() { .unwrap(); // The canonical head should be the block from the majority chain. 
- resumed_harness - .chain - .recompute_head_at_current_slot() - .await - .unwrap(); + resumed_harness.chain.recompute_head_at_current_slot().await; assert_eq!(resumed_harness.head_slot(), block.slot()); assert_eq!(resumed_harness.head_block_root(), block.canonical_root()); } diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 80a122976f..f7d443748d 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -693,11 +693,7 @@ async fn run_skip_slot_test(skip_slots: u64) { harness_a.chain.head_snapshot().beacon_block_root ); - harness_b - .chain - .recompute_head_at_current_slot() - .await - .expect("should run fork choice"); + harness_b.chain.recompute_head_at_current_slot().await; assert_eq!( harness_b.chain.head_snapshot().beacon_block.slot(), diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 3284f874f9..c2503f392f 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1059,10 +1059,7 @@ pub fn serve<T: BeaconChainTypes>( // Update the head since it's likely this block will become the new // head. - chain - .recompute_head_at_current_slot() - .await - .map_err(warp_utils::reject::beacon_chain_error)?; + chain.recompute_head_at_current_slot().await; // Perform some logging to inform users if their blocks are being produced // late. @@ -1186,10 +1183,7 @@ pub fn serve<T: BeaconChainTypes>( Ok(_) => { // Update the head since it's likely this block will become the new // head. 
- chain - .recompute_head_at_current_slot() - .await - .map_err(warp_utils::reject::beacon_chain_error)?; + chain.recompute_head_at_current_slot().await; Ok(warp::reply::json(&())) } diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index d437cf0bed..05854ac1e2 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -221,7 +221,7 @@ impl TestRig { } pub async fn recompute_head(&self) { - self.chain.recompute_head_at_current_slot().await.unwrap() + self.chain.recompute_head_at_current_slot().await } pub fn head_root(&self) -> Hash256 { diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 1b1dc12d87..12172e0e53 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -927,21 +927,7 @@ impl<T: BeaconChainTypes> Worker<T> { "peer_id" => %peer_id ); - if let Err(e) = self.chain.recompute_head_at_current_slot().await { - error!( - self.log, - "Fork choice failed"; - "error" => ?e, - "location" => "block_gossip" - ) - } else { - debug!( - self.log, - "Fork choice success"; - "block" => ?block_root, - "location" => "block_gossip" - ) - } + self.chain.recompute_head_at_current_slot().await; } Err(BlockError::ParentUnknown { .. 
}) => { // Inform the sync manager to find parents for this block diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index ffcadb8689..a27ba7bfa0 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -111,7 +111,7 @@ impl<T: BeaconChainTypes> Worker<T> { None, ); - self.recompute_head("process_rpc_block").await; + self.chain.recompute_head_at_current_slot().await; } } // Sync handles these results @@ -248,7 +248,7 @@ impl<T: BeaconChainTypes> Worker<T> { ChainSegmentResult::Successful { imported_blocks } => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL); if imported_blocks > 0 { - self.recompute_head("process_blocks_ok").await; + self.chain.recompute_head_at_current_slot().await; } (imported_blocks, Ok(())) } @@ -259,7 +259,7 @@ impl<T: BeaconChainTypes> Worker<T> { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_FAILED_TOTAL); let r = self.handle_failed_chain_segment(error); if imported_blocks > 0 { - self.recompute_head("process_blocks_err").await; + self.chain.recompute_head_at_current_slot().await; } (imported_blocks, r) } @@ -392,24 +392,6 @@ impl<T: BeaconChainTypes> Worker<T> { } } - /// Runs fork-choice on a given chain. This is used during block processing after one successful - /// block import. 
- async fn recompute_head(&self, location: &str) { - match self.chain.recompute_head_at_current_slot().await { - Ok(()) => debug!( - self.log, - "Fork choice success"; - "location" => location - ), - Err(e) => error!( - self.log, - "Fork choice failed"; - "error" => ?e, - "location" => location - ), - } - } - /// Helper function to handle a `BlockError` from `process_chain_segment` fn handle_failed_chain_segment( &self, diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index a31d8ade6b..c17c46a777 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1451,7 +1451,17 @@ where _phantom: PhantomData, }; - fork_choice.get_head(current_slot, spec)?; + // If a call to `get_head` fails, the only known cause is because the only head with viable + // FFG properties has an invalid payload. In this scenario, set all the payloads back to + // an optimistic status so that we can have a head to start from. + if fork_choice.get_head(current_slot, spec).is_err() { + fork_choice + .proto_array + .set_all_blocks_to_optimistic::<E>(spec)?; + // If the second attempt at finding a head fails, return an error since we do not + // expect this scenario. + fork_choice.get_head(current_slot, spec)?; + } Ok(fork_choice) } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 85a15fb60e..962408513e 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -980,7 +980,7 @@ impl ProtoArray { /// Returns `None` if there is an overflow or underflow when calculating the score. 
/// /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance -fn calculate_proposer_boost<E: EthSpec>( +pub fn calculate_proposer_boost<E: EthSpec>( validator_balances: &[u64], proposer_score_boost: u64, ) -> Option<u64> { diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 4767919f70..3ecdc68a2e 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1,5 +1,7 @@ use crate::error::Error; -use crate::proto_array::{InvalidationOperation, Iter, ProposerBoost, ProtoArray, ProtoNode}; +use crate::proto_array::{ + calculate_proposer_boost, InvalidationOperation, Iter, ProposerBoost, ProtoArray, ProtoNode, +}; use crate::ssz_container::SszContainer; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; @@ -303,6 +305,106 @@ impl ProtoArrayForkChoice { .map_err(|e| format!("find_head failed: {:?}", e)) } + /// For all nodes, regardless of their relationship to the finalized block, set their execution + /// status to be optimistic. + /// + /// In practice this means forgetting any `VALID` or `INVALID` statuses. + pub fn set_all_blocks_to_optimistic<E: EthSpec>( + &mut self, + spec: &ChainSpec, + ) -> Result<(), String> { + // Iterate backwards through all nodes in the `proto_array`. Whilst it's not strictly + // required to do this process in reverse, it seems natural when we consider how LMD votes + // are counted. + // + // This function will touch all blocks, even those that do not descend from the finalized + // block. Since this function is expected to run at start-up during very rare + // circumstances we prefer simplicity over efficiency. 
+ for node_index in (0..self.proto_array.nodes.len()).rev() { + let node = self + .proto_array + .nodes + .get_mut(node_index) + .ok_or("unreachable index out of bounds in proto_array nodes")?; + + match node.execution_status { + ExecutionStatus::Invalid(block_hash) => { + node.execution_status = ExecutionStatus::Optimistic(block_hash); + + // Restore the weight of the node, it would have been set to `0` in + // `apply_score_changes` when it was invalidated. + let mut restored_weight: u64 = self + .votes + .0 + .iter() + .enumerate() + .filter_map(|(validator_index, vote)| { + if vote.current_root == node.root { + // Any voting validator that does not have a balance should be + // ignored. This is consistent with `compute_deltas`. + self.balances.get(validator_index) + } else { + None + } + }) + .sum(); + + // If the invalid root was boosted, apply the weight to it and + // ancestors. + if let Some(proposer_score_boost) = spec.proposer_score_boost { + if self.proto_array.previous_proposer_boost.root == node.root { + // Compute the score based upon the current balances. We can't rely on + // the `previous_proposer_boost.score` since it is set to zero with an + // invalid node. + let proposer_score = + calculate_proposer_boost::<E>(&self.balances, proposer_score_boost) + .ok_or("Failed to compute proposer boost")?; + // Store the score we've applied here so it can be removed in + // a later call to `apply_score_changes`. + self.proto_array.previous_proposer_boost.score = proposer_score; + // Apply this boost to this node. + restored_weight = restored_weight + .checked_add(proposer_score) + .ok_or("Overflow when adding boost to weight")?; + } + } + + // Add the restored weight to the node and all ancestors. 
+ if restored_weight > 0 { + let mut node_or_ancestor = node; + loop { + node_or_ancestor.weight = node_or_ancestor + .weight + .checked_add(restored_weight) + .ok_or("Overflow when adding weight to ancestor")?; + + if let Some(parent_index) = node_or_ancestor.parent { + node_or_ancestor = self + .proto_array + .nodes + .get_mut(parent_index) + .ok_or(format!("Missing parent index: {}", parent_index))?; + } else { + // This is either the finalized block or a block that does not + // descend from the finalized block. + break; + } + } + } + } + // There are no balance changes required if the node was either valid or + // optimistic. + ExecutionStatus::Valid(block_hash) | ExecutionStatus::Optimistic(block_hash) => { + node.execution_status = ExecutionStatus::Optimistic(block_hash) + } + // An irrelevant node cannot become optimistic, this is a no-op. + ExecutionStatus::Irrelevant(_) => (), + } + } + + Ok(()) + } + pub fn maybe_prune(&mut self, finalized_root: Hash256) -> Result<(), String> { self.proto_array .maybe_prune(finalized_root) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 7d90f2ee9a..65872efbe9 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -313,8 +313,7 @@ impl<E: EthSpec> Tester<E> { fn find_head(&self) -> Result<CachedHead<E>, Error> { let chain = self.harness.chain.clone(); - self.block_on_dangerous(chain.recompute_head_at_current_slot())? 
- .map_err(|e| Error::InternalError(format!("failed to find head with {:?}", e)))?; + self.block_on_dangerous(chain.recompute_head_at_current_slot())?; Ok(self.harness.chain.canonical_head.cached_head()) } From 6c2d8b2262f1d55eb10d302bfc668660dc173b49 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Sat, 30 Jul 2022 00:22:37 +0000 Subject: [PATCH 100/184] Builder Specs v0.2.0 (#3134) ## Issue Addressed https://github.com/sigp/lighthouse/issues/3091 Extends https://github.com/sigp/lighthouse/pull/3062, adding pre-bellatrix block support on blinded endpoints and allowing the normal proposal flow (local payload construction) on blinded endpoints. This resulted in better fallback logic because the VC will not have to switch endpoints on failure in the BN <> Builder API, the BN can just fallback immediately and without repeating block processing that it shouldn't need to. We can also keep VC fallback from the VC<>BN API's blinded endpoint to full endpoint. ## Proposed Changes - Pre-bellatrix blocks on blinded endpoints - Add a new `PayloadCache` to the execution layer - Better fallback-from-builder logic ## Todos - [x] Remove VC transition logic - [x] Add logic to only enable builder flow after Merge transition finalization - [x] Tests - [x] Fix metrics - [x] Rustdocs Co-authored-by: Mac L <mjladson@pm.me> Co-authored-by: realbigsean <sean@sigmaprime.io> --- .github/workflows/local-testnet.yml | 20 +- Cargo.lock | 445 +++++--- account_manager/src/validator/import.rs | 2 + beacon_node/beacon_chain/Cargo.toml | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 89 +- beacon_node/beacon_chain/src/chain_config.rs | 15 + beacon_node/beacon_chain/src/errors.rs | 1 + .../beacon_chain/src/execution_payload.rs | 17 +- beacon_node/beacon_chain/src/lib.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 40 +- beacon_node/builder_client/src/lib.rs | 56 +- beacon_node/execution_layer/Cargo.toml | 4 + .../src/engine_api/json_structures.rs | 58 +- 
beacon_node/execution_layer/src/lib.rs | 218 +++- .../execution_layer/src/payload_cache.rs | 33 + .../src/test_utils/mock_builder.rs | 383 +++++++ .../src/test_utils/mock_execution_layer.rs | 47 +- .../execution_layer/src/test_utils/mod.rs | 2 + beacon_node/http_api/Cargo.toml | 3 +- beacon_node/http_api/src/lib.rs | 220 +--- beacon_node/http_api/src/publish_blocks.rs | 155 +++ beacon_node/http_api/tests/common.rs | 12 +- beacon_node/http_api/tests/tests.rs | 972 +++++++++++++++++- beacon_node/src/cli.rs | 40 + beacon_node/src/config.rs | 14 + book/src/SUMMARY.md | 21 +- book/src/builders.md | 144 +++ book/src/suggested-fee-recipient.md | 18 +- .../src/validator_definitions.rs | 92 ++ common/eth2/src/lib.rs | 4 +- common/eth2/src/lighthouse_vc/http_client.rs | 14 +- common/eth2/src/lighthouse_vc/types.rs | 38 +- consensus/types/src/builder_bid.rs | 24 +- consensus/types/src/chain_spec.rs | 8 + consensus/types/src/execution_block_hash.rs | 4 +- consensus/types/src/payload.rs | 11 + lighthouse/tests/account_manager.rs | 8 + lighthouse/tests/beacon_node.rs | 74 +- lighthouse/tests/validator_client.rs | 42 + .../{print_logs.sh => dump_logs.sh} | 4 +- scripts/local_testnet/start_local_testnet.sh | 7 +- scripts/local_testnet/validator_client.sh | 17 +- testing/ef_tests/src/cases/operations.rs | 39 +- testing/ef_tests/src/type_name.rs | 1 + testing/ef_tests/tests/tests.rs | 8 +- .../src/test_rig.rs | 24 +- testing/web3signer_tests/src/lib.rs | 51 +- validator_client/src/block_service.rs | 117 +-- validator_client/src/cli.rs | 23 +- validator_client/src/config.rs | 34 +- .../src/http_api/create_validator.rs | 4 + validator_client/src/http_api/keystores.rs | 2 + validator_client/src/http_api/mod.rs | 32 +- validator_client/src/http_api/remotekeys.rs | 2 + validator_client/src/http_api/tests.rs | 137 ++- .../src/http_api/tests/keystores.rs | 4 +- validator_client/src/http_metrics/metrics.rs | 2 + .../src/initialized_validators.rs | 77 +- validator_client/src/lib.rs | 9 +- 
validator_client/src/preparation_service.rs | 179 ++-- validator_client/src/validator_store.rs | 85 +- 61 files changed, 3522 insertions(+), 687 deletions(-) create mode 100644 beacon_node/execution_layer/src/payload_cache.rs create mode 100644 beacon_node/execution_layer/src/test_utils/mock_builder.rs create mode 100644 beacon_node/http_api/src/publish_blocks.rs create mode 100644 book/src/builders.md rename scripts/local_testnet/{print_logs.sh => dump_logs.sh} (83%) diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index b68135e4d8..35032a0932 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -40,13 +40,29 @@ jobs: run: make && make install-lcli - name: Start local testnet - run: ./start_local_testnet.sh + run: ./start_local_testnet.sh && sleep 60 working-directory: scripts/local_testnet - name: Print logs - run: ./print_logs.sh + run: ./dump_logs.sh working-directory: scripts/local_testnet - name: Stop local testnet run: ./stop_local_testnet.sh working-directory: scripts/local_testnet + + - name: Clean-up testnet + run: ./clean.sh + working-directory: scripts/local_testnet + + - name: Start local testnet with blinded block production + run: ./start_local_testnet.sh -p && sleep 60 + working-directory: scripts/local_testnet + + - name: Print logs for blinded block testnet + run: ./dump_logs.sh + working-directory: scripts/local_testnet + + - name: Stop local testnet with blinded block production + run: ./stop_local_testnet.sh + working-directory: scripts/local_testnet diff --git a/Cargo.lock b/Cargo.lock index e06b5f55ad..a93bd7fd5e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -178,6 +178,27 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" +[[package]] +name = "async-stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" +dependencies = [ + "async-stream-impl", + "futures-core", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-trait" version = "0.1.56" @@ -263,10 +284,55 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] -name = "backtrace" -version = "0.3.65" +name = "axum" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61" +checksum = "6b9496f0c1d1afb7a2af4338bbe1d969cddfead41d87a9fb3aaa6d0bbc7af648" +dependencies = [ + "async-trait", + "axum-core", + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "itoa 1.0.2", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite 0.2.9", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-http", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4f44a0e6200e9d11a1cdc989e4b358f6e3d354fbf48478f345a17f4e43f8635" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", +] + +[[package]] +name = "backtrace" +version = "0.3.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" dependencies = [ "addr2line", "cc", @@ -295,6 +361,24 @@ version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" +[[package]] +name = "beacon-api-client" +version = "0.1.0" +source = "git+https://github.com/ralexstokes/beacon-api-client#061c1b1bb1f18bcd7cf23d4cd375f99c78d5a2a5" +dependencies = [ + "ethereum-consensus", + "http", + "itertools", + "reqwest", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", + "tracing-subscriber", + "url", +] + [[package]] name = "beacon_chain" version = "0.2.0" @@ -347,6 +431,7 @@ dependencies = [ "tokio", "tree_hash", "types", + "unused_port", ] [[package]] @@ -430,9 +515,9 @@ dependencies = [ [[package]] name = "bitvec" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1489fcb93a5bb47da0462ca93ad252ad6af2145cce58d10d46a83931ba9f016b" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty 2.0.0", "radium 0.7.0", @@ -591,9 +676,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "f0b3de4a0c5e67e16066a0715723abd91edc2f9001d09c46e1dca929351e130e" dependencies = [ "serde", ] @@ -636,12 +721,9 @@ dependencies = [ [[package]] name = "cast" -version = "0.2.7" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a" -dependencies = [ - "rustc_version 0.4.0", -] +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" @@ -666,9 +748,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chacha20" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"01b72a433d0cf2aef113ba70f62634c56fddb0f244e6377185c56a7cadbd8f91" +checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" dependencies = [ "cfg-if", "cipher", @@ -678,9 +760,9 @@ dependencies = [ [[package]] name = "chacha20poly1305" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b84ed6d1d5f7aa9bdde921a5090e0ca4d934d250ea3b402a5fab3a994e28a2a" +checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" dependencies = [ "aead", "chacha20", @@ -888,9 +970,9 @@ dependencies = [ [[package]] name = "criterion" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1604dafd25fba2fe2d5895a9da139f8dc9b319a5fe5354ca137cbbce4e178d10" +checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" dependencies = [ "atty", "cast", @@ -914,9 +996,9 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d00996de9f2f7559f7f4dc286073197f83e92256a59ed395f9aac01fe717da57" +checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" dependencies = [ "cast", "itertools", @@ -924,9 +1006,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c" +checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ "cfg-if", "crossbeam-utils", @@ -934,9 +1016,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = 
"715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -945,9 +1027,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07db9d94cbd326813772c968ccd25999e5f8ae22f4f8d1b11effa37ef6ce281d" +checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1" dependencies = [ "autocfg 1.1.0", "cfg-if", @@ -959,9 +1041,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d82ee10ce34d7bc12c2122495e7593a9c41347ecdd64185af4ecf72cb1a7f83" +checksum = "51887d4adc7b564537b15adcfb307936f8075dfcd5f00dde9a9f1d29383682bc" dependencies = [ "cfg-if", "once_cell", @@ -987,9 +1069,9 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5999502d32b9c48d492abe66392408144895020ec4709e549e840799f3bb74c0" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", @@ -1052,7 +1134,7 @@ version = "3.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b37feaa84e6861e00a1f5e5aa8da3ee56d605c9992d33e082786754828e20865" dependencies = [ - "nix 0.24.1", + "nix 0.24.2", "winapi", ] @@ -1840,6 +1922,27 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "ethereum-consensus" +version = "0.1.0" +source = "git+https://github.com/ralexstokes/ethereum-consensus#592eb44dc24403cc9d152f4b96683ab551533201" +dependencies = [ + "async-stream", + "blst", + "enr", + "hex", + "integer-sqrt", + "multiaddr 0.14.0", + "rand 0.8.5", + "serde", + "serde_json", + "sha2 0.9.9", + "ssz-rs", + "thiserror", + "tokio", + "tokio-stream", +] + [[package]] name = "ethereum-types" version = "0.12.1" 
@@ -1912,7 +2015,7 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "tokio-tungstenite 0.17.1", + "tokio-tungstenite 0.17.2", "tracing", "tracing-futures", "url", @@ -1958,6 +2061,7 @@ dependencies = [ "eth2_serde_utils", "eth2_ssz", "eth2_ssz_types", + "ethereum-consensus", "ethers-core", "exit-future", "fork_choice", @@ -1967,6 +2071,7 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "lru", + "mev-build-rs", "parking_lot 0.12.1", "rand 0.8.5", "reqwest", @@ -1975,6 +2080,7 @@ dependencies = [ "serde_json", "slog", "slot_clock", + "ssz-rs", "state_processing", "task_executor", "tempfile", @@ -2016,9 +2122,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" dependencies = [ "instant", ] @@ -2338,9 +2444,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" +checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" [[package]] name = "git-version" @@ -2428,9 +2534,12 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash", +] [[package]] name = "hashlink" @@ -2585,6 +2694,12 @@ dependencies = [ "pin-project-lite 0.2.9", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + [[package]] name = "http_api" version = "0.1.0" @@ -2620,6 +2735,7 @@ dependencies = [ "tokio-stream", "tree_hash", "types", + "unused_port", "warp", "warp_utils", ] @@ -2665,9 +2781,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.19" +version = "0.14.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f" +checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" dependencies = [ "bytes", "futures-channel", @@ -2828,7 +2944,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg 1.1.0", - "hashbrown 0.12.1", + "hashbrown 0.12.3", ] [[package]] @@ -2902,9 +3018,9 @@ checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" [[package]] name = "js-sys" -version = "0.3.58" +version = "0.3.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27" +checksum = "258451ab10b34f8af53416d1fdab72c22e805f0c92a1136d59470ec0b11138b2" dependencies = [ "wasm-bindgen", ] @@ -3415,9 +3531,9 @@ dependencies = [ [[package]] name = "libsecp256k1" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" +checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" dependencies = [ "arrayref", "base64", @@ -3634,11 +3750,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84e6fe5655adc6ce00787cf7dcaf8dc4f998a0565d23eafc207a8b08ca3349a" +checksum = 
"e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a" dependencies = [ - "hashbrown 0.11.2", + "hashbrown 0.12.3", ] [[package]] @@ -3703,6 +3819,12 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +[[package]] +name = "matchit" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" + [[package]] name = "mdbx-sys" version = "0.11.6-4" @@ -3742,6 +3864,22 @@ dependencies = [ "safe_arith", ] +[[package]] +name = "mev-build-rs" +version = "0.2.0" +source = "git+https://github.com/ralexstokes/mev-rs?tag=v0.2.0#921fa3f7c3497839461964a5297dfe4f2cef3136" +dependencies = [ + "async-trait", + "axum", + "beacon-api-client", + "ethereum-consensus", + "serde", + "serde_json", + "ssz-rs", + "thiserror", + "tracing", +] + [[package]] name = "milagro_bls" version = "1.4.2" @@ -4035,9 +4173,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f17df307904acd05aa8e32e97bb20f2a0df1728bbc2d771ae8f9a90463441e9" +checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" dependencies = [ "bitflags", "cfg-if", @@ -4162,9 +4300,9 @@ dependencies = [ [[package]] name = "object" -version = "0.28.4" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424" +checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" dependencies = [ "memchr", ] @@ -4189,9 +4327,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.40" +version = "0.10.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fb81a6430ac911acb25fe5ac8f1d2af1b4ea8a4fdfda0f1ee4292af2e2d8eb0e" +checksum = "618febf65336490dfcf20b73f885f5651a0c89c64c2d4a8c3662585a70bf5bd0" dependencies = [ "bitflags", "cfg-if", @@ -4230,9 +4368,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.74" +version = "0.9.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835363342df5fba8354c5b453325b110ffd54044e588c539cf2f20a8014e4cb1" +checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f" dependencies = [ "autocfg 1.1.0", "cc", @@ -4293,7 +4431,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9182e4a71cae089267ab03e67c99368db7cd877baf50f931e5d6d4b71e195ac0" dependencies = [ "arrayvec", - "bitvec 1.0.0", + "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", "parity-scale-codec-derive 3.1.3", @@ -4404,9 +4542,9 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9a3b09a20e374558580a4914d3b7d89bd61b954a5a5e1dcbea98753addb1947" +checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" dependencies = [ "base64", ] @@ -4419,10 +4557,11 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pest" -version = "2.1.3" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +checksum = "69486e2b8c2d2aeb9762db7b4e00b0331156393555cff467f4163ff06821eef8" dependencies = [ + "thiserror", "ucd-trie", ] @@ -4539,9 +4678,9 @@ checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "plotters" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a" +checksum = "9428003b84df1496fb9d6eeee9c5f8145cb41ca375eb0dad204328888832811f" dependencies = [ "num-traits", "plotters-backend", @@ -4552,15 +4691,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.2" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c" +checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" [[package]] name = "plotters-svg" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "521fa9638fa597e1dc53e9412a4f9cefb01187ee1f7413076f9e6749e2885ba9" +checksum = "e0918736323d1baff32ee0eade54984f6f201ad7e97d5cfb5d6ab4a358529615" dependencies = [ "plotters-backend", ] @@ -4662,9 +4801,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.40" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd96a1e8ed2596c337f8eae5f24924ec83f5ad5ab21ea8e455d3566c69fbcaf7" +checksum = "c278e965f1d8cf32d6e0e96de3d3e79712178ae67986d9cf9151f51e95aac89b" dependencies = [ "unicode-ident", ] @@ -5065,9 +5204,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] @@ -5330,9 +5469,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0a5f7c728f5d284929a1cccb5bc19884422bfe6ef4d6c409da2c41838983fcf" +checksum = 
"24c8ad4f0c00e1eb5bc7614d236a7f1300e3dbd76b68cac8e06fb00b015ad8d8" [[package]] name = "rw-stream-sink" @@ -5562,9 +5701,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.138" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1578c6245786b9d168c5447eeacfb96856573ca56c9d68fdcf394be134882a47" +checksum = "fc855a42c7967b7c369eb5860f7164ef1f6f81c20c7cc1141f2a604e18723b03" dependencies = [ "serde_derive", ] @@ -5591,9 +5730,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.138" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "023e9b1467aef8a10fb88f25611870ada9800ef7e22afce356bb0d2387b6f27c" +checksum = "6f2122636b9fe3b81f1cb25099fcf2d3f542cdb1d45940d56c713158884a05da" dependencies = [ "proc-macro2", "quote", @@ -5658,9 +5797,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.24" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" +checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" dependencies = [ "indexmap", "ryu", @@ -5804,9 +5943,12 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +dependencies = [ + "autocfg 1.1.0", +] [[package]] name = "slasher" @@ -6063,6 +6205,31 @@ dependencies = [ "der 0.5.1", ] +[[package]] +name = "ssz-rs" +version = "0.8.0" +source = "git+https://github.com/ralexstokes/ssz-rs#bd7cfb5a836e28747e6ce5e570234d14df0b24f7" +dependencies = [ + "bitvec 1.0.1", + "hex", + "lazy_static", + "num-bigint", + "serde", + "sha2 0.9.9", + "ssz-rs-derive", + "thiserror", +] + +[[package]] +name = "ssz-rs-derive" 
+version = "0.8.0" +source = "git+https://github.com/ralexstokes/ssz-rs#bd7cfb5a836e28747e6ce5e570234d14df0b24f7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -6211,6 +6378,12 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" + [[package]] name = "synstructure" version = "0.12.6" @@ -6453,10 +6626,11 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.19.2" +version = "1.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439" +checksum = "7a8325f63a7d4774dd041e363b2409ed1c5cbbd0f867795e661df066b2b0a581" dependencies = [ + "autocfg 1.1.0", "bytes", "libc", "memchr", @@ -6551,16 +6725,16 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.17.1" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06cda1232a49558c46f8a504d5b93101d42c0bf7f911f12a105ba48168f821ae" +checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" dependencies = [ "futures-util", "log", "rustls 0.20.6", "tokio", "tokio-rustls 0.23.4", - "tungstenite 0.17.2", + "tungstenite 0.17.3", "webpki 0.22.0", "webpki-roots", ] @@ -6604,6 +6778,47 @@ dependencies = [ "serde", ] +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project 1.0.11", + "pin-project-lite 0.2.9", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.3.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" +dependencies = [ + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite 0.2.9", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" + [[package]] name = "tower-service" version = "0.3.2" @@ -6667,9 +6882,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a713421342a5a666b7577783721d3117f1b69a393df803ee17bb73b1e122a59" +checksum = "60db860322da191b40952ad9affe65ea23e7dd6a5c442c2c42865810c6ab8e6b" dependencies = [ "ansi_term", "matchers", @@ -6798,9 +7013,9 @@ dependencies = [ [[package]] name = "tungstenite" -version = "0.17.2" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96a2dea40e7570482f28eb57afbe42d97551905da6a9400acc5c328d24004f5" +checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ "base64", "byteorder", @@ -6922,9 +7137,9 @@ checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = "unicode-ident" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" +checksum = "15c61ba63f9235225a22310255a29b806b907c9b8c964bcbd0a2c70f3f2deea7" [[package]] name = "unicode-normalization" @@ -7210,9 +7425,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.81" +version = "0.2.82" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994" +checksum = "fc7652e3f6c4706c8d9cd54832c4a4ccb9b5336e2c3bd154d5cccfbf1c1f5f7d" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -7220,13 +7435,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a" +checksum = "662cd44805586bd52971b9586b1df85cdbbd9112e4ef4d8f41559c334dc6ac3f" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", "syn", @@ -7235,9 +7450,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.31" +version = "0.4.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de9a9cec1733468a8c657e57fa2413d2ae2c0129b95e87c5b72b8ace4d13f31f" +checksum = "fa76fb221a1f8acddf5b54ace85912606980ad661ac7a503b4570ffd3a624dad" dependencies = [ "cfg-if", "js-sys", @@ -7247,9 +7462,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa" +checksum = "b260f13d3012071dfb1512849c033b1925038373aea48ced3012c09df952c602" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7257,9 +7472,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" +checksum = "5be8e654bdd9b79216c2929ab90721aa82faf65c48cdf08bdc4e7f51357b80da" dependencies = [ "proc-macro2", "quote", @@ -7270,15 +7485,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.81" +version = "0.2.82" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be" +checksum = "6598dd0bd3c7d51095ff6531a5b23e02acdc81804e30d8f07afb77b7215a140a" [[package]] name = "wasm-bindgen-test" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b30cf2cba841a812f035c40c50f53eb9c56181192a9dd2c71b65e6a87a05ba" +checksum = "513df541345bb9fcc07417775f3d51bbb677daf307d8035c0afafd87dc2e6599" dependencies = [ "console_error_panic_hook", "js-sys", @@ -7290,9 +7505,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ad594bf33e73cafcac2ae9062fc119d4f75f9c77e25022f91c9a64bd5b6463" +checksum = "6150d36a03e90a3cf6c12650be10626a9902d70c5270fd47d7a47e5389a10d56" dependencies = [ "proc-macro2", "quote", @@ -7315,9 +7530,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.58" +version = "0.3.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fed94beee57daf8dd7d51f2b15dc2bcde92d7a72304cdf662a4371008b71b90" +checksum = "ed055ab27f941423197eb86b2035720b1a3ce40504df082cac2ecc6ed73335a1" dependencies = [ "js-sys", "wasm-bindgen", @@ -7417,9 +7632,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.3" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d8de8415c823c8abd270ad483c6feeac771fad964890779f9a8cb24fbbc1bf" +checksum = "f1c760f0d366a6c24a02ed7816e23e691f5d92291f94d15e836006fd11b04daf" dependencies = [ "webpki 0.22.0", ] @@ -7635,9 +7850,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.4.3" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619" +checksum = 
"c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" dependencies = [ "zeroize_derive", ] diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index 4c7140df39..c581866a25 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -280,6 +280,8 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin password_opt, graffiti, suggested_fee_recipient, + None, + None, ) .map_err(|e| format!("Unable to create new validator definition: {:?}", e))?; diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index c8b82e3d28..092f3064d5 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -62,6 +62,7 @@ sensitive_url = { path = "../../common/sensitive_url" } superstruct = "0.5.0" hex = "0.4.2" exit-future = "0.2.0" +unused_port = {path = "../../common/unused_port"} [[test]] name = "beacon_chain_tests" diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 2e944f2939..326d8b6c67 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -53,7 +53,9 @@ use crate::BeaconForkChoiceStore; use crate::BeaconSnapshot; use crate::{metrics, BeaconChainError}; use eth2::types::{EventKind, SseBlock, SyncDuty}; -use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatus}; +use execution_layer::{ + BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, +}; use fork_choice::{ AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, InvalidationOperation, PayloadVerificationStatus, @@ -3315,10 +3317,21 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? 
as u64; - let pubkey_opt = state + let pubkey = state .validators() .get(proposer_index as usize) - .map(|v| v.pubkey); + .map(|v| v.pubkey) + .ok_or(BlockProductionError::BeaconChain( + BeaconChainError::ValidatorIndexUnknown(proposer_index as usize), + ))?; + + let builder_params = BuilderParams { + pubkey, + slot: state.slot(), + chain_health: self + .is_healthy() + .map_err(BlockProductionError::BeaconChain)?, + }; // If required, start the process of loading an execution payload from the EL early. This // allows it to run concurrently with things like attestation packing. @@ -3326,7 +3339,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { BeaconState::Base(_) | BeaconState::Altair(_) => None, BeaconState::Merge(_) => { let prepare_payload_handle = - get_execution_payload(self.clone(), &state, proposer_index, pubkey_opt)?; + get_execution_payload(self.clone(), &state, proposer_index, builder_params)?; Some(prepare_payload_handle) } }; @@ -4539,6 +4552,74 @@ impl<T: BeaconChainTypes> BeaconChain<T> { .map(|duration| (fork_name, duration)) } + /// This method serves to get a sense of the current chain health. It is used in block proposal + /// to determine whether we should outsource payload production duties. + /// + /// Since we are likely calling this during the slot we are going to propose in, don't take into + /// account the current slot when accounting for skips. + pub fn is_healthy(&self) -> Result<ChainHealth, Error> { + // Check if the merge has been finalized. + if let Some(finalized_hash) = self + .canonical_head + .cached_head() + .forkchoice_update_parameters() + .finalized_hash + { + if ExecutionBlockHash::zero() == finalized_hash { + return Ok(ChainHealth::PreMerge); + } + } else { + return Ok(ChainHealth::PreMerge); + }; + + if self.config.builder_fallback_disable_checks { + return Ok(ChainHealth::Healthy); + } + + let current_slot = self.slot()?; + + // Check slots at the head of the chain. 
+ let prev_slot = current_slot.saturating_sub(Slot::new(1)); + let head_skips = prev_slot.saturating_sub(self.canonical_head.cached_head().head_slot()); + let head_skips_check = head_skips.as_usize() <= self.config.builder_fallback_skips; + + // Check if finalization is advancing. + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + let epochs_since_finalization = current_epoch.saturating_sub( + self.canonical_head + .cached_head() + .finalized_checkpoint() + .epoch, + ); + let finalization_check = epochs_since_finalization.as_usize() + <= self.config.builder_fallback_epochs_since_finalization; + + // Check skip slots in the last `SLOTS_PER_EPOCH`. + let start_slot = current_slot.saturating_sub(T::EthSpec::slots_per_epoch()); + let mut epoch_skips = 0; + for slot in start_slot.as_u64()..current_slot.as_u64() { + if self + .block_root_at_slot_skips_none(Slot::new(slot))? + .is_none() + { + epoch_skips += 1; + } + } + let epoch_skips_check = epoch_skips <= self.config.builder_fallback_skips_per_epoch; + + if !head_skips_check { + Ok(ChainHealth::Unhealthy(FailedCondition::Skips)) + } else if !finalization_check { + Ok(ChainHealth::Unhealthy( + FailedCondition::EpochsSinceFinalization, + )) + } else if !epoch_skips_check { + Ok(ChainHealth::Unhealthy(FailedCondition::SkipsPerEpoch)) + } else { + Ok(ChainHealth::Healthy) + } + } + pub fn dump_as_dot<W: Write>(&self, output: &mut W) { let canonical_head_hash = self.canonical_head.cached_head().head_block_root(); let mut visited: HashSet<Hash256> = HashSet::new(); diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index d5e3d19814..2c43ca53ed 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -24,6 +24,16 @@ pub struct ChainConfig { /// /// If set to 0 then block proposal will not wait for fork choice at all. 
pub fork_choice_before_proposal_timeout_ms: u64, + /// Number of skip slots in a row before the BN refuses to use connected builders during payload construction. + pub builder_fallback_skips: usize, + /// Number of skip slots in the past `SLOTS_PER_EPOCH` before the BN refuses to use connected + /// builders during payload construction. + pub builder_fallback_skips_per_epoch: usize, + /// Number of epochs since finalization before the BN refuses to use connected builders during + /// payload construction. + pub builder_fallback_epochs_since_finalization: usize, + /// Whether any chain health checks should be considered when deciding whether to use the builder API. + pub builder_fallback_disable_checks: bool, pub count_unrealized: bool, } @@ -36,6 +46,11 @@ impl Default for ChainConfig { enable_lock_timeouts: true, max_network_size: 10 * 1_048_576, // 10M fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT, + // Builder fallback configs that are set in `clap` will override these. 
+ builder_fallback_skips: 3, + builder_fallback_skips_per_epoch: 8, + builder_fallback_epochs_since_finalization: 3, + builder_fallback_disable_checks: false, count_unrealized: false, } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 189cb3fdea..604fb6bea3 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -138,6 +138,7 @@ pub enum BeaconChainError { new_slot: Slot, }, AltairForkDisabled, + BuilderMissing, ExecutionLayerMissing, BlockVariantLacksExecutionPayload(Hash256), ExecutionLayerErrorPayloadReconstruction(ExecutionBlockHash, execution_layer::Error), diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 81193563cb..fade47e1d3 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -11,7 +11,7 @@ use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; -use execution_layer::PayloadStatus; +use execution_layer::{BuilderParams, PayloadStatus}; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; @@ -303,12 +303,11 @@ pub fn get_execution_payload< chain: Arc<BeaconChain<T>>, state: &BeaconState<T::EthSpec>, proposer_index: u64, - pubkey: Option<PublicKeyBytes>, + builder_params: BuilderParams, ) -> Result<PreparePayloadHandle<Payload>, BlockProductionError> { // Compute all required values from the `state` now to avoid needing to pass it into a spawned // task. 
let spec = &chain.spec; - let slot = state.slot(); let current_epoch = state.current_epoch(); let is_merge_transition_complete = is_merge_transition_complete(state); let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; @@ -325,13 +324,12 @@ pub fn get_execution_payload< async move { prepare_execution_payload::<T, Payload>( &chain, - slot, is_merge_transition_complete, timestamp, random, proposer_index, - pubkey, latest_execution_payload_header_block_hash, + builder_params, ) .await }, @@ -359,19 +357,18 @@ pub fn get_execution_payload< #[allow(clippy::too_many_arguments)] pub async fn prepare_execution_payload<T, Payload>( chain: &Arc<BeaconChain<T>>, - slot: Slot, is_merge_transition_complete: bool, timestamp: u64, random: Hash256, proposer_index: u64, - pubkey: Option<PublicKeyBytes>, latest_execution_payload_header_block_hash: ExecutionBlockHash, + builder_params: BuilderParams, ) -> Result<Payload, BlockProductionError> where T: BeaconChainTypes, Payload: ExecPayload<T::EthSpec> + Default, { - let current_epoch = slot.epoch(T::EthSpec::slots_per_epoch()); + let current_epoch = builder_params.slot.epoch(T::EthSpec::slots_per_epoch()); let spec = &chain.spec; let execution_layer = chain .execution_layer @@ -432,9 +429,9 @@ where timestamp, random, proposer_index, - pubkey, - slot, forkchoice_update_params, + builder_params, + &chain.spec, ) .await .map_err(BlockProductionError::GetPayloadFailed)?; diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 9cb734f2a0..57a1da9dc6 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -15,7 +15,7 @@ mod early_attester_cache; mod errors; pub mod eth1_chain; pub mod events; -mod execution_payload; +pub mod execution_payload; pub mod fork_choice_signal; pub mod fork_revert; mod head_tracker; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 
1f19465c08..6771861dfd 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -14,7 +14,9 @@ use bls::get_withdrawal_credentials; use execution_layer::test_utils::DEFAULT_JWT_SECRET; use execution_layer::{ auth::JwtKey, - test_utils::{ExecutionBlockGenerator, MockExecutionLayer, DEFAULT_TERMINAL_BLOCK}, + test_utils::{ + ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_TERMINAL_BLOCK, + }, ExecutionLayer, }; use fork_choice::CountUnrealized; @@ -154,6 +156,7 @@ pub struct Builder<T: BeaconChainTypes> { store_mutator: Option<BoxedMutator<T::EthSpec, T::HotStore, T::ColdStore>>, execution_layer: Option<ExecutionLayer<T::EthSpec>>, mock_execution_layer: Option<MockExecutionLayer<T::EthSpec>>, + mock_builder: Option<TestingBuilder<T::EthSpec>>, runtime: TestRuntime, log: Logger, } @@ -285,6 +288,7 @@ where store_mutator: None, execution_layer: None, mock_execution_layer: None, + mock_builder: None, runtime, log, } @@ -388,6 +392,38 @@ where self } + pub fn mock_execution_layer_with_builder(mut self, beacon_url: SensitiveUrl) -> Self { + // Get a random unused port + let port = unused_port::unused_tcp_port().unwrap(); + let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); + + let spec = self.spec.clone().expect("cannot build without spec"); + let mock_el = MockExecutionLayer::new( + self.runtime.task_executor.clone(), + spec.terminal_total_difficulty, + DEFAULT_TERMINAL_BLOCK, + spec.terminal_block_hash, + spec.terminal_block_hash_activation_epoch, + Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + Some(builder_url.clone()), + ) + .move_to_terminal_block(); + + let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap(); + + self.mock_builder = Some(TestingBuilder::new( + mock_el_url, + builder_url, + beacon_url, + spec, + self.runtime.task_executor.clone(), + )); + self.execution_layer = Some(mock_el.el.clone()); + 
self.mock_execution_layer = Some(mock_el); + + self + } + /// Instruct the mock execution engine to always return a "valid" response to any payload it is /// asked to execute. pub fn mock_execution_layer_all_payloads_valid(self) -> Self { @@ -456,6 +492,7 @@ where shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), runtime: self.runtime, mock_execution_layer: self.mock_execution_layer, + mock_builder: self.mock_builder.map(Arc::new), rng: make_rng(), } } @@ -474,6 +511,7 @@ pub struct BeaconChainHarness<T: BeaconChainTypes> { pub runtime: TestRuntime, pub mock_execution_layer: Option<MockExecutionLayer<T::EthSpec>>, + pub mock_builder: Option<Arc<TestingBuilder<T::EthSpec>>>, pub rng: Mutex<StdRng>, } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 500f5aa9ff..3517d06b15 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -1,4 +1,3 @@ -use eth2::ok_or_error; use eth2::types::builder_bid::SignedBuilderBid; use eth2::types::{ BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, ExecutionPayload, @@ -6,23 +5,33 @@ use eth2::types::{ Slot, }; pub use eth2::Error; +use eth2::{ok_or_error, StatusCode}; use reqwest::{IntoUrl, Response}; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde::Serialize; use std::time::Duration; -pub const DEFAULT_GET_HEADER_TIMEOUT_MILLIS: u64 = 500; +pub const DEFAULT_TIMEOUT_MILLIS: u64 = 15000; + +/// This timeout is in accordance with v0.2.0 of the [builder specs](https://github.com/flashbots/mev-boost/pull/20). 
+pub const DEFAULT_GET_HEADER_TIMEOUT_MILLIS: u64 = 1000; #[derive(Clone)] pub struct Timeouts { get_header: Duration, + post_validators: Duration, + post_blinded_blocks: Duration, + get_builder_status: Duration, } impl Default for Timeouts { fn default() -> Self { Self { get_header: Duration::from_millis(DEFAULT_GET_HEADER_TIMEOUT_MILLIS), + post_validators: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), + post_blinded_blocks: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), + get_builder_status: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), } } } @@ -51,14 +60,6 @@ impl BuilderHttpClient { }) } - async fn get<T: DeserializeOwned, U: IntoUrl>(&self, url: U) -> Result<T, Error> { - self.get_response_with_timeout(url, None) - .await? - .json() - .await - .map_err(Error::Reqwest) - } - async fn get_with_timeout<T: DeserializeOwned, U: IntoUrl>( &self, url: U, @@ -104,14 +105,13 @@ impl BuilderHttpClient { &self, url: U, body: &T, + timeout: Option<Duration>, ) -> Result<Response, Error> { - let response = self - .client - .post(url) - .json(body) - .send() - .await - .map_err(Error::Reqwest)?; + let mut builder = self.client.post(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let response = builder.json(body).send().await.map_err(Error::Reqwest)?; ok_or_error(response).await } @@ -129,7 +129,8 @@ impl BuilderHttpClient { .push("builder") .push("validators"); - self.post_generic(path, &validator, None).await?; + self.post_generic(path, &validator, Some(self.timeouts.post_validators)) + .await?; Ok(()) } @@ -148,7 +149,11 @@ impl BuilderHttpClient { .push("blinded_blocks"); Ok(self - .post_with_raw_response(path, &blinded_block) + .post_with_raw_response( + path, + &blinded_block, + Some(self.timeouts.post_blinded_blocks), + ) .await? .json() .await?) 
@@ -160,7 +165,7 @@ impl BuilderHttpClient { slot: Slot, parent_hash: ExecutionBlockHash, pubkey: &PublicKeyBytes, - ) -> Result<ForkVersionedResponse<SignedBuilderBid<E, Payload>>, Error> { + ) -> Result<Option<ForkVersionedResponse<SignedBuilderBid<E, Payload>>>, Error> { let mut path = self.server.full.clone(); path.path_segments_mut() @@ -173,7 +178,13 @@ impl BuilderHttpClient { .push(format!("{parent_hash:?}").as_str()) .push(pubkey.as_hex_string().as_str()); - self.get_with_timeout(path, self.timeouts.get_header).await + let resp = self.get_with_timeout(path, self.timeouts.get_header).await; + + if matches!(resp, Err(Error::StatusCode(StatusCode::NO_CONTENT))) { + Ok(None) + } else { + resp.map(Some) + } } /// `GET /eth/v1/builder/status` @@ -187,6 +198,7 @@ impl BuilderHttpClient { .push("builder") .push("status"); - self.get(path).await + self.get_with_timeout(path, self.timeouts.get_builder_status) + .await } } diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 26e4ba52ef..83f9454f8a 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -40,3 +40,7 @@ lazy_static = "1.4.0" ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } builder_client = { path = "../builder_client" } fork_choice = { path = "../../consensus/fork_choice" } +mev-build-rs = {git = "https://github.com/ralexstokes/mev-rs", tag = "v0.2.0"} +ethereum-consensus = {git = "https://github.com/ralexstokes/ethereum-consensus"} +ssz-rs = {git = "https://github.com/ralexstokes/ssz-rs"} + diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 0316cf3993..9ed38b61b0 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,9 +1,6 @@ use super::*; use serde::{Deserialize, 
Serialize}; -use types::{ - EthSpec, ExecutionBlockHash, ExecutionPayloadHeader, FixedVector, Transaction, Unsigned, - VariableList, -}; +use types::{EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -432,59 +429,6 @@ impl From<ForkchoiceUpdatedResponse> for JsonForkchoiceUpdatedV1Response { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "SCREAMING_SNAKE_CASE")] -pub enum JsonProposeBlindedBlockResponseStatus { - Valid, - Invalid, - Syncing, -} -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(bound = "E: EthSpec")] -pub struct JsonProposeBlindedBlockResponse<E: EthSpec> { - pub result: ExecutionPayload<E>, - pub error: Option<String>, -} - -impl<E: EthSpec> From<JsonProposeBlindedBlockResponse<E>> for ExecutionPayload<E> { - fn from(j: JsonProposeBlindedBlockResponse<E>) -> Self { - let JsonProposeBlindedBlockResponse { result, error: _ } = j; - result - } -} - -impl From<JsonProposeBlindedBlockResponseStatus> for ProposeBlindedBlockResponseStatus { - fn from(j: JsonProposeBlindedBlockResponseStatus) -> Self { - match j { - JsonProposeBlindedBlockResponseStatus::Valid => { - ProposeBlindedBlockResponseStatus::Valid - } - JsonProposeBlindedBlockResponseStatus::Invalid => { - ProposeBlindedBlockResponseStatus::Invalid - } - JsonProposeBlindedBlockResponseStatus::Syncing => { - ProposeBlindedBlockResponseStatus::Syncing - } - } - } -} -impl From<ProposeBlindedBlockResponseStatus> for JsonProposeBlindedBlockResponseStatus { - fn from(f: ProposeBlindedBlockResponseStatus) -> Self { - match f { - ProposeBlindedBlockResponseStatus::Valid => { - JsonProposeBlindedBlockResponseStatus::Valid - } - ProposeBlindedBlockResponseStatus::Invalid => { - JsonProposeBlindedBlockResponseStatus::Invalid - } - ProposeBlindedBlockResponseStatus::Syncing => { - 
JsonProposeBlindedBlockResponseStatus::Syncing - } - } - } -} - #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransitionConfigurationV1 { diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 5b82018749..aea952a57d 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -4,6 +4,7 @@ //! This crate only provides useful functionality for "The Merge", it does not provide any of the //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. +use crate::payload_cache::PayloadCache; use auth::{strip_prefix, Auth, JwtKey}; use builder_client::BuilderHttpClient; use engine_api::Error as ApiError; @@ -31,13 +32,14 @@ use tokio::{ time::sleep, }; use types::{ - BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, + BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, ForkName, ProposerPreparationData, PublicKeyBytes, SignedBeaconBlock, Slot, }; mod engine_api; mod engines; mod metrics; +pub mod payload_cache; mod payload_status; pub mod test_utils; @@ -69,6 +71,7 @@ pub enum Error { NoPayloadBuilder, ApiError(ApiError), Builder(builder_client::Error), + NoHeaderFromBuilder, EngineError(Box<EngineError>), NotSynced, ShuttingDown, @@ -101,6 +104,26 @@ pub struct Proposer { payload_attributes: PayloadAttributes, } +/// Information from the beacon chain that is necessary for querying the builder API. 
+pub struct BuilderParams { + pub pubkey: PublicKeyBytes, + pub slot: Slot, + pub chain_health: ChainHealth, +} + +pub enum ChainHealth { + Healthy, + Unhealthy(FailedCondition), + PreMerge, +} + +#[derive(Debug)] +pub enum FailedCondition { + Skips, + SkipsPerEpoch, + EpochsSinceFinalization, +} + struct Inner<E: EthSpec> { engine: Arc<Engine>, builder: Option<BuilderHttpClient>, @@ -110,7 +133,7 @@ struct Inner<E: EthSpec> { execution_blocks: Mutex<LruCache<ExecutionBlockHash, ExecutionBlock>>, proposers: RwLock<HashMap<ProposerKey, Proposer>>, executor: TaskExecutor, - phantom: std::marker::PhantomData<E>, + payload_cache: PayloadCache<E>, log: Logger, } @@ -212,7 +235,7 @@ impl<T: EthSpec> ExecutionLayer<T> { proposers: RwLock::new(HashMap::new()), execution_blocks: Mutex::new(LruCache::new(EXECUTION_BLOCKS_LRU_CACHE_SIZE)), executor, - phantom: std::marker::PhantomData, + payload_cache: PayloadCache::default(), log, }; @@ -231,6 +254,16 @@ impl<T: EthSpec> ExecutionLayer<T> { &self.inner.builder } + /// Cache a full payload, keyed on the `tree_hash_root` of its `transactions` field. + fn cache_payload(&self, payload: &ExecutionPayload<T>) -> Option<ExecutionPayload<T>> { + self.inner.payload_cache.put(payload.clone()) + } + + /// Attempt to retrieve a full payload from the payload cache by the `transactions_root`. 
+ pub fn get_payload_by_root(&self, root: &Hash256) -> Option<ExecutionPayload<T>> { + self.inner.payload_cache.pop(root) + } + pub fn executor(&self) -> &TaskExecutor { &self.inner.executor } @@ -487,9 +520,9 @@ impl<T: EthSpec> ExecutionLayer<T> { timestamp: u64, prev_randao: Hash256, proposer_index: u64, - pubkey: Option<PublicKeyBytes>, - slot: Slot, forkchoice_update_params: ForkchoiceUpdateParameters, + builder_params: BuilderParams, + spec: &ChainSpec, ) -> Result<Payload, Error> { let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; @@ -504,9 +537,9 @@ impl<T: EthSpec> ExecutionLayer<T> { timestamp, prev_randao, suggested_fee_recipient, - pubkey, - slot, forkchoice_update_params, + builder_params, + spec, ) .await } @@ -534,36 +567,137 @@ impl<T: EthSpec> ExecutionLayer<T> { timestamp: u64, prev_randao: Hash256, suggested_fee_recipient: Address, - pubkey_opt: Option<PublicKeyBytes>, - slot: Slot, forkchoice_update_params: ForkchoiceUpdateParameters, + builder_params: BuilderParams, + spec: &ChainSpec, ) -> Result<Payload, Error> { - //FIXME(sean) fallback logic included in PR #3134 + if let Some(builder) = self.builder() { + let slot = builder_params.slot; + let pubkey = builder_params.pubkey; - // Don't attempt to outsource payload construction until after the merge transition has been - // finalized. We want to be conservative with payload construction until then. 
- if let (Some(builder), Some(pubkey)) = (self.builder(), pubkey_opt) { - if forkchoice_update_params - .finalized_hash - .map_or(false, |finalized_block_hash| { - finalized_block_hash != ExecutionBlockHash::zero() - }) - { - info!( - self.log(), - "Requesting blinded header from connected builder"; - "slot" => ?slot, - "pubkey" => ?pubkey, - "parent_hash" => ?parent_hash, - ); - return builder - .get_builder_header::<T, Payload>(slot, parent_hash, &pubkey) - .await - .map(|d| d.data.message.header) - .map_err(Error::Builder); + match builder_params.chain_health { + ChainHealth::Healthy => { + info!( + self.log(), + "Requesting blinded header from connected builder"; + "slot" => ?slot, + "pubkey" => ?pubkey, + "parent_hash" => ?parent_hash, + ); + let (relay_result, local_result) = tokio::join!( + builder.get_builder_header::<T, Payload>(slot, parent_hash, &pubkey), + self.get_full_payload_caching( + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + forkchoice_update_params, + ) + ); + + return match (relay_result, local_result) { + (Err(e), Ok(local)) => { + warn!( + self.log(), + "Unable to retrieve a payload from a connected \ + builder, falling back to the local execution client: {e:?}" + ); + Ok(local) + } + (Ok(None), Ok(local)) => { + warn!( + self.log(), + "No payload provided by connected builder. \ + Attempting to propose through local execution engine" + ); + Ok(local) + } + (Ok(Some(relay)), Ok(local)) => { + let is_signature_valid = relay.data.verify_signature(spec); + let header = relay.data.message.header; + + info!( + self.log(), + "Received a payload header from the connected builder"; + "block_hash" => ?header.block_hash(), + ); + + if header.parent_hash() != parent_hash { + warn!( + self.log(), + "Invalid parent hash from connected builder, \ + falling back to local execution engine." 
+ ); + Ok(local) + } else if header.prev_randao() != prev_randao { + warn!( + self.log(), + "Invalid prev randao from connected builder, \ + falling back to local execution engine." + ); + Ok(local) + } else if header.timestamp() != local.timestamp() { + warn!( + self.log(), + "Invalid timestamp from connected builder, \ + falling back to local execution engine." + ); + Ok(local) + } else if header.block_number() != local.block_number() { + warn!( + self.log(), + "Invalid block number from connected builder, \ + falling back to local execution engine." + ); + Ok(local) + } else if !matches!(relay.version, Some(ForkName::Merge)) { + // Once fork information is added to the payload, we will need to + // check that the local and relay payloads match. At this point, if + // we are requesting a payload at all, we have to assume this is + // the Bellatrix fork. + warn!( + self.log(), + "Invalid fork from connected builder, falling \ + back to local execution engine." + ); + Ok(local) + } else if !is_signature_valid { + let pubkey_bytes = relay.data.message.pubkey; + warn!(self.log(), "Invalid signature for pubkey {pubkey_bytes} on \ + bid from connected builder, falling back to local execution engine."); + Ok(local) + } else { + if header.fee_recipient() != suggested_fee_recipient { + info!( + self.log(), + "Fee recipient from connected builder does \ + not match, using it anyways." + ); + } + Ok(header) + } + } + (relay_result, Err(local_error)) => { + warn!(self.log(), "Failure from local execution engine. Attempting to \ + propose through connected builder"; "error" => ?local_error); + relay_result + .map_err(Error::Builder)? + .ok_or(Error::NoHeaderFromBuilder) + .map(|d| d.data.message.header) + } + }; + } + ChainHealth::Unhealthy(condition) => { + info!(self.log(), "Due to poor chain health the local execution engine will be used \ + for payload construction. 
To adjust chain health conditions \ + Use `builder-fallback` prefixed flags"; + "failed_condition" => ?condition) + } + // Intentional no-op, so we never attempt builder API proposals pre-merge. + ChainHealth::PreMerge => (), } } - self.get_full_payload::<Payload>( + self.get_full_payload_caching( parent_hash, timestamp, prev_randao, @@ -593,6 +727,26 @@ impl<T: EthSpec> ExecutionLayer<T> { .await } + /// Get a full payload and cache its result in the execution layer's payload cache. + async fn get_full_payload_caching<Payload: ExecPayload<T>>( + &self, + parent_hash: ExecutionBlockHash, + timestamp: u64, + prev_randao: Hash256, + suggested_fee_recipient: Address, + forkchoice_update_params: ForkchoiceUpdateParameters, + ) -> Result<Payload, Error> { + self.get_full_payload_with( + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + forkchoice_update_params, + Self::cache_payload, + ) + .await + } + async fn get_full_payload_with<Payload: ExecPayload<T>>( &self, parent_hash: ExecutionBlockHash, diff --git a/beacon_node/execution_layer/src/payload_cache.rs b/beacon_node/execution_layer/src/payload_cache.rs new file mode 100644 index 0000000000..60a8f2a95c --- /dev/null +++ b/beacon_node/execution_layer/src/payload_cache.rs @@ -0,0 +1,33 @@ +use lru::LruCache; +use parking_lot::Mutex; +use tree_hash::TreeHash; +use types::{EthSpec, ExecutionPayload, Hash256}; + +pub const DEFAULT_PAYLOAD_CACHE_SIZE: usize = 10; + +/// A cache mapping execution payloads by tree hash roots. 
+pub struct PayloadCache<T: EthSpec> { + payloads: Mutex<LruCache<PayloadCacheId, ExecutionPayload<T>>>, +} + +#[derive(Hash, PartialEq, Eq)] +struct PayloadCacheId(Hash256); + +impl<T: EthSpec> Default for PayloadCache<T> { + fn default() -> Self { + PayloadCache { + payloads: Mutex::new(LruCache::new(DEFAULT_PAYLOAD_CACHE_SIZE)), + } + } +} + +impl<T: EthSpec> PayloadCache<T> { + pub fn put(&self, payload: ExecutionPayload<T>) -> Option<ExecutionPayload<T>> { + let root = payload.tree_hash_root(); + self.payloads.lock().put(PayloadCacheId(root), payload) + } + + pub fn pop(&self, root: &Hash256) -> Option<ExecutionPayload<T>> { + self.payloads.lock().pop(&PayloadCacheId(*root)) + } +} diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs new file mode 100644 index 0000000000..6b565cb3d8 --- /dev/null +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -0,0 +1,383 @@ +use crate::test_utils::DEFAULT_JWT_SECRET; +use crate::{Config, ExecutionLayer, PayloadAttributes}; +use async_trait::async_trait; +use eth2::types::{BlockId, StateId, ValidatorId}; +use eth2::{BeaconNodeHttpClient, Timeouts}; +use ethereum_consensus::crypto::{SecretKey, Signature}; +use ethereum_consensus::primitives::BlsPublicKey; +pub use ethereum_consensus::state_transition::Context; +use fork_choice::ForkchoiceUpdateParameters; +use mev_build_rs::{ + sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError, + BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload, + ExecutionPayloadHeader as ServerPayloadHeader, SignedBlindedBeaconBlock, SignedBuilderBid, + SignedValidatorRegistration, +}; +use parking_lot::RwLock; +use sensitive_url::SensitiveUrl; +use ssz::{Decode, Encode}; +use ssz_rs::{Merkleized, SimpleSerialize}; +use std::collections::HashMap; +use std::fmt::Debug; +use std::net::Ipv4Addr; +use std::sync::Arc; +use std::time::Duration; +use 
task_executor::TaskExecutor; +use tempfile::NamedTempFile; +use tree_hash::TreeHash; +use types::{ + Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, Hash256, Slot, Uint256, +}; + +#[derive(Clone)] +pub enum Operation { + FeeRecipient(Address), + GasLimit(usize), + Value(usize), + ParentHash(Hash256), + PrevRandao(Hash256), + BlockNumber(usize), + Timestamp(usize), +} + +impl Operation { + fn apply(self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { + match self { + Operation::FeeRecipient(fee_recipient) => { + bid.header.fee_recipient = to_ssz_rs(&fee_recipient)? + } + Operation::GasLimit(gas_limit) => bid.header.gas_limit = gas_limit as u64, + Operation::Value(value) => bid.value = to_ssz_rs(&Uint256::from(value))?, + Operation::ParentHash(parent_hash) => bid.header.parent_hash = to_ssz_rs(&parent_hash)?, + Operation::PrevRandao(prev_randao) => bid.header.prev_randao = to_ssz_rs(&prev_randao)?, + Operation::BlockNumber(block_number) => bid.header.block_number = block_number as u64, + Operation::Timestamp(timestamp) => bid.header.timestamp = timestamp as u64, + } + Ok(()) + } +} + +pub struct TestingBuilder<E: EthSpec> { + server: BlindedBlockProviderServer<MockBuilder<E>>, + pub builder: MockBuilder<E>, +} + +impl<E: EthSpec> TestingBuilder<E> { + pub fn new( + mock_el_url: SensitiveUrl, + builder_url: SensitiveUrl, + beacon_url: SensitiveUrl, + spec: ChainSpec, + executor: TaskExecutor, + ) -> Self { + let file = NamedTempFile::new().unwrap(); + let path = file.path().into(); + std::fs::write(&path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); + + // This EL should not talk to a builder + let config = Config { + execution_endpoints: vec![mock_el_url], + secret_files: vec![path], + suggested_fee_recipient: None, + ..Default::default() + }; + + let el = + ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); + + // This should probably be done for all fields, we only update ones we are 
testing with so far. + let mut context = Context::for_mainnet(); + context.terminal_total_difficulty = to_ssz_rs(&spec.terminal_total_difficulty).unwrap(); + context.terminal_block_hash = to_ssz_rs(&spec.terminal_block_hash).unwrap(); + context.terminal_block_hash_activation_epoch = + to_ssz_rs(&spec.terminal_block_hash_activation_epoch).unwrap(); + + let builder = MockBuilder::new( + el, + BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(Duration::from_secs(1))), + spec, + context, + ); + let port = builder_url.full.port().unwrap(); + let host: Ipv4Addr = builder_url + .full + .host_str() + .unwrap() + .to_string() + .parse() + .unwrap(); + let server = BlindedBlockProviderServer::new(host, port, builder.clone()); + Self { server, builder } + } + + pub async fn run(&self) { + self.server.run().await + } +} + +#[derive(Clone)] +pub struct MockBuilder<E: EthSpec> { + el: ExecutionLayer<E>, + beacon_client: BeaconNodeHttpClient, + spec: ChainSpec, + context: Arc<Context>, + val_registration_cache: Arc<RwLock<HashMap<BlsPublicKey, SignedValidatorRegistration>>>, + builder_sk: SecretKey, + operations: Arc<RwLock<Vec<Operation>>>, + invalidate_signatures: Arc<RwLock<bool>>, +} + +impl<E: EthSpec> MockBuilder<E> { + pub fn new( + el: ExecutionLayer<E>, + beacon_client: BeaconNodeHttpClient, + spec: ChainSpec, + context: Context, + ) -> Self { + let sk = SecretKey::random(&mut rand::thread_rng()).unwrap(); + Self { + el, + beacon_client, + // Should keep spec and context consistent somehow + spec, + context: Arc::new(context), + val_registration_cache: Arc::new(RwLock::new(HashMap::new())), + builder_sk: sk, + operations: Arc::new(RwLock::new(vec![])), + invalidate_signatures: Arc::new(RwLock::new(false)), + } + } + + pub fn add_operation(&self, op: Operation) { + self.operations.write().push(op); + } + + pub fn invalid_signatures(&self) { + *self.invalidate_signatures.write() = true; + } + + pub fn valid_signatures(&mut self) { + 
*self.invalidate_signatures.write() = false; + } + + fn apply_operations(&self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { + let mut guard = self.operations.write(); + while let Some(op) = guard.pop() { + op.apply(bid)?; + } + Ok(()) + } +} + +#[async_trait] +impl<E: EthSpec> mev_build_rs::BlindedBlockProvider for MockBuilder<E> { + async fn register_validators( + &self, + registrations: &mut [SignedValidatorRegistration], + ) -> Result<(), BlindedBlockProviderError> { + for registration in registrations { + let pubkey = registration.message.public_key.clone(); + let message = &mut registration.message; + verify_signed_builder_message( + message, + ®istration.signature, + &pubkey, + &self.context, + )?; + self.val_registration_cache.write().insert( + registration.message.public_key.clone(), + registration.clone(), + ); + } + + Ok(()) + } + + async fn fetch_best_bid( + &self, + bid_request: &BidRequest, + ) -> Result<SignedBuilderBid, BlindedBlockProviderError> { + let slot = Slot::new(bid_request.slot); + let signed_cached_data = self + .val_registration_cache + .read() + .get(&bid_request.public_key) + .ok_or_else(|| convert_err("missing registration"))? + .clone(); + let cached_data = signed_cached_data.message; + + let head = self + .beacon_client + .get_beacon_blocks::<E>(BlockId::Head) + .await + .map_err(convert_err)? + .ok_or_else(|| convert_err("missing head block"))?; + + let block = head.data.message_merge().map_err(convert_err)?; + let head_block_root = block.tree_hash_root(); + let head_execution_hash = block.body.execution_payload.execution_payload.block_hash; + if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { + return Err(BlindedBlockProviderError::Custom(format!( + "head mismatch: {} {}", + head_execution_hash, bid_request.parent_hash + ))); + } + + let finalized_execution_hash = self + .beacon_client + .get_beacon_blocks::<E>(BlockId::Finalized) + .await + .map_err(convert_err)? 
+ .ok_or_else(|| convert_err("missing finalized block"))? + .data + .message_merge() + .map_err(convert_err)? + .body + .execution_payload + .execution_payload + .block_hash; + + let justified_execution_hash = self + .beacon_client + .get_beacon_blocks::<E>(BlockId::Justified) + .await + .map_err(convert_err)? + .ok_or_else(|| convert_err("missing finalized block"))? + .data + .message_merge() + .map_err(convert_err)? + .body + .execution_payload + .execution_payload + .block_hash; + + let val_index = self + .beacon_client + .get_beacon_states_validator_id( + StateId::Head, + &ValidatorId::PublicKey(from_ssz_rs(&cached_data.public_key)?), + ) + .await + .map_err(convert_err)? + .ok_or_else(|| convert_err("missing validator from state"))? + .data + .index; + let fee_recipient = from_ssz_rs(&cached_data.fee_recipient)?; + let slots_since_genesis = slot.as_u64() - self.spec.genesis_slot.as_u64(); + + let genesis_time = self + .beacon_client + .get_beacon_genesis() + .await + .map_err(convert_err)? + .data + .genesis_time; + let timestamp = (slots_since_genesis * self.spec.seconds_per_slot) + genesis_time; + + let head_state: BeaconState<E> = self + .beacon_client + .get_debug_beacon_states(StateId::Head) + .await + .map_err(convert_err)? + .ok_or_else(|| BlindedBlockProviderError::Custom("missing head state".to_string()))? 
+ .data; + let prev_randao = head_state + .get_randao_mix(head_state.current_epoch()) + .map_err(convert_err)?; + + let payload_attributes = PayloadAttributes { + timestamp, + prev_randao: *prev_randao, + suggested_fee_recipient: fee_recipient, + }; + + self.el + .insert_proposer(slot, head_block_root, val_index, payload_attributes) + .await; + + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_root: Hash256::zero(), + head_hash: None, + justified_hash: Some(justified_execution_hash), + finalized_hash: Some(finalized_execution_hash), + }; + + let payload = self + .el + .get_full_payload_caching::<BlindedPayload<E>>( + head_execution_hash, + timestamp, + *prev_randao, + fee_recipient, + forkchoice_update_params, + ) + .await + .map_err(convert_err)? + .to_execution_payload_header(); + + let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; + let mut header: ServerPayloadHeader = + serde_json::from_str(json_payload.as_str()).map_err(convert_err)?; + + header.gas_limit = cached_data.gas_limit; + + let mut message = BuilderBid { + header, + value: ssz_rs::U256::default(), + public_key: self.builder_sk.public_key(), + }; + + self.apply_operations(&mut message)?; + + let mut signature = + sign_builder_message(&mut message, &self.builder_sk, self.context.as_ref())?; + + if *self.invalidate_signatures.read() { + signature = Signature::default(); + } + + let signed_bid = SignedBuilderBid { message, signature }; + Ok(signed_bid) + } + + async fn open_bid( + &self, + signed_block: &mut SignedBlindedBeaconBlock, + ) -> Result<ServerPayload, BlindedBlockProviderError> { + let payload = self + .el + .get_payload_by_root(&from_ssz_rs( + &signed_block + .message + .body + .execution_payload_header + .hash_tree_root() + .map_err(convert_err)?, + )?) 
+ .ok_or_else(|| convert_err("missing payload for tx root"))?; + + let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; + serde_json::from_str(json_payload.as_str()).map_err(convert_err) + } +} + +pub fn from_ssz_rs<T: SimpleSerialize, U: Decode>( + ssz_rs_data: &T, +) -> Result<U, BlindedBlockProviderError> { + U::from_ssz_bytes( + ssz_rs::serialize(ssz_rs_data) + .map_err(convert_err)? + .as_ref(), + ) + .map_err(convert_err) +} + +pub fn to_ssz_rs<T: Encode, U: SimpleSerialize>( + ssz_data: &T, +) -> Result<U, BlindedBlockProviderError> { + ssz_rs::deserialize::<U>(&ssz_data.as_ssz_bytes()).map_err(convert_err) +} + +fn convert_err<E: Debug>(e: E) -> BlindedBlockProviderError { + BlindedBlockProviderError::Custom(format!("{e:?}")) +} diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 8a5c26fe8d..cab2367cd0 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -7,6 +7,7 @@ use crate::{ use sensitive_url::SensitiveUrl; use task_executor::TaskExecutor; use tempfile::NamedTempFile; +use tree_hash::TreeHash; use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, Uint256}; pub struct MockExecutionLayer<T: EthSpec> { @@ -124,6 +125,11 @@ impl<T: EthSpec> MockExecutionLayer<T> { .unwrap(); let validator_index = 0; + let builder_params = BuilderParams { + pubkey: PublicKeyBytes::empty(), + slot, + chain_health: ChainHealth::Healthy, + }; let payload = self .el .get_payload::<FullPayload<T>>( @@ -131,9 +137,9 @@ impl<T: EthSpec> MockExecutionLayer<T> { timestamp, prev_randao, validator_index, - None, - slot, forkchoice_update_params, + builder_params, + &self.spec, ) .await .unwrap() @@ -144,6 +150,43 @@ impl<T: EthSpec> MockExecutionLayer<T> { assert_eq!(payload.timestamp, timestamp); assert_eq!(payload.prev_randao, prev_randao); + 
// Ensure the payload cache is empty. + assert!(self + .el + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + let builder_params = BuilderParams { + pubkey: PublicKeyBytes::empty(), + slot, + chain_health: ChainHealth::Healthy, + }; + let payload_header = self + .el + .get_payload::<BlindedPayload<T>>( + parent_hash, + timestamp, + prev_randao, + validator_index, + forkchoice_update_params, + builder_params, + &self.spec, + ) + .await + .unwrap() + .execution_payload_header; + assert_eq!(payload_header.block_hash, block_hash); + assert_eq!(payload_header.parent_hash, parent_hash); + assert_eq!(payload_header.block_number, block_number); + assert_eq!(payload_header.timestamp, timestamp); + assert_eq!(payload_header.prev_randao, prev_randao); + + // Ensure the payload cache has the correct payload. + assert_eq!( + self.el + .get_payload_by_root(&payload_header.tree_hash_root()), + Some(payload.clone()) + ); + let status = self.el.notify_new_payload(&payload).await.unwrap(); assert_eq!(status, PayloadStatus::Valid); diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 970c619a56..2463153951 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -22,6 +22,7 @@ use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; +pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder}; pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; @@ -30,6 +31,7 @@ pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32]; mod execution_block_generator; mod handle_rpc; +mod mock_builder; mod mock_execution_layer; /// Configuration for the MockExecutionLayer. 
diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 5cc703aa1a..fedd66c540 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -32,15 +32,16 @@ parking_lot = "0.12.0" safe_arith = {path = "../../consensus/safe_arith"} task_executor = { path = "../../common/task_executor" } lru = "0.7.7" +tree_hash = "0.4.1" [dev-dependencies] store = { path = "../store" } environment = { path = "../../lighthouse/environment" } -tree_hash = "0.4.1" sensitive_url = { path = "../../common/sensitive_url" } logging = { path = "../../common/logging" } serde_json = "1.0.58" proto_array = { path = "../../consensus/proto_array" } +unused_port = {path = "../../common/unused_port"} [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index c2503f392f..a1b23c7f03 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -13,17 +13,16 @@ mod block_rewards; mod database; mod metrics; mod proposer_duties; +mod publish_blocks; mod state_id; mod sync_committees; mod validator_inclusion; mod version; use beacon_chain::{ - attestation_verification::VerifiedAttestation, - observed_operations::ObservationOutcome, - validator_monitor::{get_block_delay_ms, timestamp_now}, - AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, - CountUnrealized, ProduceBlockVerification, WhenSlotSkipped, + attestation_verification::VerifiedAttestation, observed_operations::ObservationOutcome, + validator_monitor::timestamp_now, AttestationError as AttnError, BeaconChain, BeaconChainError, + BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped, }; pub use block_id::BlockId; use eth2::types::{self as api_types, EndpointVersion, ValidatorId}; @@ -45,12 +44,11 @@ use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ - Attestation, AttesterSlashing, 
BeaconBlockBodyMerge, BeaconBlockMerge, BeaconStateError, - BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, - ProposerPreparationData, ProposerSlashing, RelativeEpoch, Signature, SignedAggregateAndProof, - SignedBeaconBlock, SignedBeaconBlockMerge, SignedBlindedBeaconBlock, - SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, - SyncCommitteeMessage, SyncContributionData, + Attestation, AttesterSlashing, BeaconStateError, BlindedPayload, CommitteeCache, + ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, ProposerPreparationData, + ProposerSlashing, RelativeEpoch, Signature, SignedAggregateAndProof, SignedBeaconBlock, + SignedBlindedBeaconBlock, SignedContributionAndProof, SignedValidatorRegistrationData, + SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, }; use version::{ add_consensus_version_header, execution_optimistic_fork_versioned_response, @@ -1025,81 +1023,9 @@ pub fn serve<T: BeaconChainTypes>( chain: Arc<BeaconChain<T>>, network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>, log: Logger| async move { - let seen_timestamp = timestamp_now(); - - // Send the block, regardless of whether or not it is valid. The API - // specification is very clear that this is the desired behaviour. - publish_pubsub_message(&network_tx, PubsubMessage::BeaconBlock(block.clone()))?; - - // Determine the delay after the start of the slot, register it with metrics. 
- let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); - metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); - - match chain - .process_block(block.clone(), CountUnrealized::True) + publish_blocks::publish_block(block, chain, &network_tx, log) .await - { - Ok(root) => { - info!( - log, - "Valid block from HTTP API"; - "block_delay" => ?delay, - "root" => format!("{}", root), - "proposer_index" => block.message().proposer_index(), - "slot" => block.slot(), - ); - - // Notify the validator monitor. - chain.validator_monitor.read().register_api_block( - seen_timestamp, - block.message(), - root, - &chain.slot_clock, - ); - - // Update the head since it's likely this block will become the new - // head. - chain.recompute_head_at_current_slot().await; - - // Perform some logging to inform users if their blocks are being produced - // late. - // - // Check to see the thresholds are non-zero to avoid logging errors with small - // slot times (e.g., during testing) - let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); - let error_threshold = crit_threshold / 2; - if delay >= crit_threshold { - crit!( - log, - "Block was broadcast too late"; - "msg" => "system may be overloaded, block likely to be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) - } else if delay >= error_threshold { - error!( - log, - "Block broadcast was delayed"; - "msg" => "system may be overloaded, block may be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) - } - - Ok(warp::reply::json(&())) - } - Err(e) => { - let msg = format!("{:?}", e); - error!( - log, - "Invalid block provided to HTTP API"; - "reason" => &msg - ); - Err(warp_utils::reject::broadcast_without_import(msg)) - } - } + .map(|()| warp::reply()) }, ); @@ -1117,87 +1043,13 @@ pub fn serve<T: BeaconChainTypes>( .and(network_tx_filter.clone()) 
.and(log_filter.clone()) .and_then( - |block: Arc<SignedBeaconBlock<T::EthSpec, BlindedPayload<_>>>, + |block: SignedBeaconBlock<T::EthSpec, BlindedPayload<_>>, chain: Arc<BeaconChain<T>>, network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>, - _log: Logger| async move { - if let Some(el) = chain.execution_layer.as_ref() { - //FIXME(sean): we may not always receive the payload in this response because it - // should be the relay's job to propogate the block. However, since this block is - // already signed and sent this might be ok (so long as the relay validates - // the block before revealing the payload). - - //FIXME(sean) additionally, this endpoint should serve blocks prior to Bellatrix, and should - // be able to support the normal block proposal flow, because at some point full block endpoints - // will be deprecated from the beacon API. This will entail creating full blocks in - // `validator/blinded_blocks`, caching their payloads, and transforming them into blinded - // blocks. We will access the payload of those blocks here. This flow should happen if the - // execution layer has no payload builders or if we have not yet finalized post-merge transition. 
- let payload = el.propose_blinded_beacon_block(&block).await.map_err(|e| { - warp_utils::reject::custom_server_error(format!("proposal failed: {:?}", e)) - })?; - let new_block = SignedBeaconBlock::Merge(SignedBeaconBlockMerge { - message: BeaconBlockMerge { - slot: block.message().slot(), - proposer_index: block.message().proposer_index(), - parent_root: block.message().parent_root(), - state_root: block.message().state_root(), - body: BeaconBlockBodyMerge { - randao_reveal: block.message().body().randao_reveal().clone(), - eth1_data: block.message().body().eth1_data().clone(), - graffiti: *block.message().body().graffiti(), - proposer_slashings: block - .message() - .body() - .proposer_slashings() - .clone(), - attester_slashings: block - .message() - .body() - .attester_slashings() - .clone(), - attestations: block.message().body().attestations().clone(), - deposits: block.message().body().deposits().clone(), - voluntary_exits: block.message().body().voluntary_exits().clone(), - sync_aggregate: block - .message() - .body() - .sync_aggregate() - .unwrap() - .clone(), - execution_payload: payload.into(), - }, - }, - signature: block.signature().clone(), - }); - let new_block = Arc::new(new_block); - - // Send the block, regardless of whether or not it is valid. The API - // specification is very clear that this is the desired behaviour. - publish_pubsub_message( - &network_tx, - PubsubMessage::BeaconBlock(new_block.clone()), - )?; - - match chain.process_block(new_block, CountUnrealized::True).await { - Ok(_) => { - // Update the head since it's likely this block will become the new - // head. 
- chain.recompute_head_at_current_slot().await; - - Ok(warp::reply::json(&())) - } - Err(e) => { - let msg = format!("{:?}", e); - - Err(warp_utils::reject::broadcast_without_import(msg)) - } - } - } else { - Err(warp_utils::reject::custom_server_error( - "no execution layer found".to_string(), - )) - } + log: Logger| async move { + publish_blocks::publish_blinded_block(block, chain, &network_tx, log) + .await + .map(|()| warp::reply()) }, ); @@ -2593,19 +2445,13 @@ pub fn serve<T: BeaconChainTypes>( }) .collect::<Vec<_>>(); - debug!( - log, - "Resolved validator request pubkeys"; - "count" => preparation_data.len() - ); - // Update the prepare beacon proposer cache based on this request. execution_layer .update_proposer_preparation(current_epoch, &preparation_data) .await; // Call prepare beacon proposer blocking with the latest update in order to make - // sure we have a local payload to fall back to in the event of the blined block + // sure we have a local payload to fall back to in the event of the blinded block // flow failing. chain .prepare_beacon_proposer(current_slot) @@ -2617,9 +2463,37 @@ pub fn serve<T: BeaconChainTypes>( )) })?; - //TODO(sean): In the MEV-boost PR, add a call here to send the update request to the builder + let builder = execution_layer + .builder() + .as_ref() + .ok_or(BeaconChainError::BuilderMissing) + .map_err(warp_utils::reject::beacon_chain_error)?; - Ok::<_, warp::Rejection>(warp::reply::json(&())) + info!( + log, + "Forwarding register validator request to connected builder"; + "count" => register_val_data.len(), + ); + + builder + .post_builder_validators(®ister_val_data) + .await + .map(|resp| warp::reply::json(&resp)) + .map_err(|e| { + error!(log, "Error from connected relay"; "error" => ?e); + // Forward the HTTP status code if we are able to, otherwise fall back + // to a server error. 
+ if let eth2::Error::ServerMessage(message) = e { + if message.code == StatusCode::BAD_REQUEST.as_u16() { + return warp_utils::reject::custom_bad_request(message.message); + } else { + // According to the spec this response should only be a 400 or 500, + // so we fall back to a 500 here. + return warp_utils::reject::custom_server_error(message.message); + } + } + warp_utils::reject::custom_server_error(format!("{e:?}")) + }) }, ); // POST validator/sync_committee_subscriptions diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs new file mode 100644 index 0000000000..b282e6f490 --- /dev/null +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -0,0 +1,155 @@ +use crate::metrics; +use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; +use beacon_chain::{BeaconChain, BeaconChainTypes, CountUnrealized}; +use lighthouse_network::PubsubMessage; +use network::NetworkMessage; +use slog::{crit, error, info, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use tokio::sync::mpsc::UnboundedSender; +use tree_hash::TreeHash; +use types::{ + BlindedPayload, ExecPayload, ExecutionBlockHash, ExecutionPayload, FullPayload, + SignedBeaconBlock, +}; +use warp::Rejection; + +/// Handles a request from the HTTP API for full blocks. +pub async fn publish_block<T: BeaconChainTypes>( + block: Arc<SignedBeaconBlock<T::EthSpec>>, + chain: Arc<BeaconChain<T>>, + network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>, + log: Logger, +) -> Result<(), Rejection> { + let seen_timestamp = timestamp_now(); + + // Send the block, regardless of whether or not it is valid. The API + // specification is very clear that this is the desired behaviour. + crate::publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?; + + // Determine the delay after the start of the slot, register it with metrics. 
+ let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); + metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); + + match chain + .process_block(block.clone(), CountUnrealized::True) + .await + { + Ok(root) => { + info!( + log, + "Valid block from HTTP API"; + "block_delay" => ?delay, + "root" => format!("{}", root), + "proposer_index" => block.message().proposer_index(), + "slot" => block.slot(), + ); + + // Notify the validator monitor. + chain.validator_monitor.read().register_api_block( + seen_timestamp, + block.message(), + root, + &chain.slot_clock, + ); + + // Update the head since it's likely this block will become the new + // head. + chain.recompute_head_at_current_slot().await; + + // Perform some logging to inform users if their blocks are being produced + // late. + // + // Check to see the thresholds are non-zero to avoid logging errors with small + // slot times (e.g., during testing) + let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); + let error_threshold = crit_threshold / 2; + if delay >= crit_threshold { + crit!( + log, + "Block was broadcast too late"; + "msg" => "system may be overloaded, block likely to be orphaned", + "delay_ms" => delay.as_millis(), + "slot" => block.slot(), + "root" => ?root, + ) + } else if delay >= error_threshold { + error!( + log, + "Block broadcast was delayed"; + "msg" => "system may be overloaded, block may be orphaned", + "delay_ms" => delay.as_millis(), + "slot" => block.slot(), + "root" => ?root, + ) + } + + Ok(()) + } + Err(e) => { + let msg = format!("{:?}", e); + error!( + log, + "Invalid block provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::broadcast_without_import(msg)) + } + } +} + +/// Handles a request from the HTTP API for blinded blocks. This converts blinded blocks into full +/// blocks before publishing. 
+pub async fn publish_blinded_block<T: BeaconChainTypes>( + block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>, + chain: Arc<BeaconChain<T>>, + network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>, + log: Logger, +) -> Result<(), Rejection> { + let full_block = reconstruct_block(chain.clone(), block, log.clone()).await?; + publish_block::<T>(Arc::new(full_block), chain, network_tx, log).await +} + +/// Deconstruct the given blinded block, and construct a full block. This attempts to use the +/// execution layer's payload cache, and if that misses, attempts a blind block proposal to retrieve +/// the full payload. +async fn reconstruct_block<T: BeaconChainTypes>( + chain: Arc<BeaconChain<T>>, + block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>, + log: Logger, +) -> Result<SignedBeaconBlock<T::EthSpec, FullPayload<T::EthSpec>>, Rejection> { + let full_payload = if let Ok(payload_header) = block.message().body().execution_payload() { + let el = chain.execution_layer.as_ref().ok_or_else(|| { + warp_utils::reject::custom_server_error("Missing execution layer".to_string()) + })?; + + // If the execution block hash is zero, use an empty payload. + let full_payload = if payload_header.block_hash() == ExecutionBlockHash::zero() { + ExecutionPayload::default() + // If we already have an execution payload with this transactions root cached, use it. + } else if let Some(cached_payload) = + el.get_payload_by_root(&payload_header.tree_hash_root()) + { + info!(log, "Reconstructing a full block using a local payload"; "block_hash" => ?cached_payload.block_hash); + cached_payload + // Otherwise, this means we are attempting a blind block proposal. 
+ } else { + let full_payload = el.propose_blinded_beacon_block(&block).await.map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Blind block proposal failed: {:?}", + e + )) + })?; + info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash); + full_payload + }; + + Some(full_payload) + } else { + None + }; + + block.try_into_full_block(full_payload).ok_or_else(|| { + warp_utils::reject::custom_server_error("Unable to add payload to block".to_string()) + }) +} diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index 06466c43bb..8f9856991f 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -86,6 +86,16 @@ impl<E: EthSpec> InteractiveTester<E> { pub async fn create_api_server<T: BeaconChainTypes>( chain: Arc<BeaconChain<T>>, log: Logger, +) -> ApiServer<T::EthSpec, impl Future<Output = ()>> { + // Get a random unused port. + let port = unused_port::unused_tcp_port().unwrap(); + create_api_server_on_port(chain, log, port).await +} + +pub async fn create_api_server_on_port<T: BeaconChainTypes>( + chain: Arc<BeaconChain<T>>, + log: Logger, + port: u16, ) -> ApiServer<T::EthSpec, impl Future<Output = ()>> { let (network_tx, network_rx) = mpsc::unbounded_channel(); @@ -129,7 +139,7 @@ pub async fn create_api_server<T: BeaconChainTypes>( config: Config { enabled: true, listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), - listen_port: 0, + listen_port: port, allow_origin: None, serve_legacy_spec: true, tls_config: None, diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index b4c29cae42..38c06848cf 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,4 +1,4 @@ -use crate::common::{create_api_server, ApiServer}; +use crate::common::{create_api_server, create_api_server_on_port, ApiServer}; use beacon_chain::test_utils::RelativeSyncCommittee; use 
beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, @@ -11,6 +11,8 @@ use eth2::{ types::{BlockId as CoreBlockId, StateId as CoreStateId, *}, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; +use execution_layer::test_utils::Operation; +use execution_layer::test_utils::TestingBuilder; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; use http_api::{BlockId, StateId}; @@ -66,6 +68,7 @@ struct ApiTester { network_rx: mpsc::UnboundedReceiver<NetworkMessage<E>>, local_enr: Enr, external_peer_id: PeerId, + mock_builder: Option<Arc<TestingBuilder<E>>>, } impl ApiTester { @@ -90,12 +93,16 @@ impl ApiTester { } pub async fn new_from_spec(spec: ChainSpec) -> Self { + // Get a random unused port + let port = unused_port::unused_tcp_port().unwrap(); + let beacon_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); + let harness = Arc::new( BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() - .mock_execution_layer() + .mock_execution_layer_with_builder(beacon_url.clone()) .build(), ); @@ -205,25 +212,28 @@ impl ApiTester { let ApiServer { server, - listening_socket, + listening_socket: _, shutdown_tx, network_rx, local_enr, external_peer_id, - } = create_api_server(chain.clone(), log).await; + } = create_api_server_on_port(chain.clone(), log, port).await; harness.runtime.task_executor.spawn(server, "api_server"); let client = BeaconNodeHttpClient::new( - SensitiveUrl::parse(&format!( - "http://{}:{}", - listening_socket.ip(), - listening_socket.port() - )) - .unwrap(), + beacon_url, Timeouts::set_all(Duration::from_secs(SECONDS_PER_SLOT)), ); + let builder_ref = harness.mock_builder.as_ref().unwrap().clone(); + harness.runtime.task_executor.spawn( + async move { builder_ref.run().await }, + "mock_builder_server", + ); + + let mock_builder = harness.mock_builder.clone(); + Self { harness, chain, 
@@ -239,6 +249,7 @@ impl ApiTester { network_rx, local_enr, external_peer_id, + mock_builder, } } @@ -321,6 +332,7 @@ impl ApiTester { network_rx, local_enr, external_peer_id, + mock_builder: None, } } @@ -328,6 +340,13 @@ impl ApiTester { &self.harness.validator_keypairs } + pub async fn new_mev_tester() -> Self { + Self::new_with_hard_forks(true, true) + .await + .test_post_validator_register_validator() + .await + } + fn skip_slots(self, count: u64) -> Self { for _ in 0..count { self.chain @@ -2005,6 +2024,175 @@ impl ApiTester { self } + pub async fn test_blinded_block_production<Payload: ExecPayload<E>>(&self) { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() * 3 { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let proposer_pubkey_bytes = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| duty.pubkey) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let randao_reveal = { + let domain = self.chain.spec.get_domain( + epoch, + Domain::Randao, + &fork, + genesis_validators_root, + ); + let message = epoch.signing_root(domain); + sk.sign(message).into() + }; + + let block = self + .client + .get_validator_blinded_blocks::<E, Payload>(slot, &randao_reveal, None) + .await + .unwrap() + .data; + + let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + + self.client + .post_beacon_blinded_blocks(&signed_block) + .await + .unwrap(); + + // This converts the generic `Payload` to a concrete type for comparison. 
+ let head_block = SignedBeaconBlock::from(signed_block.clone()); + assert_eq!(head_block, signed_block); + + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + } + + pub async fn test_blinded_block_production_no_verify_randao<Payload: ExecPayload<E>>( + self, + ) -> Self { + for _ in 0..E::slots_per_epoch() { + let slot = self.chain.slot().unwrap(); + + let block = self + .client + .get_validator_blinded_blocks_with_verify_randao::<E, Payload>( + slot, + None, + None, + Some(false), + ) + .await + .unwrap() + .data; + assert_eq!(block.slot(), slot); + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + + self + } + + pub async fn test_blinded_block_production_verify_randao_invalid<Payload: ExecPayload<E>>( + self, + ) -> Self { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let proposer_pubkey_bytes = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| duty.pubkey) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let bad_randao_reveal = { + let domain = self.chain.spec.get_domain( + epoch, + Domain::Randao, + &fork, + genesis_validators_root, + ); + let message = (epoch + 1).signing_root(domain); + sk.sign(message).into() + }; + + // Check failure with no `verify_randao` passed. + self.client + .get_validator_blinded_blocks::<E, Payload>(slot, &bad_randao_reveal, None) + .await + .unwrap_err(); + + // Check failure with `verify_randao=true`. 
+ self.client + .get_validator_blinded_blocks_with_verify_randao::<E, Payload>( + slot, + Some(&bad_randao_reveal), + None, + Some(true), + ) + .await + .unwrap_err(); + + // Check failure with no randao reveal provided. + self.client + .get_validator_blinded_blocks_with_verify_randao::<E, Payload>( + slot, None, None, None, + ) + .await + .unwrap_err(); + + // Check success with `verify_randao=false`. + let block = self + .client + .get_validator_blinded_blocks_with_verify_randao::<E, Payload>( + slot, + Some(&bad_randao_reveal), + None, + Some(false), + ) + .await + .unwrap() + .data; + + assert_eq!(block.slot(), slot); + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + + self + } + pub async fn test_get_validator_attestation_data(self) -> Self { let mut state = self.chain.head_beacon_state_cloned(); let slot = state.slot(); @@ -2203,7 +2391,14 @@ impl ApiTester { let mut registrations = vec![]; let mut fee_recipients = vec![]; - let fork = self.chain.head_snapshot().beacon_state.fork(); + let genesis_epoch = self.chain.spec.genesis_slot.epoch(E::slots_per_epoch()); + let fork = Fork { + current_version: self.chain.spec.genesis_fork_version, + previous_version: self.chain.spec.genesis_fork_version, + epoch: genesis_epoch, + }; + + let expected_gas_limit = 11_111_111; for (val_index, keypair) in self.validator_keypairs().iter().enumerate() { let pubkey = keypair.pk.compress(); @@ -2211,12 +2406,13 @@ impl ApiTester { let data = ValidatorRegistrationData { fee_recipient, - gas_limit: 0, + gas_limit: expected_gas_limit, timestamp: 0, pubkey, }; + let domain = self.chain.spec.get_domain( - Epoch::new(0), + genesis_epoch, Domain::ApplicationMask(ApplicationDomain::Builder), &fork, Hash256::zero(), @@ -2224,11 +2420,13 @@ impl ApiTester { let message = data.signing_root(domain); let signature = keypair.sk.sign(message); - fee_recipients.push(fee_recipient); - registrations.push(SignedValidatorRegistrationData { + let signed = SignedValidatorRegistrationData { 
message: data, signature, - }); + }; + + fee_recipients.push(fee_recipient); + registrations.push(signed); } self.client @@ -2258,6 +2456,594 @@ impl ApiTester { self } + // Helper function for tests that require a valid RANDAO signature. + async fn get_test_randao(&self, slot: Slot, epoch: Epoch) -> (u64, SignatureBytes) { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + let (proposer_pubkey_bytes, proposer_index) = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| (duty.pubkey, duty.validator_index)) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let randao_reveal = { + let domain = + self.chain + .spec + .get_domain(epoch, Domain::Randao, &fork, genesis_validators_root); + let message = epoch.signing_root(domain); + sk.sign(message).into() + }; + (proposer_index, randao_reveal) + } + + pub async fn test_payload_respects_registration(self) -> Self { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!( + payload.execution_payload_header.fee_recipient, + expected_fee_recipient + ); + assert_eq!(payload.execution_payload_header.gas_limit, 11_111_111); + + // If this cache is empty, it indicates fallback was not used, so the payload came from the + // mock builder. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_payload_accepts_mutated_gas_limit(self) -> Self { + // Mutate gas limit. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::GasLimit(30_000_000)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!( + payload.execution_payload_header.fee_recipient, + expected_fee_recipient + ); + assert_eq!(payload.execution_payload_header.gas_limit, 30_000_000); + + // This cache should not be populated because fallback should not have been used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_payload_accepts_changed_fee_recipient(self) -> Self { + let test_fee_recipient = "0x4242424242424242424242424242424242424242" + .parse::<Address>() + .unwrap(); + + // Mutate fee recipient. 
+ self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::FeeRecipient(test_fee_recipient)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert_eq!( + payload.execution_payload_header.fee_recipient, + test_fee_recipient + ); + + // This cache should not be populated because fallback should not have been used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_payload_rejects_invalid_parent_hash(self) -> Self { + let invalid_parent_hash = + "0x4242424242424242424242424242424242424242424242424242424242424242" + .parse::<Hash256>() + .unwrap(); + + // Mutate parent hash. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::ParentHash(invalid_parent_hash)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_parent_hash = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .block_hash; + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert_eq!( + payload.execution_payload_header.parent_hash, + expected_parent_hash + ); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_payload_rejects_invalid_prev_randao(self) -> Self { + let invalid_prev_randao = + "0x4242424242424242424242424242424242424242424242424242424242424242" + .parse::<Hash256>() + .unwrap(); + + // Mutate prev randao. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::PrevRandao(invalid_prev_randao)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_prev_randao = self + .chain + .canonical_head + .cached_head() + .head_random() + .unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert_eq!( + payload.execution_payload_header.prev_randao, + expected_prev_randao + ); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_payload_rejects_invalid_block_number(self) -> Self { + let invalid_block_number = 2; + + // Mutate block number. 
+ self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::BlockNumber(invalid_block_number)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_block_number = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .block_number + + 1; + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert_eq!( + payload.execution_payload_header.block_number, + expected_block_number + ); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_payload_rejects_invalid_timestamp(self) -> Self { + let invalid_timestamp = 2; + + // Mutate timestamp. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Timestamp(invalid_timestamp)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let min_expected_timestamp = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .timestamp; + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert!(payload.execution_payload_header.timestamp > min_expected_timestamp); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_payload_rejects_invalid_signature(self) -> Self { + self.mock_builder + .as_ref() + .unwrap() + .builder + .invalid_signatures(); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_builder_chain_health_skips(self) -> Self { + let slot = self.chain.slot().unwrap(); + + // Since we are proposing this slot, start the count from the previous slot. + let prev_slot = slot - Slot::new(1); + let head_slot = self.chain.canonical_head.cached_head().head_slot(); + let epoch = self.chain.epoch().unwrap(); + + // Inclusive here to make sure we advance one slot past the threshold. + for _ in (prev_slot - head_slot).as_usize()..=self.chain.config.builder_fallback_skips { + self.harness.advance_slot(); + } + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_builder_chain_health_skips_per_epoch(self) -> Self { + // Fill an epoch with `builder_fallback_skips_per_epoch` skip slots. + for i in 0..E::slots_per_epoch() { + if i == 0 || i as usize > self.chain.config.builder_fallback_skips_per_epoch { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + } + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload = self + .client + .get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // This cache should not be populated because fallback should not have been used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + + // Without proposing, advance into the next slot, this should make us cross the threshold + // number of skips, causing us to use the fallback. + self.harness.advance_slot(); + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload = self + .client + .get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + + self + } + + pub async fn test_builder_chain_health_epochs_since_finalization(self) -> Self { + let skips = E::slots_per_epoch() + * self.chain.config.builder_fallback_epochs_since_finalization as u64; + + for _ in 0..skips { + self.harness.advance_slot(); + } + + // Fill the next epoch with blocks, should be enough to justify, not finalize. + for _ in 0..E::slots_per_epoch() { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload = self + .client + .get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + + // Fill another epoch with blocks, should be enough to finalize. (Sneaky plus 1 because this + // scenario starts at an epoch boundary). 
+ for _ in 0..E::slots_per_epoch() + 1 { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload = self + .client + .get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // This cache should not be populated because fallback should not have been used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + + self + } + #[cfg(target_os = "linux")] pub async fn test_get_lighthouse_health(self) -> Self { self.client.get_lighthouse_health().await.unwrap(); @@ -2976,6 +3762,72 @@ async fn block_production_verify_randao_invalid() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_full_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production::<FullPayload<_>>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_with_skip_slots_full_payload_premerge() { + ApiTester::new() + .await + .skip_slots(E::slots_per_epoch() * 2) + .test_blinded_block_production::<FullPayload<_>>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_no_verify_randao_full_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_no_verify_randao::<FullPayload<_>>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_verify_randao_invalid_full_payload_premerge() { + ApiTester::new() + .await + 
.test_blinded_block_production_verify_randao_invalid::<FullPayload<_>>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_blinded_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production::<BlindedPayload<_>>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_with_skip_slots_blinded_payload_premerge() { + ApiTester::new() + .await + .skip_slots(E::slots_per_epoch() * 2) + .test_blinded_block_production::<BlindedPayload<_>>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_no_verify_randao_blinded_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_no_verify_randao::<BlindedPayload<_>>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_verify_randao_invalid_blinded_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_verify_randao_invalid::<BlindedPayload<_>>() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_attestation_data() { ApiTester::new() @@ -3060,6 +3912,94 @@ async fn post_validator_register_validator() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_valid() { + ApiTester::new_mev_tester() + .await + .test_payload_respects_registration() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_gas_limit_mutation() { + ApiTester::new_mev_tester() + .await + .test_payload_accepts_mutated_gas_limit() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_fee_recipient_mutation() { + ApiTester::new_mev_tester() + .await + .test_payload_accepts_changed_fee_recipient() + .await; +} + +#[tokio::test(flavor = "multi_thread", 
worker_threads = 2)] +async fn get_blinded_block_invalid_parent_hash() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_parent_hash() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_prev_randao() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_prev_randao() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_block_number() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_block_number() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_timestamp() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_timestamp() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_signature() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_signature() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_skips() { + ApiTester::new_mev_tester() + .await + .test_builder_chain_health_skips() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_skips_per_epoch() { + ApiTester::new_mev_tester() + .await + .test_builder_chain_health_skips_per_epoch() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_epochs_since_finalization() { + ApiTester::new_mev_tester() + .await + .test_builder_chain_health_epochs_since_finalization() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index b36f154ae8..3b78d8f684 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -708,6 +708,46 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { 
.default_value("250") .takes_value(true) ) + .arg( + Arg::with_name("builder-fallback-skips") + .long("builder-fallback-skips") + .help("If this node is proposing a block and has seen this number of skip slots \ + on the canonical chain in a row, it will NOT query any connected builders, \ + and will use the local execution engine for payload construction.") + .default_value("3") + .takes_value(true) + ) + .arg( + Arg::with_name("builder-fallback-skips-per-epoch") + .long("builder-fallback-skips-per-epoch") + .help("If this node is proposing a block and has seen this number of skip slots \ + on the canonical chain in the past `SLOTS_PER_EPOCH`, it will NOT query \ + any connected builders, and will use the local execution engine for \ + payload construction.") + .default_value("8") + .takes_value(true) + ) + .arg( + Arg::with_name("builder-fallback-epochs-since-finalization") + .long("builder-fallback-epochs-since-finalization") + .help("If this node is proposing a block and the chain has not finalized within \ + this number of epochs, it will NOT query any connected builders, \ + and will use the local execution engine for payload construction. Setting \ + this value to anything less than 2 will cause the node to NEVER query \ + connected builders. Setting it to 2 will cause this condition to be hit \ + if there are skips slots at the start of an epoch, right before this node \ + is set to propose.") + .default_value("3") + .takes_value(true) + ) + .arg( + Arg::with_name("builder-fallback-disable-checks") + .long("builder-fallback-disable-checks") + .help("This flag disables all checks related to chain health. 
This means the builder \ + API will always be used for payload construction, regardless of recent chain \ + conditions.") + .takes_value(false) + ) .arg( Arg::with_name("count-unrealized") .long("count-unrealized") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index fb0cbe0c92..584a961958 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -634,6 +634,20 @@ pub fn get_config<E: EthSpec>( client_config.chain.count_unrealized = true; } + /* + * Builder fallback configs. + */ + client_config.chain.builder_fallback_skips = + clap_utils::parse_required(cli_args, "builder-fallback-skips")?; + client_config.chain.builder_fallback_skips_per_epoch = + clap_utils::parse_required(cli_args, "builder-fallback-skips-per-epoch")?; + client_config + .chain + .builder_fallback_epochs_since_finalization = + clap_utils::parse_required(cli_args, "builder-fallback-epochs-since-finalization")?; + client_config.chain.builder_fallback_disable_checks = + cli_args.is_present("builder-fallback-disable-checks"); + Ok(client_config) } diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index c3e99d7a86..d05677465b 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -18,21 +18,21 @@ * [Create a validator](./validator-create.md) * [Key recovery](./key-recovery.md) * [Validator Management](./validator-management.md) - * [Importing from the Staking Launchpad](./validator-import-launchpad.md) + * [Importing from the Staking Launchpad](./validator-import-launchpad.md) * [Slashing Protection](./slashing-protection.md) * [Voluntary Exits](./voluntary-exit.md) * [Validator Monitoring](./validator-monitoring.md) * [Doppelganger Protection](./validator-doppelganger.md) * [Suggested Fee Recipient](./suggested-fee-recipient.md) * [APIs](./api.md) - * [Beacon Node API](./api-bn.md) - * [/lighthouse](./api-lighthouse.md) - * [Validator Inclusion APIs](./validator-inclusion.md) - * [Validator Client API](./api-vc.md) - * 
[Endpoints](./api-vc-endpoints.md) - * [Authorization Header](./api-vc-auth-header.md) - * [Signature Header](./api-vc-sig-header.md) - * [Prometheus Metrics](./advanced_metrics.md) + * [Beacon Node API](./api-bn.md) + * [/lighthouse](./api-lighthouse.md) + * [Validator Inclusion APIs](./validator-inclusion.md) + * [Validator Client API](./api-vc.md) + * [Endpoints](./api-vc-endpoints.md) + * [Authorization Header](./api-vc-auth-header.md) + * [Signature Header](./api-vc-sig-header.md) + * [Prometheus Metrics](./advanced_metrics.md) * [Advanced Usage](./advanced.md) * [Checkpoint Sync](./checkpoint-sync.md) * [Custom Data Directories](./advanced-datadir.md) @@ -45,6 +45,7 @@ * [Redundancy](./redundancy.md) * [Pre-Releases](./advanced-pre-releases.md) * [Release Candidates](./advanced-release-candidates.md) + * [MEV and Lighthouse](./builders.md) * [Contributing](./contributing.md) - * [Development Environment](./setup.md) + * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/builders.md b/book/src/builders.md new file mode 100644 index 0000000000..78a80899cc --- /dev/null +++ b/book/src/builders.md @@ -0,0 +1,144 @@ +# MEV and Lighthouse + +Lighthouse is able to interact with servers that implement the [builder +API](https://github.com/ethereum/builder-specs), allowing it to produce blocks without having +knowledge of the transactions included in the block. This enables Lighthouse to outsource the job of +transaction gathering/ordering within a block to parties specialized in this particular task. For +economic reasons, these parties will refuse to reveal the list of transactions to the validator +before the validator has committed to (i.e. signed) the block. A primer on MEV can be found +[here](https://ethereum.org/en/developers/docs/mev/). + +Using the builder API is not known to introduce additional slashing risks, however a liveness risk +(i.e. 
the ability for the chain to produce valid blocks) is introduced because your node will be +signing blocks without executing the transactions within the block. Therefore it won't know whether +the transactions are valid and it may sign a block that the network will reject. This would lead to +a missed proposal and the opportunity cost of lost block rewards. + +## How to connect to a builder + +The beacon node and validator client each require a new flag for lighthouse to be fully compatible with builder API servers. + +``` +lighthouse bn --builder https://mainnet-builder.test +``` +The `--builder` flag will cause the beacon node to query the provided URL during block production for a block +payload with stubbed-out transactions. If this request fails, Lighthouse will fall back to the local +execution engine and produce a block using transactions gathered and verified locally. + +The beacon node will *only* query for this type of block (a "blinded" block) when a validator specifically requests it. +Otherwise, it will continue to serve full blocks as normal. In order to configure the validator client to query for +blinded blocks, you should use the following flag: + +``` +lighthouse vc --builder-proposals +``` +With the `--builder-proposals` flag, the validator client will ask for blinded blocks for all validators it manages. +In order to configure whether a validator queries for blinded blocks check out [this section.](#validator-client-configuration) + +## Multiple builders + +Lighthouse currently only supports a connection to a single builder. If you'd like to connect to multiple builders or +relays, run one of the following services and configure lighthouse to use it with the `--builder` flag. 
+ +* [`mev-boost`][mev-boost] +* [`mev-rs`][mev-rs] + +## Validator Client Configuration + +In the validator client you can configure gas limit, fee recipient and whether to use the builder API on a +per-validator basis or set a configuration for all validators managed by the validator client. CLI flags for each of these +will serve as default values for all validators managed by the validator client. In order to manage the values +per-validator you can either make updates to the `validator_definitions.yml` file or you can use the HTTP requests +described below. + +Both the gas limit and fee recipient will be passed along as suggestions to connected builders. If there is a discrepancy +in either, it will *not* keep you from proposing a block with the builder. This is because the bounds on gas limit are calculated based +on prior execution blocks, so it should be managed by an execution engine, even if it is external. Depending on the +connected relay, payment to the proposer might be in the form of a transaction within the block to the fee recipient, +so a discrepancy in fee recipient might not indicate that there is something afoot. If you know the relay you are connected to *should* +only create blocks with a `fee_recipient` field matching the one suggested, you can use +the [strict fee recipient](suggested-fee-recipient.md#strict-fee-recipient) flag. + +### Enable/Disable builder proposals and set Gas Limit +Use the [lighthouse API](api-vc-endpoints.md) to configure these fields per-validator. 
+ +#### `PATCH /lighthouse/validators/:voting_pubkey` + + +#### HTTP Specification + +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/:voting_pubkey` | +| Method | PATCH | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200, 400 | + +#### Example Path + +``` +localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde +``` + +#### Example Request Body +Each field is optional. +```json +{ + "builder_proposals": true, + "gas_limit": 3000000001 +} +``` + +#### Example Response Body + +```json +null +``` +### Fee Recipient + +Refer to [suggested fee recipient](suggested-fee-recipient.md) documentation. + +### Validator definitions example +``` +--- +- enabled: true + voting_public_key: "0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007" + type: local_keystore + voting_keystore_path: /home/paul/.lighthouse/validators/0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007/voting-keystore.json + voting_keystore_password_path: /home/paul/.lighthouse/secrets/0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007 + suggested_fee_recipient: "0x6cc8dcbca744a6e4ffedb98e1d0df903b10abd21" + gas_limit: 3000000001 + builder_proposals: true +- enabled: false + voting_public_key: "0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477" + type: local_keystore voting_keystore_path: /home/paul/.lighthouse/validators/0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477/voting-keystore.json + voting_keystore_password: myStrongpa55word123&$ + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + gas_limit: 333333333 + builder_proposals: true +``` + +## 
Circuit breaker conditions + +By outsourcing payload construction and signing blocks without verifying transactions, we are creating a new risk to +liveness. If most of the network is using a small set of relays and one is bugged, a string of missed proposals could +happen quickly. This is not only generally bad for the network, but if you have a proposal coming up, you might not +realize that your next proposal is likely to be missed until it's too late. So we've implemented some "chain health" +checks to try and avoid scenarios like this. + +By default, Lighthouse is strict with these conditions, but we encourage users to learn about and adjust them. + +- `--builder-fallback-skips` - If we've seen this number of skip slots on the canonical chain in a row prior to proposing, we will NOT query + any connected builders, and will use the local execution engine for payload construction. +- `--builder-fallback-skips-per-epoch` - If we've seen this number of skip slots on the canonical chain in the past `SLOTS_PER_EPOCH`, we will NOT + query any connected builders, and will use the local execution engine for payload construction. +- `--builder-fallback-epochs-since-finalization` - If we're proposing and the chain has not finalized within + this number of epochs, we will NOT query any connected builders, and will use the local execution engine for payload + construction. Setting this value to anything less than 2 will cause the node to NEVER query connected builders. Setting + it to 2 will cause this condition to be hit if there are skip slots at the start of an epoch, right before this node + is set to propose. +- `--builder-fallback-disable-checks` - This flag disables all checks related to chain health. This means the builder + API will always be used for payload construction, regardless of recent chain conditions. 
+ +[mev-rs]: https://github.com/ralexstokes/mev-rs +[mev-boost]: https://github.com/flashbots/mev-boost diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md index c401abfb7a..a584be306f 100644 --- a/book/src/suggested-fee-recipient.md +++ b/book/src/suggested-fee-recipient.md @@ -30,6 +30,15 @@ Assuming trustworthy nodes, the priority for the four methods is: 1. `--suggested-fee-recipient` provided to the VC. 1. `--suggested-fee-recipient` provided to the BN. +## Strict Fee Recipient + +If the flag `--strict-fee-recipient` is set in the validator client, Lighthouse will refuse to sign any block whose +`fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. This applies to both the normal +block proposal flow and block proposals through the builder API. Proposals through the builder API are more likely +to have a discrepancy in `fee_recipient` so you should be aware of how your connected relay sends proposer payments before +using this flag. If this flag is used, a fee recipient mismatch in the builder API flow will result in a fallback to the +local execution engine for payload construction, where a strict fee recipient check will still be applied. + ### 1. Setting the fee recipient in the `validator_definitions.yml` Users can set the fee recipient in `validator_definitions.yml` with the `suggested_fee_recipient` @@ -62,15 +71,6 @@ validators where a `suggested_fee_recipient` is not loaded from another method. The `--suggested-fee-recipient` can be provided to the BN to act as a default value when the validator client does not transmit a `suggested_fee_recipient` to the BN. -## Strict Fee Recipient - -If the flag `--strict-fee-recipient` is set in the validator client, Lighthouse will refuse to sign any block whose -`fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. This applies to both the normal -block proposal flow, as well as block proposals through the builder API. 
Proposals through the builder API are more likely -to have a discrepancy in `fee_recipient` so you should be aware of how your connected relay sends proposer payments before -using this flag. If this flag is used, a fee recipient mismatch in the builder API flow will result in a fallback to the -local execution engine for payload construction, where a strict fee recipient check will still be applied. - ## Setting the fee recipient dynamically using the keymanager API When the [validator client API](api-vc.md) is enabled, the diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index e68737e259..66e3b73547 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -109,6 +109,12 @@ pub struct ValidatorDefinition { #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option<Address>, #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option<u64>, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option<bool>, + #[serde(default)] pub description: String, #[serde(flatten)] pub signing_definition: SigningDefinition, @@ -126,6 +132,8 @@ impl ValidatorDefinition { voting_keystore_password: Option<ZeroizeString>, graffiti: Option<GraffitiString>, suggested_fee_recipient: Option<Address>, + gas_limit: Option<u64>, + builder_proposals: Option<bool>, ) -> Result<Self, Error> { let voting_keystore_path = voting_keystore_path.as_ref().into(); let keystore = @@ -138,6 +146,8 @@ impl ValidatorDefinition { description: keystore.description().unwrap_or("").to_string(), graffiti, suggested_fee_recipient, + gas_limit, + builder_proposals, signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path: None, @@ -284,6 +294,8 @@ impl ValidatorDefinitions { description: 
keystore.description().unwrap_or("").to_string(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path, @@ -526,4 +538,84 @@ mod tests { Some(Address::from_str("0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d").unwrap()) ); } + + #[test] + fn gas_limit_checks() { + let no_gas_limit = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + let def: ValidatorDefinition = serde_yaml::from_str(no_gas_limit).unwrap(); + assert!(def.gas_limit.is_none()); + + let invalid_gas_limit = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + gas_limit: "banana" + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + + let def: Result<ValidatorDefinition, _> = serde_yaml::from_str(invalid_gas_limit); + assert!(def.is_err()); + + let valid_gas_limit = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + gas_limit: 35000000 + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + + let def: ValidatorDefinition = serde_yaml::from_str(valid_gas_limit).unwrap(); + assert_eq!(def.gas_limit, Some(35000000)); + } + + #[test] + fn builder_proposals_checks() { + let no_builder_proposals = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + voting_keystore_path: "" + 
voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + let def: ValidatorDefinition = serde_yaml::from_str(no_builder_proposals).unwrap(); + assert!(def.builder_proposals.is_none()); + + let invalid_builder_proposals = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + builder_proposals: "banana" + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + + let def: Result<ValidatorDefinition, _> = serde_yaml::from_str(invalid_builder_proposals); + assert!(def.is_err()); + + let valid_builder_proposals = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + builder_proposals: true + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + + let def: ValidatorDefinition = serde_yaml::from_str(valid_builder_proposals).unwrap(); + assert_eq!(def.builder_proposals, Some(true)); + } } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 1025959165..8cd138e980 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1280,7 +1280,7 @@ impl BeaconNodeHttpClient { .await } - /// `GET v2/validator/blocks/{slot}` + /// `GET v1/validator/blinded_blocks/{slot}` pub async fn get_validator_blinded_blocks_with_verify_randao< T: EthSpec, Payload: ExecPayload<T>, @@ -1291,7 +1291,7 @@ impl BeaconNodeHttpClient { graffiti: Option<&Graffiti>, verify_randao: Option<bool>, ) -> Result<ForkVersionedResponse<BeaconBlock<T, Payload>>, Error> { - let mut path = self.eth_path(V2)?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index d678ca34b7..abed4fe5e7 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -462,7 +462,9 @@ impl ValidatorClientHttpClient { pub async fn patch_lighthouse_validators( &self, voting_pubkey: &PublicKeyBytes, - enabled: bool, + enabled: Option<bool>, + gas_limit: Option<u64>, + builder_proposals: Option<bool>, ) -> Result<(), Error> { let mut path = self.server.full.clone(); @@ -472,7 +474,15 @@ impl ValidatorClientHttpClient { .push("validators") .push(&voting_pubkey.to_string()); - self.patch(path, &ValidatorPatchRequest { enabled }).await + self.patch( + path, + &ValidatorPatchRequest { + enabled, + gas_limit, + builder_proposals, + }, + ) + .await } fn make_keystores_url(&self) -> Result<Url, Error> { diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 3e1c13dcf8..d829c97cc7 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -26,6 +26,12 @@ pub struct ValidatorRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option<Address>, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option<u64>, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option<bool>, #[serde(with = "eth2_serde_utils::quoted_u64")] pub deposit_gwei: u64, } @@ -49,6 +55,12 @@ pub struct CreatedValidator { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option<Address>, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option<u64>, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option<bool>, pub eth1_deposit_tx_data: String, #[serde(with = 
"eth2_serde_utils::quoted_u64")] pub deposit_gwei: u64, @@ -62,7 +74,15 @@ pub struct PostValidatorsResponseData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorPatchRequest { - pub enabled: bool, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub enabled: Option<bool>, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option<u64>, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option<bool>, } #[derive(Clone, PartialEq, Serialize, Deserialize)] @@ -70,8 +90,18 @@ pub struct KeystoreValidatorsPostRequest { pub password: ZeroizeString, pub enable: bool, pub keystore: Keystore, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] pub graffiti: Option<GraffitiString>, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option<Address>, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option<u64>, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option<bool>, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -84,6 +114,12 @@ pub struct Web3SignerValidatorRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option<Address>, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option<u64>, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option<bool>, pub voting_public_key: PublicKey, pub url: String, #[serde(default)] diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 1726f2ad07..047bceae7e 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,13 +1,14 @@ -use crate::{EthSpec, ExecPayload, ExecutionPayloadHeader, Uint256}; -use 
bls::blst_implementations::PublicKeyBytes; +use crate::{ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, SignedRoot, Uint256}; +use bls::PublicKeyBytes; use bls::Signature; use serde::{Deserialize as De, Deserializer, Serialize as Ser, Serializer}; use serde_derive::{Deserialize, Serialize}; use serde_with::{serde_as, DeserializeAs, SerializeAs}; use std::marker::PhantomData; +use tree_hash_derive::TreeHash; #[serde_as] -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +#[derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone)] #[serde(bound = "E: EthSpec, Payload: ExecPayload<E>")] pub struct BuilderBid<E: EthSpec, Payload: ExecPayload<E>> { #[serde_as(as = "BlindedPayloadAsHeader<E>")] @@ -16,9 +17,12 @@ pub struct BuilderBid<E: EthSpec, Payload: ExecPayload<E>> { pub value: Uint256, pub pubkey: PublicKeyBytes, #[serde(skip)] + #[tree_hash(skip_hashing)] _phantom_data: PhantomData<E>, } +impl<E: EthSpec, Payload: ExecPayload<E>> SignedRoot for BuilderBid<E, Payload> {} + /// Validator registration, for use in interacting with servers implementing the builder API. 
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] #[serde(bound = "E: EthSpec, Payload: ExecPayload<E>")] @@ -50,3 +54,17 @@ impl<'de, E: EthSpec, Payload: ExecPayload<E>> DeserializeAs<'de, Payload> .map_err(|_| serde::de::Error::custom("unable to convert payload header to payload")) } } + +impl<E: EthSpec, Payload: ExecPayload<E>> SignedBuilderBid<E, Payload> { + pub fn verify_signature(&self, spec: &ChainSpec) -> bool { + self.message + .pubkey + .decompress() + .map(|pubkey| { + let domain = spec.get_builder_domain(); + let message = self.message.signing_root(domain); + self.signature.verify(&pubkey, message) + }) + .unwrap_or(false) + } +} diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 8a69505a51..3668d0524c 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1355,4 +1355,12 @@ mod yaml_tests { ) ); } + + #[test] + fn test_domain_builder() { + assert_eq!( + int_to_bytes4(ApplicationDomain::Builder.get_domain_constant()), + [0, 0, 0, 1] + ); + } } diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs index dbfe218159..978bd4c69a 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution_block_hash.rs @@ -1,12 +1,14 @@ use crate::test_utils::TestRandom; use crate::Hash256; +use derivative::Derivative; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash)] +#[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash, Derivative)] +#[derivative(Debug = "transparent")] #[serde(transparent)] pub struct ExecutionBlockHash(Hash256); diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 4a8552d249..114ca02ecf 100644 --- 
a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -9,6 +9,7 @@ use std::hash::Hash; use test_random_derive::TestRandom; use tree_hash::TreeHash; +#[derive(Debug)] pub enum BlockType { Blinded, Full, @@ -18,6 +19,7 @@ pub trait ExecPayload<T: EthSpec>: Debug + Clone + Encode + + Debug + Decode + TestRandom + TreeHash @@ -45,6 +47,7 @@ pub trait ExecPayload<T: EthSpec>: fn timestamp(&self) -> u64; fn block_hash(&self) -> ExecutionBlockHash; fn fee_recipient(&self) -> Address; + fn gas_limit(&self) -> u64; } impl<T: EthSpec> ExecPayload<T> for FullPayload<T> { @@ -79,6 +82,10 @@ impl<T: EthSpec> ExecPayload<T> for FullPayload<T> { fn fee_recipient(&self) -> Address { self.execution_payload.fee_recipient } + + fn gas_limit(&self) -> u64 { + self.execution_payload.gas_limit + } } impl<T: EthSpec> ExecPayload<T> for BlindedPayload<T> { @@ -113,6 +120,10 @@ impl<T: EthSpec> ExecPayload<T> for BlindedPayload<T> { fn fee_recipient(&self) -> Address { self.execution_payload_header.fee_recipient } + + fn gas_limit(&self) -> u64 { + self.execution_payload_header.gas_limit + } } #[derive(Debug, Clone, TestRandom, Serialize, Deserialize, Derivative)] diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index 06b0303c69..696830a0d1 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -494,6 +494,8 @@ fn validator_import_launchpad() { description: "".into(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: keystore.public_key().unwrap(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, @@ -614,6 +616,8 @@ fn validator_import_launchpad_no_password_then_add_password() { description: "".into(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: keystore.public_key().unwrap(), signing_definition: SigningDefinition::LocalKeystore { 
voting_keystore_path, @@ -638,6 +642,8 @@ fn validator_import_launchpad_no_password_then_add_password() { description: "".into(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: keystore.public_key().unwrap(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path: dst_keystore_dir.join(KEYSTORE_NAME), @@ -738,6 +744,8 @@ fn validator_import_launchpad_password_file() { voting_public_key: keystore.public_key().unwrap(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path: None, diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index a9f8900d0c..1f6855cba4 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -394,25 +394,36 @@ fn merge_fee_recipient_flag() { fn run_payload_builder_flag_test(flag: &str, builders: &str) { use sensitive_url::SensitiveUrl; - let dir = TempDir::new().expect("Unable to create temporary directory"); let all_builders: Vec<_> = builders .split(",") .map(|builder| SensitiveUrl::parse(builder).expect("valid builder url")) .collect(); - CommandLineTest::new() - .flag("execution-endpoint", Some("http://meow.cats")) + run_payload_builder_flag_test_with_config(flag, builders, None, None, |config| { + let config = config.execution_layer.as_ref().unwrap(); + // Only first provided endpoint is parsed as we don't support + // redundancy. 
+ assert_eq!(config.builder_url, all_builders.get(0).cloned()); + }) +} +fn run_payload_builder_flag_test_with_config<F: Fn(&Config)>( + flag: &str, + builders: &str, + additional_flag: Option<&str>, + additional_flag_value: Option<&str>, + f: F, +) { + let dir = TempDir::new().expect("Unable to create temporary directory"); + let mut test = CommandLineTest::new(); + test.flag("execution-endpoint", Some("http://meow.cats")) .flag( "execution-jwt", dir.path().join("jwt-file").as_os_str().to_str(), ) - .flag(flag, Some(builders)) - .run_with_zero_port() - .with_config(|config| { - let config = config.execution_layer.as_ref().unwrap(); - // Only first provided endpoint is parsed as we don't support - // redundancy. - assert_eq!(config.builder_url, all_builders.get(0).cloned()); - }); + .flag(flag, Some(builders)); + if let Some(additional_flag_name) = additional_flag { + test.flag(additional_flag_name, additional_flag_value); + } + test.run_with_zero_port().with_config(f); } #[test] @@ -420,7 +431,46 @@ fn payload_builder_flags() { run_payload_builder_flag_test("builder", "http://meow.cats"); run_payload_builder_flag_test("payload-builder", "http://meow.cats"); run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs"); - run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs"); +} + +#[test] +fn builder_fallback_flags() { + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-fallback-skips"), + Some("7"), + |config| { + assert_eq!(config.chain.builder_fallback_skips, 7); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-fallback-skips-per-epoch"), + Some("11"), + |config| { + assert_eq!(config.chain.builder_fallback_skips_per_epoch, 11); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-fallback-epochs-since-finalization"), + Some("4"), + |config| { + 
assert_eq!(config.chain.builder_fallback_epochs_since_finalization, 4); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-fallback-disable-checks"), + None, + |config| { + assert_eq!(config.chain.builder_fallback_disable_checks, true); + }, + ); } fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_flag: &str) { diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 98b159e996..21dc4d7872 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -389,6 +389,48 @@ fn no_doppelganger_protection_flag() { .with_config(|config| assert!(!config.enable_doppelganger_protection)); } #[test] +fn no_gas_limit_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(config.gas_limit.is_none())); +} +#[test] +fn gas_limit_flag() { + CommandLineTest::new() + .flag("gas-limit", Some("600")) + .flag("builder-proposals", None) + .run() + .with_config(|config| assert_eq!(config.gas_limit, Some(600))); +} +#[test] +fn no_builder_proposals_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(!config.builder_proposals)); +} +#[test] +fn builder_proposals_flag() { + CommandLineTest::new() + .flag("builder-proposals", None) + .run() + .with_config(|config| assert!(config.builder_proposals)); +} +#[test] +fn no_builder_registration_timestamp_override_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(config.builder_registration_timestamp_override.is_none())); +} +#[test] +fn builder_registration_timestamp_override_flag() { + CommandLineTest::new() + .flag("builder-registration-timestamp-override", Some("100")) + .run() + .with_config(|config| { + assert_eq!(config.builder_registration_timestamp_override, Some(100)) + }); +} +#[test] fn strict_fee_recipient_flag() { CommandLineTest::new() .flag("strict-fee-recipient", None) diff --git 
a/scripts/local_testnet/print_logs.sh b/scripts/local_testnet/dump_logs.sh similarity index 83% rename from scripts/local_testnet/print_logs.sh rename to scripts/local_testnet/dump_logs.sh index 2a9e7822a6..dc5f4edd38 100755 --- a/scripts/local_testnet/print_logs.sh +++ b/scripts/local_testnet/dump_logs.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Print the tail of all the logs output from local testnet +# Print all the logs output from local testnet set -Eeuo pipefail @@ -12,6 +12,6 @@ do echo "=============================================================================" echo "$f" echo "=============================================================================" - tail "$f" + cat "$f" echo "" done diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index 33c1d642e7..dcc0a5382a 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -10,12 +10,14 @@ ulimit -n 65536 # VC_COUNT is defaulted in vars.env DEBUG_LEVEL=${DEBUG_LEVEL:-info} +BUILDER_PROPOSALS= # Get options -while getopts "v:d:h" flag; do +while getopts "v:d:ph" flag; do case "${flag}" in v) VC_COUNT=${OPTARG};; d) DEBUG_LEVEL=${OPTARG};; + p) BUILDER_PROPOSALS="-p";; h) validators=$(( $VALIDATOR_COUNT / $BN_COUNT )) echo "Start local testnet, defaults: 1 eth1 node, $BN_COUNT beacon nodes," @@ -26,6 +28,7 @@ while getopts "v:d:h" flag; do echo "Options:" echo " -v: VC_COUNT default: $VC_COUNT" echo " -d: DEBUG_LEVEL default: info" + echo " -p: enable private tx proposals" echo " -h: this help" exit ;; @@ -116,7 +119,7 @@ done # Start requested number of validator clients for (( vc=1; vc<=$VC_COUNT; vc++ )); do - execute_command_add_PID validator_node_$vc.log ./validator_client.sh $DATADIR/node_$vc http://localhost:$((BN_http_port_base + $vc)) $DEBUG_LEVEL + execute_command_add_PID validator_node_$vc.log ./validator_client.sh $BUILDER_PROPOSALS -d $DEBUG_LEVEL $DATADIR/node_$vc 
http://localhost:$((BN_http_port_base + $vc)) done echo "Started!" diff --git a/scripts/local_testnet/validator_client.sh b/scripts/local_testnet/validator_client.sh index 5aa75dfe2d..975a2a6753 100755 --- a/scripts/local_testnet/validator_client.sh +++ b/scripts/local_testnet/validator_client.sh @@ -10,13 +10,24 @@ set -Eeuo pipefail source ./vars.env -DEBUG_LEVEL=${3:-info} +DEBUG_LEVEL=info + +BUILDER_PROPOSALS= + +# Get options +while getopts "pd:" flag; do + case "${flag}" in + p) BUILDER_PROPOSALS="--builder-proposals";; + d) DEBUG_LEVEL=${OPTARG};; + esac +done exec lighthouse \ --debug-level $DEBUG_LEVEL \ vc \ - --datadir $1 \ + $BUILDER_PROPOSALS \ + --datadir ${@:$OPTIND:1} \ --testnet-dir $TESTNET_DIR \ --init-slashing-protection \ - --beacon-nodes $2 \ + --beacon-nodes ${@:$OPTIND+1:1} \ $VC_ARGS diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index f86148312f..798dae083b 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -17,8 +17,9 @@ use state_processing::per_block_processing::{ use std::fmt::Debug; use std::path::Path; use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Deposit, EthSpec, ForkName, - FullPayload, ProposerSlashing, SignedVoluntaryExit, SyncAggregate, + Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, + EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedVoluntaryExit, + SyncAggregate, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -255,6 +256,40 @@ impl<E: EthSpec> Operation<E> for FullPayload<E> { } } } +impl<E: EthSpec> Operation<E> for BlindedPayload<E> { + fn handler_name() -> String { + "execution_payload".into() + } + + fn filename() -> String { + "execution_payload.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name != ForkName::Base && fork_name != ForkName::Altair + } + + fn 
decode(path: &Path, _spec: &ChainSpec) -> Result<Self, Error> { + ssz_decode_file::<ExecutionPayload<E>>(path).map(Into::into) + } + + fn apply_to( + &self, + state: &mut BeaconState<E>, + spec: &ChainSpec, + extra: &Operations<E, Self>, + ) -> Result<(), BlockProcessingError> { + let valid = extra + .execution_metadata + .as_ref() + .map_or(false, |e| e.execution_valid); + if valid { + process_execution_payload(state, self, spec) + } else { + Err(BlockProcessingError::ExecutionInvalid) + } + } +} impl<E: EthSpec, O: Operation<E>> LoadCase for Operations<E, O> { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result<Self, Error> { diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index 540fe6903e..c075e89b3f 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -56,6 +56,7 @@ type_name!(Eth1Data); type_name_generic!(ExecutionPayload); type_name_generic!(FullPayload, "ExecutionPayload"); type_name_generic!(ExecutionPayloadHeader); +type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); type_name!(ForkData); type_name_generic!(HistoricalBatch); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 91345fb669..31abbd1591 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -71,11 +71,17 @@ fn operations_sync_aggregate() { } #[test] -fn operations_execution_payload() { +fn operations_execution_payload_full() { OperationsHandler::<MinimalEthSpec, FullPayload<_>>::default().run(); OperationsHandler::<MainnetEthSpec, FullPayload<_>>::default().run(); } +#[test] +fn operations_execution_payload_blinded() { + OperationsHandler::<MinimalEthSpec, BlindedPayload<_>>::default().run(); + OperationsHandler::<MainnetEthSpec, BlindedPayload<_>>::default().run(); +} + #[test] fn sanity_blocks() { SanityBlocksHandler::<MinimalEthSpec>::default().run(); diff --git a/testing/execution_engine_integration/src/test_rig.rs 
b/testing/execution_engine_integration/src/test_rig.rs index 7126268c37..0aa960bc41 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -3,7 +3,9 @@ use crate::execution_engine::{ }; use crate::transactions::transactions; use ethers_providers::Middleware; -use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatus}; +use execution_layer::{ + BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, PayloadStatus, +}; use fork_choice::ForkchoiceUpdateParameters; use reqwest::{header::CONTENT_TYPE, Client}; use sensitive_url::SensitiveUrl; @@ -14,7 +16,7 @@ use task_executor::TaskExecutor; use tokio::time::sleep; use types::{ Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, - MainnetEthSpec, Slot, Uint256, + MainnetEthSpec, PublicKeyBytes, Slot, Uint256, }; const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(20); @@ -305,6 +307,11 @@ impl<E: GenericExecutionEngine> TestRig<E> { // in CI. 
sleep(Duration::from_secs(3)).await; + let builder_params = BuilderParams { + pubkey: PublicKeyBytes::empty(), + slot: Slot::new(0), + chain_health: ChainHealth::Healthy, + }; let valid_payload = self .ee_a .execution_layer @@ -313,9 +320,9 @@ impl<E: GenericExecutionEngine> TestRig<E> { timestamp, prev_randao, proposer_index, - None, - Slot::new(0), forkchoice_update_params, + builder_params, + &self.spec, ) .await .unwrap() @@ -413,6 +420,11 @@ impl<E: GenericExecutionEngine> TestRig<E> { let timestamp = valid_payload.timestamp + 1; let prev_randao = Hash256::zero(); let proposer_index = 0; + let builder_params = BuilderParams { + pubkey: PublicKeyBytes::empty(), + slot: Slot::new(0), + chain_health: ChainHealth::Healthy, + }; let second_payload = self .ee_a .execution_layer @@ -421,9 +433,9 @@ impl<E: GenericExecutionEngine> TestRig<E> { timestamp, prev_randao, proposer_index, - None, - Slot::new(0), forkchoice_update_params, + builder_params, + &self.spec, ) .await .unwrap() diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index bdee18026b..4f9a574f84 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -302,6 +302,7 @@ mod tests { let slot_clock = TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1)); + let config = validator_client::Config::default(); let validator_store = ValidatorStore::<_, E>::new( initialized_validators, @@ -310,7 +311,7 @@ mod tests { spec, None, slot_clock, - None, + &config, executor, log.clone(), ); @@ -359,6 +360,8 @@ mod tests { voting_public_key: validator_pubkey.clone(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, description: String::default(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path: signer_rig.keystore_path.clone(), @@ -375,6 +378,8 @@ mod tests { voting_public_key: validator_pubkey.clone(), graffiti: None, suggested_fee_recipient: None, 
+ gas_limit: None, + builder_proposals: None, description: String::default(), signing_definition: SigningDefinition::Web3Signer(Web3SignerDefinition { url: signer_rig.url.to_string(), @@ -450,8 +455,6 @@ mod tests { } } - //TODO: remove this once the consensys web3signer includes the `validator_registration` method - #[allow(dead_code)] fn get_validator_registration(pubkey: PublicKeyBytes) -> ValidatorRegistrationData { let fee_recipient = Address::repeat_byte(42); ValidatorRegistrationData { @@ -513,16 +516,17 @@ mod tests { .await .unwrap() }) - //TODO: uncomment this once the consensys web3signer includes the `validator_registration` method - // - // .await - // .assert_signatures_match("validator_registration", |pubkey, validator_store| async move { - // let val_reg_data = get_validator_registration(pubkey); - // validator_store - // .sign_validator_registration_data(val_reg_data) - // .await - // .unwrap() - // }) + .await + .assert_signatures_match( + "validator_registration", + |pubkey, validator_store| async move { + let val_reg_data = get_validator_registration(pubkey); + validator_store + .sign_validator_registration_data(val_reg_data) + .await + .unwrap() + }, + ) .await; } @@ -599,16 +603,17 @@ mod tests { .unwrap() }, ) - //TODO: uncomment this once the consensys web3signer includes the `validator_registration` method - // - // .await - // .assert_signatures_match("validator_registration", |pubkey, validator_store| async move { - // let val_reg_data = get_validator_registration(pubkey); - // validator_store - // .sign_validator_registration_data(val_reg_data) - // .await - // .unwrap() - // }) + .await + .assert_signatures_match( + "validator_registration", + |pubkey, validator_store| async move { + let val_reg_data = get_validator_registration(pubkey); + validator_store + .sign_validator_registration_data(val_reg_data) + .await + .unwrap() + }, + ) .await; } diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs 
index 649f240645..d47546eb0d 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -11,9 +11,7 @@ use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; use tokio::sync::mpsc; -use types::{ - BlindedPayload, BlockType, Epoch, EthSpec, ExecPayload, FullPayload, PublicKeyBytes, Slot, -}; +use types::{BlindedPayload, BlockType, EthSpec, ExecPayload, FullPayload, PublicKeyBytes, Slot}; #[derive(Debug)] pub enum BlockError { @@ -44,7 +42,6 @@ pub struct BlockServiceBuilder<T, E: EthSpec> { context: Option<RuntimeContext<E>>, graffiti: Option<Graffiti>, graffiti_file: Option<GraffitiFile>, - private_tx_proposals: bool, strict_fee_recipient: bool, } @@ -57,7 +54,6 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { context: None, graffiti: None, graffiti_file: None, - private_tx_proposals: false, strict_fee_recipient: false, } } @@ -92,11 +88,6 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { self } - pub fn private_tx_proposals(mut self, private_tx_proposals: bool) -> Self { - self.private_tx_proposals = private_tx_proposals; - self - } - pub fn strict_fee_recipient(mut self, strict_fee_recipient: bool) -> Self { self.strict_fee_recipient = strict_fee_recipient; self @@ -119,7 +110,6 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { .ok_or("Cannot build BlockService without runtime_context")?, graffiti: self.graffiti, graffiti_file: self.graffiti_file, - private_tx_proposals: self.private_tx_proposals, strict_fee_recipient: self.strict_fee_recipient, }), }) @@ -134,7 +124,6 @@ pub struct Inner<T, E: EthSpec> { context: RuntimeContext<E>, graffiti: Option<Graffiti>, graffiti_file: Option<GraffitiFile>, - private_tx_proposals: bool, strict_fee_recipient: bool, } @@ -244,32 +233,29 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { ) } - let private_tx_proposals = self.private_tx_proposals; - let merge_slot = self - .context - .eth2_config 
- .spec - .bellatrix_fork_epoch - .unwrap_or_else(Epoch::max_value) - .start_slot(E::slots_per_epoch()); for validator_pubkey in proposers { + let builder_proposals = self + .validator_store + .get_builder_proposals(&validator_pubkey); let service = self.clone(); let log = log.clone(); self.inner.context.executor.spawn( async move { - let publish_result = if private_tx_proposals && slot >= merge_slot { + let publish_result = if builder_proposals { let mut result = service.clone() .publish_block::<BlindedPayload<E>>(slot, validator_pubkey) .await; match result.as_ref() { Err(BlockError::Recoverable(e)) => { - error!(log, "Error whilst producing a blinded block, attempting to publish full block"; "error" => ?e); + error!(log, "Error whilst producing a blinded block, attempting to \ + publish full block"; "error" => ?e); result = service .publish_block::<FullPayload<E>>(slot, validator_pubkey) .await; }, Err(BlockError::Irrecoverable(e)) => { - error!(log, "Error whilst producing a blinded block, cannot fallback because block was signed"; "error" => ?e); + error!(log, "Error whilst producing a blinded block, cannot fallback \ + because the block was signed"; "error" => ?e); }, _ => {}, }; @@ -344,12 +330,12 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { let block = self .beacon_nodes .first_success(RequireSynced::No, |beacon_node| async move { - let get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_GET], - ); let block = match Payload::block_type() { BlockType::Full => { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_GET], + ); beacon_node .get_validator_blocks::<E, Payload>( slot, @@ -366,6 +352,10 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { .data } BlockType::Blinded => { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], + ); beacon_node 
.get_validator_blinded_blocks::<E, Payload>( slot, @@ -382,7 +372,6 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { .data } }; - drop(get_timer); // Ensure the correctness of the execution payload's fee recipient. if strict_fee_recipient { @@ -415,43 +404,51 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { // Publish block with first available beacon node. self.beacon_nodes .first_success(RequireSynced::No, |beacon_node| async { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_POST], - ); - match Payload::block_type() { - BlockType::Full => beacon_node - .post_beacon_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })?, - BlockType::Blinded => beacon_node - .post_beacon_blinded_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })?, + BlockType::Full => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {:?}", + e + )) + })? + } + BlockType::Blinded => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blinded_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {:?}", + e + )) + })? 
+ } } - - info!( - log, - "Successfully published block"; - "deposits" => signed_block.message().body().deposits().len(), - "attestations" => signed_block.message().body().attestations().len(), - "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), - "slot" => signed_block.slot().as_u64(), - ); Ok::<_, BlockError>(()) }) .await?; + + info!( + log, + "Successfully published block"; + "block_type" => ?Payload::block_type(), + "deposits" => signed_block.message().body().deposits().len(), + "attestations" => signed_block.message().body().attestations().len(), + "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), + "slot" => signed_block.slot().as_u64(), + ); Ok(()) } } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 1f8b7b08ba..ceca31aa75 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -251,8 +251,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(false), ) .arg( - Arg::with_name("private-tx-proposals") - .long("private-tx-proposals") + Arg::with_name("builder-proposals") + .long("builder-proposals") + .alias("private-tx-proposals") .help("If this flag is set, Lighthouse will query the Beacon Node for only block \ headers during proposals and will sign over headers. 
Useful for outsourcing \ execution payload construction during proposals.") @@ -271,4 +272,22 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { for payload construction, where a strict fee recipient check will still be applied.") .takes_value(false), ) + .arg( + Arg::with_name("builder-registration-timestamp-override") + .long("builder-registration-timestamp-override") + .alias("builder-registration-timestamp-override") + .help("This flag takes a unix timestamp value that will be used to override the \ + timestamp used in the builder api registration") + .takes_value(true), + ) + .arg( + Arg::with_name("gas-limit") + .long("gas-limit") + .value_name("INTEGER") + .takes_value(true) + .help("The gas limit to be used in all builder proposals for all validators managed \ + by this validator client. Note this will not necessarily be used if the gas limit \ + set here moves too far from the previous block's gas limit. [default: 30,000,000]") + .requires("builder-proposals"), + ) } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 725414b1b9..42c91927ca 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -52,7 +52,12 @@ pub struct Config { /// If true, enable functionality that monitors the network for attestations or proposals from /// any of the validators managed by this client before starting up. pub enable_doppelganger_protection: bool, - pub private_tx_proposals: bool, + /// Enable use of the blinded block endpoints during proposals. + pub builder_proposals: bool, + /// Overrides the timestamp field in builder api ValidatorRegistrationV1 + pub builder_registration_timestamp_override: Option<u64>, + /// Fallback gas limit. + pub gas_limit: Option<u64>, /// A list of custom certificates that the validator client will additionally use when /// connecting to a beacon node over SSL/TLS. 
pub beacon_nodes_tls_certs: Option<Vec<PathBuf>>, @@ -91,7 +96,9 @@ impl Default for Config { monitoring_api: None, enable_doppelganger_protection: false, beacon_nodes_tls_certs: None, - private_tx_proposals: false, + builder_proposals: false, + builder_registration_timestamp_override: None, + gas_limit: None, strict_fee_recipient: false, } } @@ -300,8 +307,27 @@ impl Config { config.enable_doppelganger_protection = true; } - if cli_args.is_present("private-tx-proposals") { - config.private_tx_proposals = true; + if cli_args.is_present("builder-proposals") { + config.builder_proposals = true; + } + + config.gas_limit = cli_args + .value_of("gas-limit") + .map(|gas_limit| { + gas_limit + .parse::<u64>() + .map_err(|_| "gas-limit is not a valid u64.") + }) + .transpose()?; + + if let Some(registration_timestamp_override) = + cli_args.value_of("builder-registration-timestamp-override") + { + config.builder_registration_timestamp_override = Some( + registration_timestamp_override + .parse::<u64>() + .map_err(|_| "builder-registration-timestamp-override is not a valid u64.")?, + ); } if cli_args.is_present("strict-fee-recipient") { diff --git a/validator_client/src/http_api/create_validator.rs b/validator_client/src/http_api/create_validator.rs index db59c25f75..a32ccce627 100644 --- a/validator_client/src/http_api/create_validator.rs +++ b/validator_client/src/http_api/create_validator.rs @@ -140,6 +140,8 @@ pub async fn create_validators_mnemonic<P: AsRef<Path>, T: 'static + SlotClock, request.enable, request.graffiti.clone(), request.suggested_fee_recipient, + request.gas_limit, + request.builder_proposals, ) .await .map_err(|e| { @@ -154,6 +156,8 @@ pub async fn create_validators_mnemonic<P: AsRef<Path>, T: 'static + SlotClock, description: request.description.clone(), graffiti: request.graffiti.clone(), suggested_fee_recipient: request.suggested_fee_recipient, + gas_limit: request.gas_limit, + builder_proposals: request.builder_proposals, voting_pubkey, 
eth1_deposit_tx_data: eth2_serde_utils::hex::encode(ð1_deposit_data.rlp), deposit_gwei: request.deposit_gwei, diff --git a/validator_client/src/http_api/keystores.rs b/validator_client/src/http_api/keystores.rs index f88aacfca8..29af8d0205 100644 --- a/validator_client/src/http_api/keystores.rs +++ b/validator_client/src/http_api/keystores.rs @@ -205,6 +205,8 @@ fn import_single_keystore<T: SlotClock + 'static, E: EthSpec>( true, None, None, + None, + None, )) .map_err(|e| format!("failed to initialize validator: {:?}", e))?; diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 07e7b1e13f..a5d8d0e71c 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -413,6 +413,8 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( let voting_password = body.password.clone(); let graffiti = body.graffiti.clone(); let suggested_fee_recipient = body.suggested_fee_recipient; + let gas_limit = body.gas_limit; + let builder_proposals = body.builder_proposals; let validator_def = { if let Some(handle) = task_executor.handle() { @@ -423,6 +425,8 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( body.enable, graffiti, suggested_fee_recipient, + gas_limit, + builder_proposals, )) .map_err(|e| { warp_utils::reject::custom_server_error(format!( @@ -469,6 +473,8 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( voting_public_key: web3signer.voting_public_key, graffiti: web3signer.graffiti, suggested_fee_recipient: web3signer.suggested_fee_recipient, + gas_limit: web3signer.gas_limit, + builder_proposals: web3signer.builder_proposals, description: web3signer.description, signing_definition: SigningDefinition::Web3Signer( Web3SignerDefinition { @@ -515,18 +521,32 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( let initialized_validators_rw_lock = validator_store.initialized_validators(); let mut initialized_validators = initialized_validators_rw_lock.write(); - match 
initialized_validators.is_enabled(&validator_pubkey) { - None => Err(warp_utils::reject::custom_not_found(format!( + match ( + initialized_validators.is_enabled(&validator_pubkey), + initialized_validators.validator(&validator_pubkey.compress()), + ) { + (None, _) => Err(warp_utils::reject::custom_not_found(format!( "no validator for {:?}", validator_pubkey ))), - Some(enabled) if enabled == body.enabled => Ok(()), - Some(_) => { + (Some(is_enabled), Some(initialized_validator)) + if Some(is_enabled) == body.enabled + && initialized_validator.get_gas_limit() == body.gas_limit + && initialized_validator.get_builder_proposals() + == body.builder_proposals => + { + Ok(()) + } + (Some(_), _) => { if let Some(handle) = task_executor.handle() { handle .block_on( - initialized_validators - .set_validator_status(&validator_pubkey, body.enabled), + initialized_validators.set_validator_definition_fields( + &validator_pubkey, + body.enabled, + body.gas_limit, + body.builder_proposals, + ), ) .map_err(|e| { warp_utils::reject::custom_server_error(format!( diff --git a/validator_client/src/http_api/remotekeys.rs b/validator_client/src/http_api/remotekeys.rs index 57b7527e2b..991dfb8bf7 100644 --- a/validator_client/src/http_api/remotekeys.rs +++ b/validator_client/src/http_api/remotekeys.rs @@ -123,6 +123,8 @@ fn import_single_remotekey<T: SlotClock + 'static, E: EthSpec>( voting_public_key: pubkey, graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, description: String::from("Added by remotekey API"), signing_definition: SigningDefinition::Web3Signer(Web3SignerDefinition { url, diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 7ee0563417..e67a82634c 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -83,6 +83,7 @@ impl ApiTester { let mut config = Config::default(); config.validator_dir = validator_dir.path().into(); config.secrets_dir = 
secrets_dir.path().into(); + config.fee_recipient = Some(TEST_DEFAULT_FEE_RECIPIENT); let spec = E::default_spec(); @@ -103,7 +104,7 @@ impl ApiTester { spec, Some(Arc::new(DoppelgangerService::new(log.clone()))), slot_clock, - Some(TEST_DEFAULT_FEE_RECIPIENT), + &config, executor.clone(), log.clone(), )); @@ -270,6 +271,8 @@ impl ApiTester { description: format!("boi #{}", i), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, deposit_gwei: E::default_spec().max_effective_balance, }) .collect::<Vec<_>>(); @@ -401,6 +404,8 @@ impl ApiTester { keystore, graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, }; self.client @@ -419,6 +424,8 @@ impl ApiTester { keystore, graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, }; let response = self @@ -455,6 +462,8 @@ impl ApiTester { description: format!("{}", i), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: kp.pk, url: format!("http://signer_{}.com/", i), root_certificate_path: None, @@ -484,7 +493,7 @@ impl ApiTester { let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; self.client - .patch_lighthouse_validators(&validator.voting_pubkey, enabled) + .patch_lighthouse_validators(&validator.voting_pubkey, Some(enabled), None, None) .await .unwrap(); @@ -521,6 +530,56 @@ impl ApiTester { self } + + pub async fn set_gas_limit(self, index: usize, gas_limit: u64) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators(&validator.voting_pubkey, None, Some(gas_limit), None) + .await + .unwrap(); + + self + } + + pub async fn assert_gas_limit(self, index: usize, gas_limit: u64) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + assert_eq!( + 
self.validator_store.get_gas_limit(&validator.voting_pubkey), + gas_limit + ); + + self + } + + pub async fn set_builder_proposals(self, index: usize, builder_proposals: bool) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators( + &validator.voting_pubkey, + None, + None, + Some(builder_proposals), + ) + .await + .unwrap(); + + self + } + + pub async fn assert_builder_proposals(self, index: usize, builder_proposals: bool) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + assert_eq!( + self.validator_store + .get_builder_proposals(&validator.voting_pubkey), + builder_proposals + ); + + self + } } struct HdValidatorScenario { @@ -583,6 +642,8 @@ fn routes_with_invalid_auth() { description: <_>::default(), graffiti: <_>::default(), suggested_fee_recipient: <_>::default(), + gas_limit: <_>::default(), + builder_proposals: <_>::default(), deposit_gwei: <_>::default(), }]) .await @@ -612,13 +673,15 @@ fn routes_with_invalid_auth() { keystore, graffiti: <_>::default(), suggested_fee_recipient: <_>::default(), + gas_limit: <_>::default(), + builder_proposals: <_>::default(), }) .await }) .await .test_with_invalid_auth(|client| async move { client - .patch_lighthouse_validators(&PublicKeyBytes::empty(), false) + .patch_lighthouse_validators(&PublicKeyBytes::empty(), Some(false), None, None) .await }) .await @@ -735,6 +798,74 @@ fn validator_enabling() { }); } +#[test] +fn validator_gas_limit() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on(async { + ApiTester::new(weak_runtime) + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_gas_limit(0, 500) + .await + .assert_gas_limit(0, 500) + .await + // 
Update gas limit while validator is disabled. + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_gas_limit(0, 1000) + .await + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_gas_limit(0, 1000) + .await + }); +} + +#[test] +fn validator_builder_proposals() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on(async { + ApiTester::new(weak_runtime) + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_builder_proposals(0, true) + .await + // Test setting builder proposals while the validator is disabled + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_builder_proposals(0, false) + .await + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_builder_proposals(0, false) + .await + }); +} + #[test] fn keystore_validator_creation() { let runtime = build_runtime(); diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index 530993ee05..c3b5f0bb90 100644 --- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -39,6 +39,8 @@ fn web3signer_validator_with_pubkey(pubkey: PublicKey) -> Web3SignerValidatorReq description: "".into(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: pubkey, url: web3_signer_url(), root_certificate_path: None, @@ -465,7 +467,7 @@ fn import_and_delete_conflicting_web3_signer_keystores() { for pubkey in &pubkeys { tester .client - .patch_lighthouse_validators(pubkey, false) + .patch_lighthouse_validators(pubkey, Some(false), None, None) 
.await .unwrap(); } diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 836aab4c1f..146d008a57 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -11,7 +11,9 @@ pub const UNREGISTERED: &str = "unregistered"; pub const FULL_UPDATE: &str = "full_update"; pub const BEACON_BLOCK: &str = "beacon_block"; pub const BEACON_BLOCK_HTTP_GET: &str = "beacon_block_http_get"; +pub const BLINDED_BEACON_BLOCK_HTTP_GET: &str = "blinded_beacon_block_http_get"; pub const BEACON_BLOCK_HTTP_POST: &str = "beacon_block_http_post"; +pub const BLINDED_BEACON_BLOCK_HTTP_POST: &str = "blinded_beacon_block_http_post"; pub const ATTESTATIONS: &str = "attestations"; pub const ATTESTATIONS_HTTP_GET: &str = "attestations_http_get"; pub const ATTESTATIONS_HTTP_POST: &str = "attestations_http_post"; diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 8069bfcab8..66a621eb77 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -110,6 +110,8 @@ pub struct InitializedValidator { signing_method: Arc<SigningMethod>, graffiti: Option<Graffiti>, suggested_fee_recipient: Option<Address>, + gas_limit: Option<u64>, + builder_proposals: Option<bool>, /// The validators index in `state.validators`, to be updated by an external service. index: Option<u64>, } @@ -129,6 +131,22 @@ impl InitializedValidator { SigningMethod::Web3Signer { .. 
} => None, } } + + pub fn get_suggested_fee_recipient(&self) -> Option<Address> { + self.suggested_fee_recipient + } + + pub fn get_gas_limit(&self) -> Option<u64> { + self.gas_limit + } + + pub fn get_builder_proposals(&self) -> Option<bool> { + self.builder_proposals + } + + pub fn get_index(&self) -> Option<u64> { + self.index + } } fn open_keystore(path: &Path) -> Result<Keystore, Error> { @@ -292,6 +310,8 @@ impl InitializedValidator { signing_method: Arc::new(signing_method), graffiti: def.graffiti.map(Into::into), suggested_fee_recipient: def.suggested_fee_recipient, + gas_limit: def.gas_limit, + builder_proposals: def.builder_proposals, index: None, }) } @@ -622,7 +642,28 @@ impl InitializedValidators { .and_then(|v| v.suggested_fee_recipient) } - /// Sets the `InitializedValidator` and `ValidatorDefinition` `enabled` values. + /// Returns the `gas_limit` for a given public key specified in the + /// `ValidatorDefinitions`. + pub fn gas_limit(&self, public_key: &PublicKeyBytes) -> Option<u64> { + self.validators.get(public_key).and_then(|v| v.gas_limit) + } + + /// Returns the `builder_proposals` for a given public key specified in the + /// `ValidatorDefinitions`. + pub fn builder_proposals(&self, public_key: &PublicKeyBytes) -> Option<bool> { + self.validators + .get(public_key) + .and_then(|v| v.builder_proposals) + } + + /// Returns an `Option` of a reference to an `InitializedValidator` for a given public key specified in the + /// `ValidatorDefinitions`. + pub fn validator(&self, public_key: &PublicKeyBytes) -> Option<&InitializedValidator> { + self.validators.get(public_key) + } + + /// Sets the `InitializedValidator` and `ValidatorDefinition` `enabled`, `gas_limit`, and `builder_proposals` + /// values. /// /// ## Notes /// @@ -630,11 +671,17 @@ impl InitializedValidators { /// disk. A newly enabled validator will be added to `self.validators`, whilst a newly disabled /// validator will be removed from `self.validators`. 
/// + /// If a `gas_limit` is included in the call to this function, it will also be updated and saved + /// to disk. If `gas_limit` is `None` the `gas_limit` *will not* be unset in `ValidatorDefinition` + /// or `InitializedValidator`. The same logic applies to `builder_proposals`. + /// /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. - pub async fn set_validator_status( + pub async fn set_validator_definition_fields( &mut self, voting_public_key: &PublicKey, - enabled: bool, + enabled: Option<bool>, + gas_limit: Option<u64>, + builder_proposals: Option<bool>, ) -> Result<(), Error> { if let Some(def) = self .definitions @@ -642,11 +689,33 @@ impl InitializedValidators { .iter_mut() .find(|def| def.voting_public_key == *voting_public_key) { - def.enabled = enabled; + // Don't overwrite fields if they are not set in this request. + if let Some(enabled) = enabled { + def.enabled = enabled; + } + if let Some(gas_limit) = gas_limit { + def.gas_limit = Some(gas_limit); + } + if let Some(builder_proposals) = builder_proposals { + def.builder_proposals = Some(builder_proposals); + } } self.update_validators().await?; + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + // Don't overwrite fields if they are not set in this request. 
+ if let Some(gas_limit) = gas_limit { + val.gas_limit = Some(gas_limit); + } + if let Some(builder_proposals) = builder_proposals { + val.builder_proposals = Some(builder_proposals); + } + } + self.definitions .save(&self.validators_dir) .map_err(Error::UnableToSaveDefinitions)?; diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index f10142d614..bb7b296d23 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -362,7 +362,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> { context.eth2_config.spec.clone(), doppelganger_service.clone(), slot_clock.clone(), - config.fee_recipient, + &config, context.executor.clone(), log.clone(), )); @@ -413,7 +413,6 @@ impl<T: EthSpec> ProductionValidatorClient<T> { .runtime_context(context.service_context("block".into())) .graffiti(config.graffiti) .graffiti_file(config.graffiti_file.clone()) - .private_tx_proposals(config.private_tx_proposals) .strict_fee_recipient(config.strict_fee_recipient) .build()?; @@ -430,6 +429,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> { .validator_store(validator_store.clone()) .beacon_nodes(beacon_nodes.clone()) .runtime_context(context.service_context("preparation".into())) + .builder_registration_timestamp_override(config.builder_registration_timestamp_override) .build()?; let sync_committee_service = SyncCommitteeService::new( @@ -487,10 +487,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> { self.preparation_service .clone() - .start_update_service( - self.config.private_tx_proposals, - &self.context.eth2_config.spec, - ) + .start_update_service(&self.context.eth2_config.spec) .map_err(|e| format!("Unable to start preparation service: {}", e))?; if let Some(doppelganger_service) = self.doppelganger_service.clone() { diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index 01dfc0ca04..b138d3e4ee 100644 --- a/validator_client/src/preparation_service.rs +++ 
b/validator_client/src/preparation_service.rs @@ -22,12 +22,16 @@ const PROPOSER_PREPARATION_LOOKAHEAD_EPOCHS: u64 = 2; /// Number of epochs to wait before re-submitting validator registration. const EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION: u64 = 1; +/// The number of validator registrations to include per request to the beacon node. +const VALIDATOR_REGISTRATION_BATCH_SIZE: usize = 500; + /// Builds an `PreparationService`. pub struct PreparationServiceBuilder<T: SlotClock + 'static, E: EthSpec> { validator_store: Option<Arc<ValidatorStore<T, E>>>, slot_clock: Option<T>, beacon_nodes: Option<Arc<BeaconNodeFallback<T, E>>>, context: Option<RuntimeContext<E>>, + builder_registration_timestamp_override: Option<u64>, } impl<T: SlotClock + 'static, E: EthSpec> PreparationServiceBuilder<T, E> { @@ -37,6 +41,7 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationServiceBuilder<T, E> { slot_clock: None, beacon_nodes: None, context: None, + builder_registration_timestamp_override: None, } } @@ -60,6 +65,14 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationServiceBuilder<T, E> { self } + pub fn builder_registration_timestamp_override( + mut self, + builder_registration_timestamp_override: Option<u64>, + ) -> Self { + self.builder_registration_timestamp_override = builder_registration_timestamp_override; + self + } + pub fn build(self) -> Result<PreparationService<T, E>, String> { Ok(PreparationService { inner: Arc::new(Inner { @@ -75,6 +88,8 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationServiceBuilder<T, E> { context: self .context .ok_or("Cannot build PreparationService without runtime_context")?, + builder_registration_timestamp_override: self + .builder_registration_timestamp_override, validator_registration_cache: RwLock::new(HashMap::new()), }), }) @@ -87,6 +102,7 @@ pub struct Inner<T, E: EthSpec> { slot_clock: T, beacon_nodes: Arc<BeaconNodeFallback<T, E>>, context: RuntimeContext<E>, + builder_registration_timestamp_override: Option<u64>, // 
Used to track unpublished validator registration changes. validator_registration_cache: RwLock<HashMap<ValidatorRegistrationKey, SignedValidatorRegistrationData>>, @@ -137,14 +153,8 @@ impl<T, E: EthSpec> Deref for PreparationService<T, E> { } impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { - pub fn start_update_service( - self, - start_registration_service: bool, - spec: &ChainSpec, - ) -> Result<(), String> { - if start_registration_service { - self.clone().start_validator_registration_service(spec)?; - } + pub fn start_update_service(self, spec: &ChainSpec) -> Result<(), String> { + self.clone().start_validator_registration_service(spec)?; self.start_proposer_prepare_service(spec) } @@ -208,7 +218,7 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { let validator_registration_fut = async move { loop { // Poll the endpoint immediately to ensure fee recipients are received. - if let Err(e) = self.register_validators(&spec).await { + if let Err(e) = self.register_validators().await { error!(log,"Error during validator registration";"error" => ?e); } @@ -251,35 +261,48 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { } fn collect_preparation_data(&self, spec: &ChainSpec) -> Vec<ProposerPreparationData> { - self.collect_data(spec, |_, validator_index, fee_recipient| { - ProposerPreparationData { - validator_index, - fee_recipient, - } - }) - } - - fn collect_validator_registration_keys( - &self, - spec: &ChainSpec, - ) -> Vec<ValidatorRegistrationKey> { - self.collect_data(spec, |pubkey, _, fee_recipient| { - ValidatorRegistrationKey { - fee_recipient, - //TODO(sean) this is geth's default, we should make this configurable and maybe have the default be dynamic. 
- // Discussion here: https://github.com/ethereum/builder-specs/issues/17 - gas_limit: 30_000_000, - pubkey, - } - }) - } - - fn collect_data<G, U>(&self, spec: &ChainSpec, map_fn: G) -> Vec<U> - where - G: Fn(PublicKeyBytes, u64, Address) -> U, - { let log = self.context.log(); + self.collect_proposal_data(|pubkey, proposal_data| { + if let Some(fee_recipient) = proposal_data.fee_recipient { + Some(ProposerPreparationData { + // Ignore fee recipients for keys without indices, they are inactive. + validator_index: proposal_data.validator_index?, + fee_recipient, + }) + } else { + if spec.bellatrix_fork_epoch.is_some() { + error!( + log, + "Validator is missing fee recipient"; + "msg" => "update validator_definitions.yml", + "pubkey" => ?pubkey + ); + } + None + } + }) + } + fn collect_validator_registration_keys(&self) -> Vec<ValidatorRegistrationKey> { + self.collect_proposal_data(|pubkey, proposal_data| { + // We don't log for missing fee recipients here because this will be logged more + // frequently in `collect_preparation_data`. + proposal_data.fee_recipient.and_then(|fee_recipient| { + proposal_data + .builder_proposals + .then(|| ValidatorRegistrationKey { + fee_recipient, + gas_limit: proposal_data.gas_limit, + pubkey, + }) + }) + }) + } + + fn collect_proposal_data<G, U>(&self, map_fn: G) -> Vec<U> + where + G: Fn(PublicKeyBytes, ProposalData) -> Option<U>, + { let all_pubkeys: Vec<_> = self .validator_store .voting_pubkeys(DoppelgangerStatus::ignored); @@ -287,23 +310,8 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { all_pubkeys .into_iter() .filter_map(|pubkey| { - // Ignore fee recipients for keys without indices, they are inactive. 
- let validator_index = self.validator_store.validator_index(&pubkey)?; - let fee_recipient = self.validator_store.get_fee_recipient(&pubkey); - - if let Some(fee_recipient) = fee_recipient { - Some(map_fn(pubkey, validator_index, fee_recipient)) - } else { - if spec.bellatrix_fork_epoch.is_some() { - error!( - log, - "Validator is missing fee recipient"; - "msg" => "update validator_definitions.yml", - "pubkey" => ?pubkey - ); - } - None - } + let proposal_data = self.validator_store.proposal_data(&pubkey)?; + map_fn(pubkey, proposal_data) }) .collect() } @@ -341,8 +349,8 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { } /// Register validators with builders, used in the blinded block proposal flow. - async fn register_validators(&self, spec: &ChainSpec) -> Result<(), String> { - let registration_keys = self.collect_validator_registration_keys(spec); + async fn register_validators(&self) -> Result<(), String> { + let registration_keys = self.collect_validator_registration_keys(); let mut changed_keys = vec![]; @@ -388,10 +396,15 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { let signed_data = if let Some(signed_data) = cached_registration_opt { signed_data } else { - let timestamp = SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("{e:?}"))? - .as_secs(); + let timestamp = + if let Some(timestamp) = self.builder_registration_timestamp_override { + timestamp + } else { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("{e:?}"))? 
+ .as_secs() + }; let ValidatorRegistrationKey { fee_recipient, @@ -426,29 +439,35 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { } if !signed.is_empty() { - let signed_ref = signed.as_slice(); - - match self - .beacon_nodes - .first_success(RequireSynced::Yes, |beacon_node| async move { - beacon_node - .post_validator_register_validator(signed_ref) - .await - }) - .await - { - Ok(()) => debug!( - log, - "Published validator registration"; - "count" => registration_data_len, - ), - Err(e) => error!( - log, - "Unable to publish validator registration"; - "error" => %e, - ), + for batch in signed.chunks(VALIDATOR_REGISTRATION_BATCH_SIZE) { + match self + .beacon_nodes + .first_success(RequireSynced::Yes, |beacon_node| async move { + beacon_node.post_validator_register_validator(batch).await + }) + .await + { + Ok(()) => info!( + log, + "Published validator registrations to the builder network"; + "count" => registration_data_len, + ), + Err(e) => error!( + log, + "Unable to publish validator registrations to the builder network"; + "error" => %e, + ), + } } } Ok(()) } } + +/// A helper struct, used for passing data from the validator store to services. 
+pub struct ProposalData { + pub(crate) validator_index: Option<u64>, + pub(crate) fee_recipient: Option<Address>, + pub(crate) gas_limit: u64, + pub(crate) builder_proposals: bool, +} diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index de39f91264..f883d0201f 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -3,6 +3,7 @@ use crate::{ http_metrics::metrics, initialized_validators::InitializedValidators, signing_method::{Error as SigningError, SignableMessage, SigningContext, SigningMethod}, + Config, }; use account_utils::{validator_definitions::ValidatorDefinition, ZeroizeString}; use parking_lot::{Mutex, RwLock}; @@ -27,6 +28,7 @@ use types::{ use validator_dir::ValidatorDir; pub use crate::doppelganger_service::DoppelgangerStatus; +use crate::preparation_service::ProposalData; #[derive(Debug, PartialEq)] pub enum Error { @@ -52,6 +54,11 @@ impl From<SigningError> for Error { /// This acts as a maximum safe-guard against clock drift. const SLASHING_PROTECTION_HISTORY_EPOCHS: u64 = 512; +/// Currently used as the default gas limit in execution clients. 
+/// +/// https://github.com/ethereum/builder-specs/issues/17 +const DEFAULT_GAS_LIMIT: u64 = 30_000_000; + struct LocalValidator { validator_dir: ValidatorDir, voting_keypair: Keypair, @@ -87,6 +94,8 @@ pub struct ValidatorStore<T, E: EthSpec> { doppelganger_service: Option<Arc<DoppelgangerService>>, slot_clock: T, fee_recipient_process: Option<Address>, + gas_limit: Option<u64>, + builder_proposals: bool, task_executor: TaskExecutor, _phantom: PhantomData<E>, } @@ -102,7 +111,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { spec: ChainSpec, doppelganger_service: Option<Arc<DoppelgangerService>>, slot_clock: T, - fee_recipient_process: Option<Address>, + config: &Config, task_executor: TaskExecutor, log: Logger, ) -> Self { @@ -115,7 +124,9 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { log, doppelganger_service, slot_clock, - fee_recipient_process, + fee_recipient_process: config.fee_recipient, + gas_limit: config.gas_limit, + builder_proposals: config.builder_proposals, task_executor, _phantom: PhantomData, } @@ -146,6 +157,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { /// Insert a new validator to `self`, where the validator is represented by an EIP-2335 /// keystore on the filesystem. 
+ #[allow(clippy::too_many_arguments)] pub async fn add_validator_keystore<P: AsRef<Path>>( &self, voting_keystore_path: P, @@ -153,12 +165,16 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { enable: bool, graffiti: Option<GraffitiString>, suggested_fee_recipient: Option<Address>, + gas_limit: Option<u64>, + builder_proposals: Option<bool>, ) -> Result<ValidatorDefinition, String> { let mut validator_def = ValidatorDefinition::new_keystore_with_password( voting_keystore_path, Some(password), graffiti.map(Into::into), suggested_fee_recipient, + gas_limit, + builder_proposals, ) .map_err(|e| format!("failed to create validator definitions: {:?}", e))?; @@ -200,6 +216,23 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { Ok(validator_def) } + /// Returns `ProposalData` for the provided `pubkey` if it exists in `InitializedValidators`. + /// `ProposalData` fields include defaulting logic described in `get_fee_recipient_defaulting`, + /// `get_gas_limit_defaulting`, and `get_builder_proposals_defaulting`. + pub fn proposal_data(&self, pubkey: &PublicKeyBytes) -> Option<ProposalData> { + self.validators + .read() + .validator(pubkey) + .map(|validator| ProposalData { + validator_index: validator.get_index(), + fee_recipient: self + .get_fee_recipient_defaulting(validator.get_suggested_fee_recipient()), + gas_limit: self.get_gas_limit_defaulting(validator.get_gas_limit()), + builder_proposals: self + .get_builder_proposals_defaulting(validator.get_builder_proposals()), + }) + } + /// Attempts to resolve the pubkey to a validator index. /// /// It may return `None` if the `pubkey` is: @@ -366,9 +399,12 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { pub fn get_fee_recipient(&self, validator_pubkey: &PublicKeyBytes) -> Option<Address> { // If there is a `suggested_fee_recipient` in the validator definitions yaml // file, use that value. 
- self.suggested_fee_recipient(validator_pubkey) - // If there's nothing in the file, try the process-level default value. - .or(self.fee_recipient_process) + self.get_fee_recipient_defaulting(self.suggested_fee_recipient(validator_pubkey)) + } + + pub fn get_fee_recipient_defaulting(&self, fee_recipient: Option<Address>) -> Option<Address> { + // If there's nothing in the file, try the process-level default value. + fee_recipient.or(self.fee_recipient_process) + } /// Returns the suggested_fee_recipient from `validator_definitions.yml` if any. @@ -379,6 +415,45 @@ .suggested_fee_recipient(validator_pubkey) } + /// Returns the gas limit for the given public key. The priority order for fetching + /// the gas limit is: + /// + /// 1. validator_definitions.yml + /// 2. process level gas limit + /// 3. `DEFAULT_GAS_LIMIT` + pub fn get_gas_limit(&self, validator_pubkey: &PublicKeyBytes) -> u64 { + self.get_gas_limit_defaulting(self.validators.read().gas_limit(validator_pubkey)) + } + + fn get_gas_limit_defaulting(&self, gas_limit: Option<u64>) -> u64 { + // If there is a `gas_limit` in the validator definitions yaml + // file, use that value. + gas_limit + // If there's nothing in the file, try the process-level default value. + .or(self.gas_limit) + // If there's no process-level default, use the `DEFAULT_GAS_LIMIT`. + .unwrap_or(DEFAULT_GAS_LIMIT) + } + + /// Returns a `bool` for the given public key that denotes whether this validator should use the + /// builder API. The priority order for fetching this value is: + /// + /// 1. validator_definitions.yml + /// 2. process level flag + pub fn get_builder_proposals(&self, validator_pubkey: &PublicKeyBytes) -> bool { + // If there is a `builder_proposals` value in the validator definitions yaml + // file, use that value. 
+ self.get_builder_proposals_defaulting( + self.validators.read().builder_proposals(validator_pubkey), + ) + } + + fn get_builder_proposals_defaulting(&self, builder_proposals: Option<bool>) -> bool { + builder_proposals + // If there's nothing in the file, try the process-level default value. + .unwrap_or(self.builder_proposals) + } + pub async fn sign_block<Payload: ExecPayload<E>>( &self, validator_pubkey: PublicKeyBytes, From 034260bd99460d2f2aa7dfdeea277704ecfa908c Mon Sep 17 00:00:00 2001 From: ethDreamer <mark@sigmaprime.io> Date: Sat, 30 Jul 2022 00:22:38 +0000 Subject: [PATCH 101/184] Initial Commit of Retrospective OTB Verification (#3372) ## Issue Addressed * #2983 ## Proposed Changes Basically followed the [instructions laid out here](https://github.com/sigp/lighthouse/issues/2983#issuecomment-1062494947) Co-authored-by: Paul Hauner <paul@paulhauner.com> Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com> --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 + .../beacon_chain/src/block_verification.rs | 4 +- .../beacon_chain/src/execution_payload.rs | 17 +- beacon_node/beacon_chain/src/lib.rs | 2 + .../src/otb_verification_service.rs | 378 ++++++++++++ .../tests/payload_invalidation.rs | 554 +++++++++++++++++- beacon_node/client/src/builder.rs | 2 + .../test_utils/execution_block_generator.rs | 1 + .../src/test_utils/handle_rpc.rs | 6 + .../execution_layer/src/test_utils/mod.rs | 14 +- beacon_node/store/src/lib.rs | 3 + beacon_node/store/src/memory_store.rs | 35 +- 12 files changed, 1013 insertions(+), 7 deletions(-) create mode 100644 beacon_node/beacon_chain/src/otb_verification_service.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 326d8b6c67..10506f3038 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -137,6 +137,9 @@ const MAX_PER_SLOT_FORK_CHOICE_DISTANCE: u64 = 4; pub const 
INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON: &str = "Justified block has an invalid execution payload."; +pub const INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON: &str = + "Finalized merge transition block is invalid."; + /// Defines the behaviour when a block/block-root for a skipped slot is requested. pub enum WhenSlotSkipped { /// If the slot is a skip slot, return `None`. @@ -528,6 +531,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// Even more efficient variant of `forwards_iter_block_roots` that will avoid cloning the head /// state if it isn't required for the requested range of blocks. + /// The range [start_slot, end_slot] is inclusive (ie `start_slot <= end_slot`) pub fn forwards_iter_block_roots_until( &self, start_slot: Slot, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 0031bd2c6c..73330e7b56 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -44,7 +44,7 @@ //! ``` use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, - PayloadNotifier, + AllowOptimisticImport, PayloadNotifier, }; use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; @@ -1199,7 +1199,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> { // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no // calls to remote servers. 
if is_valid_merge_transition_block { - validate_merge_block(&chain, block.message()).await?; + validate_merge_block(&chain, block.message(), AllowOptimisticImport::Yes).await?; }; // The specification declares that this should be run *inside* `per_block_processing`, diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index fade47e1d3..3c530aaac8 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -7,6 +7,7 @@ //! So, this module contains functions that one might expect to find in other crates, but they live //! here for good reason. +use crate::otb_verification_service::OptimisticTransitionBlock; use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, @@ -27,6 +28,12 @@ use types::*; pub type PreparePayloadResult<Payload> = Result<Payload, BlockProductionError>; pub type PreparePayloadHandle<Payload> = JoinHandle<Option<PreparePayloadResult<Payload>>>; +#[derive(PartialEq)] +pub enum AllowOptimisticImport { + Yes, + No, +} + /// Used to await the result of executing payload with a remote EE. pub struct PayloadNotifier<T: BeaconChainTypes> { pub chain: Arc<BeaconChain<T>>, @@ -146,6 +153,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( pub async fn validate_merge_block<'a, T: BeaconChainTypes>( chain: &Arc<BeaconChain<T>>, block: BeaconBlockRef<'a, T::EthSpec>, + allow_optimistic_import: AllowOptimisticImport, ) -> Result<(), BlockError<T::EthSpec>> { let spec = &chain.spec; let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); @@ -188,13 +196,18 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>( } .into()), None => { - if is_optimistic_candidate_block(chain, block.slot(), block.parent_root()).await? 
{ + if allow_optimistic_import == AllowOptimisticImport::Yes + && is_optimistic_candidate_block(chain, block.slot(), block.parent_root()).await? + { debug!( chain.log, - "Optimistically accepting terminal block"; + "Optimistically importing merge transition block"; "block_hash" => ?execution_payload.parent_hash(), "msg" => "the terminal block/parent was unavailable" ); + // Store Optimistic Transition Block in Database for later Verification + OptimisticTransitionBlock::from_block(block) + .persist_in_store::<T, _>(&chain.store)?; Ok(()) } else { Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()) diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 57a1da9dc6..ed6c2459eb 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -28,6 +28,7 @@ mod observed_aggregates; mod observed_attesters; mod observed_block_producers; pub mod observed_operations; +pub mod otb_verification_service; mod persisted_beacon_chain; mod persisted_fork_choice; mod pre_finalization_cache; @@ -45,6 +46,7 @@ mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, CountUnrealized, ForkChoiceError, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; pub use self::beacon_snapshot::BeaconSnapshot; diff --git a/beacon_node/beacon_chain/src/otb_verification_service.rs b/beacon_node/beacon_chain/src/otb_verification_service.rs new file mode 100644 index 0000000000..805b61dd9c --- /dev/null +++ b/beacon_node/beacon_chain/src/otb_verification_service.rs @@ -0,0 +1,378 @@ +use crate::execution_payload::{validate_merge_block, AllowOptimisticImport}; +use crate::{ + BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, ExecutionPayloadError, + 
INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, +}; +use itertools::process_results; +use proto_array::InvalidationOperation; +use slog::{crit, debug, error, info, warn}; +use slot_clock::SlotClock; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use state_processing::per_block_processing::is_merge_transition_complete; +use std::sync::Arc; +use store::{DBColumn, Error as StoreError, HotColdDB, KeyValueStore, StoreItem}; +use task_executor::{ShutdownReason, TaskExecutor}; +use tokio::time::sleep; +use tree_hash::TreeHash; +use types::{BeaconBlockRef, EthSpec, Hash256, Slot}; +use DBColumn::OptimisticTransitionBlock as OTBColumn; + +#[derive(Clone, Debug, Decode, Encode, PartialEq)] +pub struct OptimisticTransitionBlock { + root: Hash256, + slot: Slot, +} + +impl OptimisticTransitionBlock { + // types::BeaconBlockRef<'_, <T as BeaconChainTypes>::EthSpec> + pub fn from_block<E: EthSpec>(block: BeaconBlockRef<E>) -> Self { + Self { + root: block.tree_hash_root(), + slot: block.slot(), + } + } + + pub fn root(&self) -> &Hash256 { + &self.root + } + + pub fn slot(&self) -> &Slot { + &self.slot + } + + pub fn persist_in_store<T, A>(&self, store: A) -> Result<(), StoreError> + where + T: BeaconChainTypes, + A: AsRef<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>, + { + if store + .as_ref() + .item_exists::<OptimisticTransitionBlock>(&self.root)? + { + Ok(()) + } else { + store.as_ref().put_item(&self.root, self) + } + } + + pub fn remove_from_store<T, A>(&self, store: A) -> Result<(), StoreError> + where + T: BeaconChainTypes, + A: AsRef<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>, + { + store + .as_ref() + .hot_db + .key_delete(OTBColumn.into(), self.root.as_bytes()) + } + + fn is_canonical<T: BeaconChainTypes>( + &self, + chain: &BeaconChain<T>, + ) -> Result<bool, BeaconChainError> { + Ok(chain + .forwards_iter_block_roots_until(self.slot, self.slot)? + .next() + .transpose()? 
+ .map(|(root, _)| root) + == Some(self.root)) + } +} + +impl StoreItem for OptimisticTransitionBlock { + fn db_column() -> DBColumn { + OTBColumn + } + + fn as_store_bytes(&self) -> Vec<u8> { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> { + Ok(Self::from_ssz_bytes(bytes)?) + } +} + +/// The routine is expected to run once per epoch, 1/4th through the epoch. +pub const EPOCH_DELAY_FACTOR: u32 = 4; + +/// Spawns a routine which checks the validity of any optimistically imported transition blocks +/// +/// This routine will run once per epoch, at `epoch_duration / EPOCH_DELAY_FACTOR` after +/// the start of each epoch. +/// +/// The service will not be started if there is no `execution_layer` on the `chain`. +pub fn start_otb_verification_service<T: BeaconChainTypes>( + executor: TaskExecutor, + chain: Arc<BeaconChain<T>>, +) { + // Avoid spawning the service if there's no EL, it'll just error anyway. + if chain.execution_layer.is_some() { + executor.spawn( + async move { otb_verification_service(chain).await }, + "otb_verification_service", + ); + } +} + +pub fn load_optimistic_transition_blocks<T: BeaconChainTypes>( + chain: &BeaconChain<T>, +) -> Result<Vec<OptimisticTransitionBlock>, StoreError> { + process_results(chain.store.hot_db.iter_column(OTBColumn), |iter| { + iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes)) + .collect() + })? +} + +#[derive(Debug)] +pub enum Error { + ForkChoice(String), + BeaconChain(BeaconChainError), + StoreError(StoreError), + NoBlockFound(OptimisticTransitionBlock), +} + +pub async fn validate_optimistic_transition_blocks<T: BeaconChainTypes>( + chain: &Arc<BeaconChain<T>>, + otbs: Vec<OptimisticTransitionBlock>, +) -> Result<(), Error> { + let finalized_slot = chain + .canonical_head + .fork_choice_read_lock() + .get_finalized_block() + .map_err(|e| Error::ForkChoice(format!("{:?}", e)))? 
+ .slot; + + // separate otbs into + // non-canonical + // finalized canonical + // unfinalized canonical + let mut non_canonical_otbs = vec![]; + let (finalized_canonical_otbs, unfinalized_canonical_otbs) = process_results( + otbs.into_iter().map(|otb| { + otb.is_canonical(chain) + .map(|is_canonical| (otb, is_canonical)) + }), + |pair_iter| { + pair_iter + .filter_map(|(otb, is_canonical)| { + if is_canonical { + Some(otb) + } else { + non_canonical_otbs.push(otb); + None + } + }) + .partition::<Vec<_>, _>(|otb| *otb.slot() <= finalized_slot) + }, + ) + .map_err(Error::BeaconChain)?; + + // remove non-canonical blocks that conflict with finalized checkpoint from the database + for otb in non_canonical_otbs { + if *otb.slot() <= finalized_slot { + otb.remove_from_store::<T, _>(&chain.store) + .map_err(Error::StoreError)?; + } + } + + // ensure finalized canonical otb are valid, otherwise kill client + for otb in finalized_canonical_otbs { + match chain.get_block(otb.root()).await { + Ok(Some(block)) => { + match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await + { + Ok(()) => { + // merge transition block is valid, remove it from OTB + otb.remove_from_store::<T, _>(&chain.store) + .map_err(Error::StoreError)?; + info!( + chain.log, + "Validated merge transition block"; + "block_root" => ?otb.root(), + "type" => "finalized" + ); + } + // The block was not able to be verified by the EL. Leave the OTB in the + // database since the EL is likely still syncing and may verify the block + // later. + Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::UnverifiedNonOptimisticCandidate, + )) => (), + Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::InvalidTerminalPoWBlock { .. }, + )) => { + // Finalized Merge Transition Block is Invalid! Kill the Client! + crit!( + chain.log, + "Finalized merge transition block is invalid!"; + "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. 
\ + You may be on a hostile network.", + "block_hash" => ?block.canonical_root() + ); + let mut shutdown_sender = chain.shutdown_sender(); + if let Err(e) = shutdown_sender.try_send(ShutdownReason::Failure( + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + )) { + crit!( + chain.log, + "Failed to shut down client"; + "error" => ?e, + "shutdown_reason" => INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON + ); + } + } + _ => {} + } + } + Ok(None) => return Err(Error::NoBlockFound(otb)), + // Our database has pruned the payload and the payload was unavailable on the EL since + // the EL is still syncing or the payload is non-canonical. + Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), + Err(e) => return Err(Error::BeaconChain(e)), + } + } + + // attempt to validate any non-finalized canonical otb blocks + for otb in unfinalized_canonical_otbs { + match chain.get_block(otb.root()).await { + Ok(Some(block)) => { + match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await + { + Ok(()) => { + // merge transition block is valid, remove it from OTB + otb.remove_from_store::<T, _>(&chain.store) + .map_err(Error::StoreError)?; + info!( + chain.log, + "Validated merge transition block"; + "block_root" => ?otb.root(), + "type" => "not finalized" + ); + } + // The block was not able to be verified by the EL. Leave the OTB in the + // database since the EL is likely still syncing and may verify the block + // later. + Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::UnverifiedNonOptimisticCandidate, + )) => (), + Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::InvalidTerminalPoWBlock { .. 
}, + )) => { + // Unfinalized Merge Transition Block is Invalid -> Run process_invalid_execution_payload + warn!( + chain.log, + "Merge transition block invalid"; + "block_root" => ?otb.root() + ); + chain + .process_invalid_execution_payload( + &InvalidationOperation::InvalidateOne { + block_root: *otb.root(), + }, + ) + .await + .map_err(|e| { + warn!( + chain.log, + "Error checking merge transition block"; + "error" => ?e, + "location" => "process_invalid_execution_payload" + ); + Error::BeaconChain(e) + })?; + } + _ => {} + } + } + Ok(None) => return Err(Error::NoBlockFound(otb)), + // Our database has pruned the payload and the payload was unavailable on the EL since + // the EL is still syncing or the payload is non-canonical. + Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), + Err(e) => return Err(Error::BeaconChain(e)), + } + } + + Ok(()) +} + +/// Loop until any optimistically imported merge transition blocks have been verified and +/// the merge has been finalized. +async fn otb_verification_service<T: BeaconChainTypes>(chain: Arc<BeaconChain<T>>) { + let epoch_duration = chain.slot_clock.slot_duration() * T::EthSpec::slots_per_epoch() as u32; + loop { + match chain + .slot_clock + .duration_to_next_epoch(T::EthSpec::slots_per_epoch()) + { + Some(duration) => { + let additional_delay = epoch_duration / EPOCH_DELAY_FACTOR; + sleep(duration + additional_delay).await; + + debug!( + chain.log, + "OTB verification service firing"; + ); + + if !is_merge_transition_complete( + &chain.canonical_head.cached_head().snapshot.beacon_state, + ) { + // We are pre-merge. Nothing to do yet. 
+ continue; + } + + // load all optimistically imported transition blocks from the database + match load_optimistic_transition_blocks(chain.as_ref()) { + Ok(otbs) => { + if otbs.is_empty() { + if chain + .canonical_head + .fork_choice_read_lock() + .get_finalized_block() + .map_or(false, |block| { + block.execution_status.is_execution_enabled() + }) + { + // there are no optimistic blocks in the database, we can exit + // the service since the merge transition is finalized and we'll + // never see another transition block + break; + } else { + debug!( + chain.log, + "No optimistic transition blocks"; + "info" => "waiting for the merge transition to finalize" + ) + } + } + if let Err(e) = validate_optimistic_transition_blocks(&chain, otbs).await { + warn!( + chain.log, + "Error while validating optimistic transition blocks"; + "error" => ?e + ); + } + } + Err(e) => { + error!( + chain.log, + "Error loading optimistic transition blocks"; + "error" => ?e + ); + } + }; + } + None => { + error!(chain.log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. 
+ sleep(chain.slot_clock.slot_duration()).await; + } + }; + } + debug!( + chain.log, + "No optimistic transition blocks in database"; + "msg" => "shutting down OTB verification service" + ); +} diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 4107631378..df0c61f532 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1,13 +1,19 @@ #![cfg(not(debug_assertions))] +use beacon_chain::otb_verification_service::{ + load_optimistic_transition_blocks, validate_optimistic_transition_blocks, + OptimisticTransitionBlock, +}; use beacon_chain::{ canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, BeaconChainError, BlockError, ExecutionPayloadError, StateSkipConfig, WhenSlotSkipped, + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ json_structures::{JsonForkChoiceStateV1, JsonPayloadAttributesV1}, + test_utils::ExecutionBlockGenerator, ExecutionLayer, ForkChoiceState, PayloadAttributes, }; use fork_choice::{ @@ -44,7 +50,11 @@ struct InvalidPayloadRig { impl InvalidPayloadRig { fn new() -> Self { - let mut spec = E::default_spec(); + let spec = E::default_spec(); + Self::new_with_spec(spec) + } + + fn new_with_spec(mut spec: ChainSpec) -> Self { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(0)); @@ -1203,6 +1213,548 @@ async fn attesting_to_optimistic_head() { get_aggregated_by_slot_and_root().unwrap(); } +/// A helper struct to build out a chain of some configurable length which undergoes the merge +/// transition. 
+struct OptimisticTransitionSetup { + blocks: Vec<Arc<SignedBeaconBlock<E>>>, + execution_block_generator: ExecutionBlockGenerator<E>, +} + +impl OptimisticTransitionSetup { + async fn new(num_blocks: usize, ttd: u64) -> Self { + let mut spec = E::default_spec(); + spec.terminal_total_difficulty = ttd.into(); + let mut rig = InvalidPayloadRig::new_with_spec(spec).enable_attestations(); + rig.move_to_terminal_block(); + + let mut blocks = Vec::with_capacity(num_blocks); + for _ in 0..num_blocks { + let root = rig.import_block(Payload::Valid).await; + let block = rig.harness.chain.get_block(&root).await.unwrap().unwrap(); + blocks.push(Arc::new(block)); + } + + let execution_block_generator = rig + .harness + .mock_execution_layer + .as_ref() + .unwrap() + .server + .execution_block_generator() + .clone(); + + Self { + blocks, + execution_block_generator, + } + } +} + +/// Build a chain which has optimistically imported a transition block. +/// +/// The initial chain will be built with respect to `block_ttd`, whilst the `rig` which imports the +/// chain will operate with respect to `rig_ttd`. This allows for testing mismatched TTDs. +async fn build_optimistic_chain( + block_ttd: u64, + rig_ttd: u64, + num_blocks: usize, +) -> InvalidPayloadRig { + let OptimisticTransitionSetup { + blocks, + execution_block_generator, + } = OptimisticTransitionSetup::new(num_blocks, block_ttd).await; + // Build a brand-new testing harness. We will apply the blocks from the previous harness to + // this one. + let mut spec = E::default_spec(); + spec.terminal_total_difficulty = rig_ttd.into(); + let rig = InvalidPayloadRig::new_with_spec(spec); + + let spec = &rig.harness.chain.spec; + let mock_execution_layer = rig.harness.mock_execution_layer.as_ref().unwrap(); + + // Ensure all the execution blocks from the first rig are available in the second rig. 
+ *mock_execution_layer.server.execution_block_generator() = execution_block_generator; + + // Make the execution layer respond `SYNCING` to all `newPayload` requests. + mock_execution_layer + .server + .all_payloads_syncing_on_new_payload(true); + // Make the execution layer respond `SYNCING` to all `forkchoiceUpdated` requests. + mock_execution_layer + .server + .all_payloads_syncing_on_forkchoice_updated(); + // Make the execution layer respond `None` to all `getBlockByHash` requests. + mock_execution_layer + .server + .all_get_block_by_hash_requests_return_none(); + + let current_slot = std::cmp::max( + blocks[0].slot() + spec.safe_slots_to_import_optimistically, + num_blocks.into(), + ); + rig.harness.set_current_slot(current_slot); + + for block in blocks { + rig.harness + .chain + .process_block(block, CountUnrealized::True) + .await + .unwrap(); + } + + rig.harness.chain.recompute_head_at_current_slot().await; + + // Make the execution layer respond normally to `getBlockByHash` requests. + mock_execution_layer + .server + .all_get_block_by_hash_requests_return_natural_value(); + + // Perform some sanity checks to ensure that the transition happened exactly where we expected. 
+ let pre_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(0), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let pre_transition_block = rig + .harness + .chain + .get_block(&pre_transition_block_root) + .await + .unwrap() + .unwrap(); + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + assert_eq!( + pre_transition_block_root, + post_transition_block.parent_root(), + "the blocks form a single chain" + ); + assert!( + pre_transition_block + .message() + .body() + .execution_payload() + .unwrap() + .execution_payload + == <_>::default(), + "the block *has not* undergone the merge transition" + ); + assert!( + post_transition_block + .message() + .body() + .execution_payload() + .unwrap() + .execution_payload + != <_>::default(), + "the block *has* undergone the merge transition" + ); + + // Assert that the transition block was optimistically imported. + // + // Note: we're using the "fallback" check for optimistic status, so if the block was + // pre-finality then we'll just use the optimistic status of the finalized block. + assert!( + rig.harness + .chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_block(&post_transition_block_root) + .unwrap(), + "the transition block should be imported optimistically" + ); + + // Get the mock execution layer to respond to `getBlockByHash` requests normally again. 
+ mock_execution_layer + .server + .all_get_block_by_hash_requests_return_natural_value(); + + return rig; +} + +#[tokio::test] +async fn optimistic_transition_block_valid_unfinalized() { + let ttd = 42; + let num_blocks = 16 as usize; + let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; + + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + + assert!( + rig.cached_head() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()) + < post_transition_block.slot(), + "the transition block should not be finalized" + ); + + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "There should be one optimistic transition block" + ); + let valid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); + assert_eq!( + valid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .expect("should validate fine"); + // now that the transition block has been validated, it should have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert!( + otbs.is_empty(), + "The valid optimistic transition block should have been removed from the database", + ); +} + +#[tokio::test] +async fn optimistic_transition_block_valid_finalized() { + let ttd = 42; + let num_blocks = 130 as usize; + let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; + + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + 
.unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + + assert!( + rig.cached_head() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()) + > post_transition_block.slot(), + "the transition block should be finalized" + ); + + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "There should be one optimistic transition block" + ); + let valid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); + assert_eq!( + valid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .expect("should validate fine"); + // now that the transition block has been validated, it should have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert!( + otbs.is_empty(), + "The valid optimistic transition block should have been removed from the database", + ); +} + +#[tokio::test] +async fn optimistic_transition_block_invalid_unfinalized() { + let block_ttd = 42; + let rig_ttd = 1337; + let num_blocks = 22 as usize; + let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; + + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + + assert!( + rig.cached_head() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()) + < post_transition_block.slot(), + "the transition block should not be finalized" + ); + + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + 
.expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "There should be one optimistic transition block" + ); + + let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); + + // No shutdown should've been triggered. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + // It shouldn't be known as invalid yet + assert!(!rig + .execution_status(post_transition_block_root) + .is_invalid()); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .unwrap(); + + // Still no shutdown should've been triggered. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + // It should be marked invalid now + assert!(rig + .execution_status(post_transition_block_root) + .is_invalid()); + + // the invalid merge transition block should NOT have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "The invalid merge transition block should still be in the database", + ); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); +} + +#[tokio::test] +async fn optimistic_transition_block_invalid_unfinalized_syncing_ee() { + let block_ttd = 42; + let rig_ttd = 1337; + let num_blocks = 22 as usize; + let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; + + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + + assert!( + rig.cached_head() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()) + < 
post_transition_block.slot(), + "the transition block should not be finalized" + ); + + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "There should be one optimistic transition block" + ); + + let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); + + // No shutdown should've been triggered. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + // It shouldn't be known as invalid yet + assert!(!rig + .execution_status(post_transition_block_root) + .is_invalid()); + + // Make the execution layer respond `None` to all `getBlockByHash` requests to simulate a + // syncing EE. + let mock_execution_layer = rig.harness.mock_execution_layer.as_ref().unwrap(); + mock_execution_layer + .server + .all_get_block_by_hash_requests_return_none(); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .unwrap(); + + // Still no shutdown should've been triggered. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + + // It should still be marked as optimistic. + assert!(rig + .execution_status(post_transition_block_root) + .is_optimistic()); + + // the optimistic merge transition block should NOT have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "The optimistic merge transition block should still be in the database", + ); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); + + // Allow the EL to respond to `getBlockByHash`, as if it has finished syncing. 
+ mock_execution_layer + .server + .all_get_block_by_hash_requests_return_natural_value(); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .unwrap(); + + // Still no shutdown should've been triggered. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + // It should be marked invalid now + assert!(rig + .execution_status(post_transition_block_root) + .is_invalid()); + + // the invalid merge transition block should NOT have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "The invalid merge transition block should still be in the database", + ); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); +} + +#[tokio::test] +async fn optimistic_transition_block_invalid_finalized() { + let block_ttd = 42; + let rig_ttd = 1337; + let num_blocks = 130 as usize; + let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; + + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + + assert!( + rig.cached_head() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()) + > post_transition_block.slot(), + "the transition block should be finalized" + ); + + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + + assert_eq!( + otbs.len(), + 1, + "There should be one optimistic transition block" + ); + + let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); 
+ + // No shutdown should've been triggered yet. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .expect("should invalidate merge transition block and shutdown the client"); + + // The beacon chain should have triggered a shutdown. + assert_eq!( + rig.harness.shutdown_reasons(), + vec![ShutdownReason::Failure( + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON + )] + ); + + // the invalid merge transition block should NOT have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "The invalid merge transition block should still be in the database", + ); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); +} + /// Helper for running tests where we generate a chain with an invalid head and then some /// `fork_blocks` to recover it. 
struct InvalidHeadSetup { diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index b7f06183f1..4de28d8368 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -1,6 +1,7 @@ use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; use crate::Client; +use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; use beacon_chain::{ @@ -728,6 +729,7 @@ where } start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); + start_otb_verification_service(runtime_context.executor.clone(), beacon_chain.clone()); } Ok(Client { diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 6935c88f22..3620a02dfb 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -105,6 +105,7 @@ pub struct PoWBlock { pub timestamp: u64, } +#[derive(Clone)] pub struct ExecutionBlockGenerator<T: EthSpec> { /* * Common database diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index eceb50df23..975f09fa5e 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -48,6 +48,12 @@ pub async fn handle_rpc<T: EthSpec>( s.parse() .map_err(|e| format!("unable to parse hash: {:?}", e)) })?; + + // If we have a static response set, just return that. 
+ if let Some(response) = *ctx.static_get_block_by_hash_response.lock() { + return Ok(serde_json::to_value(response).unwrap()); + } + let full_tx = params .get(1) .and_then(JsonValue::as_bool) diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 2463153951..462e34e910 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -2,7 +2,7 @@ use crate::engine_api::auth::JwtKey; use crate::engine_api::{ - auth::Auth, http::JSONRPC_VERSION, PayloadStatusV1, PayloadStatusV1Status, + auth::Auth, http::JSONRPC_VERSION, ExecutionBlock, PayloadStatusV1, PayloadStatusV1Status, }; use bytes::Bytes; use environment::null_logger; @@ -96,6 +96,7 @@ impl<T: EthSpec> MockServer<T> { preloaded_responses, static_new_payload_response: <_>::default(), static_forkchoice_updated_response: <_>::default(), + static_get_block_by_hash_response: <_>::default(), _phantom: PhantomData, }); @@ -317,6 +318,16 @@ impl<T: EthSpec> MockServer<T> { self.set_forkchoice_updated_response(Self::invalid_terminal_block_status()); } + /// This will make the node appear like it is syncing. + pub fn all_get_block_by_hash_requests_return_none(&self) { + *self.ctx.static_get_block_by_hash_response.lock() = Some(None); + } + + /// The node will respond "naturally"; it will return blocks if they're known to it. + pub fn all_get_block_by_hash_requests_return_natural_value(&self) { + *self.ctx.static_get_block_by_hash_response.lock() = None; + } + /// Disables any static payload responses so the execution block generator will do its own /// verification. 
pub fn full_payload_verification(&self) { @@ -406,6 +417,7 @@ pub struct Context<T: EthSpec> { pub previous_request: Arc<Mutex<Option<serde_json::Value>>>, pub static_new_payload_response: Arc<Mutex<Option<StaticNewPayloadResponse>>>, pub static_forkchoice_updated_response: Arc<Mutex<Option<PayloadStatusV1>>>, + pub static_get_block_by_hash_response: Arc<Mutex<Option<Option<ExecutionBlock>>>>, pub _phantom: PhantomData<T>, } diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 364bda2cc4..75aeca058b 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -208,6 +208,9 @@ pub enum DBColumn { BeaconRandaoMixes, #[strum(serialize = "dht")] DhtEnrs, + /// For Optimistically Imported Merge Transition Blocks + #[strum(serialize = "otb")] + OptimisticTransitionBlock, } /// A block from the database, which might have an execution payload or not. diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index 3ff39c67f7..1473f59a4e 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -1,14 +1,17 @@ use super::{Error, ItemStore, KeyValueStore, KeyValueStoreOp}; +use crate::{ColumnIter, DBColumn}; use parking_lot::{Mutex, MutexGuard, RwLock}; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; use types::*; type DBHashMap = HashMap<Vec<u8>, Vec<u8>>; +type DBKeyMap = HashMap<Vec<u8>, HashSet<Vec<u8>>>; /// A thread-safe `HashMap` wrapper. 
pub struct MemoryStore<E: EthSpec> { db: RwLock<DBHashMap>, + col_keys: RwLock<DBKeyMap>, transaction_mutex: Mutex<()>, _phantom: PhantomData<E>, } @@ -18,6 +21,7 @@ impl<E: EthSpec> MemoryStore<E> { pub fn open() -> Self { Self { db: RwLock::new(HashMap::new()), + col_keys: RwLock::new(HashMap::new()), transaction_mutex: Mutex::new(()), _phantom: PhantomData, } @@ -41,6 +45,11 @@ impl<E: EthSpec> KeyValueStore<E> for MemoryStore<E> { fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { let column_key = Self::get_key_for_col(col, key); self.db.write().insert(column_key, val.to_vec()); + self.col_keys + .write() + .entry(col.as_bytes().to_vec()) + .or_insert_with(HashSet::new) + .insert(key.to_vec()); Ok(()) } @@ -63,6 +72,10 @@ impl<E: EthSpec> KeyValueStore<E> for MemoryStore<E> { fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { let column_key = Self::get_key_for_col(col, key); self.db.write().remove(&column_key); + self.col_keys + .write() + .get_mut(&col.as_bytes().to_vec()) + .map(|set| set.remove(key)); Ok(()) } @@ -81,6 +94,26 @@ impl<E: EthSpec> KeyValueStore<E> for MemoryStore<E> { Ok(()) } + // pub type ColumnIter<'a> = Box<dyn Iterator<Item = Result<(Hash256, Vec<u8>), Error>> + 'a>; + fn iter_column(&self, column: DBColumn) -> ColumnIter { + let col = column.as_str(); + if let Some(keys) = self + .col_keys + .read() + .get(col.as_bytes()) + .map(|set| set.iter().cloned().collect::<Vec<_>>()) + { + Box::new(keys.into_iter().filter_map(move |key| { + let hash = Hash256::from_slice(&key); + self.get_bytes(col, &key) + .transpose() + .map(|res| res.map(|bytes| (hash, bytes))) + })) + } else { + Box::new(std::iter::empty()) + } + } + fn begin_rw_transaction(&self) -> MutexGuard<()> { self.transaction_mutex.lock() } From b3ce8d0de90a7571e315fe8fee6591b1decc4155 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay <pawandhananjay@gmail.com> Date: Sat, 30 Jul 2022 00:22:39 +0000 Subject: [PATCH 102/184] Fix penalties in sync 
methods (#3384) ## Issue Addressed N/A ## Proposed Changes Uses the `penalize_peer` function added in #3350 in sync methods as well. The existing code in sync methods missed the `ExecutionPayloadError::UnverifiedNonOptimisticCandidate` case. --- .../src/beacon_processor/worker/sync_methods.rs | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index a27ba7bfa0..3b2429ee9b 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -7,10 +7,10 @@ use crate::beacon_processor::DuplicateCache; use crate::metrics; use crate::sync::manager::{BlockProcessType, SyncMessage}; use crate::sync::{BatchProcessResult, ChainId}; +use beacon_chain::CountUnrealized; use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, }; -use beacon_chain::{CountUnrealized, ExecutionPayloadError}; use lighthouse_network::PeerAction; use slog::{debug, error, info, warn}; use std::sync::Arc; @@ -467,24 +467,22 @@ impl<T: BeaconChainTypes> Worker<T> { mode: FailureMode::ConsensusLayer, }) } - BlockError::ExecutionPayloadError(e) => match &e { - ExecutionPayloadError::NoExecutionConnection { .. } - | ExecutionPayloadError::RequestFailed { .. } => { + ref err @ BlockError::ExecutionPayloadError(ref epe) => { + if !epe.penalize_peer() { // These errors indicate an issue with the EL and not the `ChainSegment`. // Pause the syncing while the EL recovers debug!(self.log, "Execution layer verification failed"; "outcome" => "pausing sync", - "err" => ?e + "err" => ?err ); Err(ChainSegmentFailed { - message: format!("Execution layer offline. Reason: {:?}", e), + message: format!("Execution layer offline. Reason: {:?}", err), // Do not penalize peers for internal errors. 
peer_action: None, mode: FailureMode::ExecutionLayer { pause_sync: true }, }) - } - err => { + } else { debug!(self.log, "Invalid execution payload"; "error" => ?err @@ -498,7 +496,7 @@ impl<T: BeaconChainTypes> Worker<T> { mode: FailureMode::ExecutionLayer { pause_sync: false }, }) } - }, + } other => { debug!( self.log, "Invalid block received"; From fdfdb9b57ced582cb1dbcdf1748243b589e19059 Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Sat, 30 Jul 2022 00:22:41 +0000 Subject: [PATCH 103/184] Enable `count-unrealized` by default (#3389) ## Issue Addressed Enable https://github.com/sigp/lighthouse/pull/3322 by default on all networks. The feature can be opted out of using `--count-unrealized=false` (the CLI flag is updated to take a parameter). --- beacon_node/src/cli.rs | 7 ++++--- beacon_node/src/config.rs | 5 ++--- lighthouse/tests/beacon_node.rs | 31 +++++++++++++++++++++++++++++++ 3 files changed, 37 insertions(+), 6 deletions(-) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 3b78d8f684..5f205feeac 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -752,8 +752,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("count-unrealized") .long("count-unrealized") .hidden(true) - .help("**EXPERIMENTAL** Enables an alternative, potentially more performant FFG \ - vote tracking method.") - .takes_value(false) + .help("Enables an alternative, potentially more performant FFG \ + vote tracking method.") + .takes_value(true) + .default_value("true") ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 584a961958..6daee50de0 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -630,9 +630,8 @@ pub fn get_config<E: EthSpec>( client_config.chain.fork_choice_before_proposal_timeout_ms = timeout; } - if cli_args.is_present("count-unrealized") { - client_config.chain.count_unrealized = true; - } + client_config.chain.count_unrealized = + 
clap_utils::parse_required(cli_args, "count-unrealized")?; /* * Builder fallback configs. diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 1f6855cba4..0236ba6589 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -132,6 +132,37 @@ fn fork_choice_before_proposal_timeout_zero() { .with_config(|config| assert_eq!(config.chain.fork_choice_before_proposal_timeout_ms, 0)); } +#[test] +fn count_unrealized_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(config.chain.count_unrealized)); +} + +#[test] +fn count_unrealized_no_arg() { + CommandLineTest::new() + .flag("count-unrealized", None) + .run_with_zero_port() + .with_config(|config| assert!(config.chain.count_unrealized)); +} + +#[test] +fn count_unrealized_false() { + CommandLineTest::new() + .flag("count-unrealized", Some("false")) + .run_with_zero_port() + .with_config(|config| assert!(!config.chain.count_unrealized)); +} + +#[test] +fn count_unrealized_true() { + CommandLineTest::new() + .flag("count-unrealized", Some("true")) + .run_with_zero_port() + .with_config(|config| assert!(config.chain.count_unrealized)); +} + #[test] fn freezer_dir_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); From bcfde6e7df41bc43b7e848abf9e8d742831e4cca Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Sat, 30 Jul 2022 05:08:57 +0000 Subject: [PATCH 104/184] Indicate that invalid blocks are optimistic (#3383) ## Issue Addressed NA ## Proposed Changes This PR will make Lighthouse return blocks with invalid payloads via the API with `execution_optimistic = true`. This seems a bit awkward, however I think it's better than returning a 404 or some other error. Let's consider the case where the only possible head is invalid (#3370 deals with this). In such a scenario all of the duties endpoints will start failing because the head is invalid. 
I think it would be better if the duties endpoints continue to work, because it's likely that even though the head is invalid the duties are still based upon valid blocks and we want the VC to have them cached. There's no risk to the VC here because we won't actually produce an attestation pointing to an invalid head. Ultimately, I don't think it's particularly important for us to distinguish between optimistic and invalid blocks on the API. Neither should be trusted and the only *real* reason that we track this is so we can try and fork around the invalid blocks. ## Additional Info - ~~Blocked on #3370~~ --- beacon_node/beacon_chain/src/beacon_chain.rs | 23 +++++------ .../beacon_chain/src/canonical_head.rs | 10 +++-- .../tests/payload_invalidation.rs | 39 ++++++++++--------- beacon_node/http_api/src/attester_duties.rs | 4 +- beacon_node/http_api/src/block_id.rs | 10 ++--- beacon_node/http_api/src/lib.rs | 6 +-- beacon_node/http_api/src/proposer_duties.rs | 8 ++-- beacon_node/http_api/src/state_id.rs | 14 +++---- beacon_node/http_api/src/sync_committees.rs | 2 +- consensus/fork_choice/src/fork_choice.rs | 26 ++++++++----- .../src/proto_array_fork_choice.rs | 14 ++++++- 11 files changed, 91 insertions(+), 65 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 10506f3038..fec7fe25ff 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4131,8 +4131,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// Returns the value of `execution_optimistic` for `block`. /// /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. - /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic`. - pub fn is_optimistic_block<Payload: ExecPayload<T::EthSpec>>( + /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic` or has + /// `ExecutionStatus::Invalid`. 
+ pub fn is_optimistic_or_invalid_block<Payload: ExecPayload<T::EthSpec>>( &self, block: &SignedBeaconBlock<T::EthSpec, Payload>, ) -> Result<bool, BeaconChainError> { @@ -4142,7 +4143,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { } else { self.canonical_head .fork_choice_read_lock() - .is_optimistic_block(&block.canonical_root()) + .is_optimistic_or_invalid_block(&block.canonical_root()) .map_err(BeaconChainError::ForkChoiceError) } } @@ -4150,7 +4151,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// Returns the value of `execution_optimistic` for `head_block`. /// /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. - /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic`. + /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic` or `ExecutionStatus::Invalid`. /// /// This function will return an error if `head_block` is not present in the fork choice store /// and so should only be used on the head block or when the block *should* be present in the @@ -4158,7 +4159,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// /// There is a potential race condition when syncing where the block_root of `head_block` could /// be pruned from the fork choice store before being read. - pub fn is_optimistic_head_block<Payload: ExecPayload<T::EthSpec>>( + pub fn is_optimistic_or_invalid_head_block<Payload: ExecPayload<T::EthSpec>>( &self, head_block: &SignedBeaconBlock<T::EthSpec, Payload>, ) -> Result<bool, BeaconChainError> { @@ -4168,7 +4169,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { } else { self.canonical_head .fork_choice_read_lock() - .is_optimistic_block_no_fallback(&head_block.canonical_root()) + .is_optimistic_or_invalid_block_no_fallback(&head_block.canonical_root()) .map_err(BeaconChainError::ForkChoiceError) } } @@ -4177,17 +4178,17 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// You can optionally provide `head_info` if it was computed previously. 
/// /// Returns `Ok(false)` if the head block is pre-Bellatrix, or has `ExecutionStatus::Valid`. - /// Returns `Ok(true)` if the head block has `ExecutionStatus::Optimistic`. + /// Returns `Ok(true)` if the head block has `ExecutionStatus::Optimistic` or `ExecutionStatus::Invalid`. /// /// There is a potential race condition when syncing where the block root of `head_info` could /// be pruned from the fork choice store before being read. - pub fn is_optimistic_head(&self) -> Result<bool, BeaconChainError> { + pub fn is_optimistic_or_invalid_head(&self) -> Result<bool, BeaconChainError> { self.canonical_head .head_execution_status() - .map(|status| status.is_optimistic()) + .map(|status| status.is_optimistic_or_invalid()) } - pub fn is_optimistic_block_root( + pub fn is_optimistic_or_invalid_block_root( &self, block_slot: Slot, block_root: &Hash256, @@ -4198,7 +4199,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { } else { self.canonical_head .fork_choice_read_lock() - .is_optimistic_block_no_fallback(block_root) + .is_optimistic_or_invalid_block_no_fallback(block_root) .map_err(BeaconChainError::ForkChoiceError) } } diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index c37f266824..709382f05b 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -752,7 +752,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { ) -> Result<(), Error> { let old_snapshot = &old_cached_head.snapshot; let new_snapshot = &new_cached_head.snapshot; - let new_head_is_optimistic = new_head_proto_block.execution_status.is_optimistic(); + let new_head_is_optimistic = new_head_proto_block + .execution_status + .is_optimistic_or_invalid(); // Detect and potentially report any re-orgs. 
let reorg_distance = detect_reorg( @@ -883,7 +885,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { finalized_proto_block: ProtoBlock, ) -> Result<(), Error> { let new_snapshot = &new_cached_head.snapshot; - let finalized_block_is_optimistic = finalized_proto_block.execution_status.is_optimistic(); + let finalized_block_is_optimistic = finalized_proto_block + .execution_status + .is_optimistic_or_invalid(); self.op_pool .prune_all(&new_snapshot.beacon_state, self.epoch()?); @@ -1260,7 +1264,7 @@ fn observe_head_block_delays<E: EthSpec, S: SlotClock>( let block_time_set_as_head = timestamp_now(); let head_block_root = head_block.root; let head_block_slot = head_block.slot; - let head_block_is_optimistic = head_block.execution_status.is_optimistic(); + let head_block_is_optimistic = head_block.execution_status.is_optimistic_or_invalid(); // Calculate the total delay between the start of the slot and when it was set as head. let block_delay_total = get_slot_delay_ms(block_time_set_as_head, head_block_slot, slot_clock); diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index df0c61f532..5e03ef2335 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -291,7 +291,7 @@ impl InvalidPayloadRig { let execution_status = self.execution_status(root.into()); match forkchoice_response { - Payload::Syncing => assert!(execution_status.is_optimistic()), + Payload::Syncing => assert!(execution_status.is_strictly_optimistic()), Payload::Valid => assert!(execution_status.is_valid_and_post_bellatrix()), Payload::Invalid { .. 
} | Payload::InvalidBlockHash @@ -421,7 +421,7 @@ async fn invalid_payload_invalidates_parent() { }) .await; - assert!(rig.execution_status(roots[0]).is_optimistic()); + assert!(rig.execution_status(roots[0]).is_strictly_optimistic()); assert!(rig.execution_status(roots[1]).is_invalid()); assert!(rig.execution_status(roots[2]).is_invalid()); @@ -555,7 +555,7 @@ async fn pre_finalized_latest_valid_hash() { if slot == 1 { assert!(rig.execution_status(root).is_valid_and_post_bellatrix()); } else { - assert!(rig.execution_status(root).is_optimistic()); + assert!(rig.execution_status(root).is_strictly_optimistic()); } } } @@ -605,7 +605,7 @@ async fn latest_valid_hash_will_not_validate() { } else if slot == 1 { assert!(execution_status.is_valid_and_post_bellatrix()) } else { - assert!(execution_status.is_optimistic()) + assert!(execution_status.is_strictly_optimistic()) } } } @@ -646,7 +646,7 @@ async fn latest_valid_hash_is_junk() { if slot == 1 { assert!(rig.execution_status(root).is_valid_and_post_bellatrix()); } else { - assert!(rig.execution_status(root).is_optimistic()); + assert!(rig.execution_status(root).is_strictly_optimistic()); } } } @@ -734,7 +734,7 @@ async fn invalidates_all_descendants() { assert!(execution_status.is_valid_and_post_bellatrix()); } else if slot <= latest_valid_slot { // Blocks prior to and included the latest valid hash are not marked as valid. - assert!(execution_status.is_optimistic()); + assert!(execution_status.is_strictly_optimistic()); } else { // Blocks after the latest valid hash are invalid. assert!(execution_status.is_invalid()); @@ -791,7 +791,9 @@ async fn switches_heads() { assert_eq!(rig.harness.head_block_root(), fork_block_root); // The fork block has not yet been validated. 
- assert!(rig.execution_status(fork_block_root).is_optimistic()); + assert!(rig + .execution_status(fork_block_root) + .is_strictly_optimistic()); for root in blocks { let slot = rig @@ -816,7 +818,7 @@ async fn switches_heads() { assert!(execution_status.is_valid_and_post_bellatrix()); } else if slot <= latest_valid_slot { // Blocks prior to and included the latest valid hash are not marked as valid. - assert!(execution_status.is_optimistic()); + assert!(execution_status.is_strictly_optimistic()); } else { // Blocks after the latest valid hash are invalid. assert!(execution_status.is_invalid()); @@ -899,8 +901,8 @@ async fn manually_validate_child() { let parent = rig.import_block(Payload::Syncing).await; let child = rig.import_block(Payload::Syncing).await; - assert!(rig.execution_status(parent).is_optimistic()); - assert!(rig.execution_status(child).is_optimistic()); + assert!(rig.execution_status(parent).is_strictly_optimistic()); + assert!(rig.execution_status(child).is_strictly_optimistic()); rig.validate_manually(child); @@ -917,13 +919,13 @@ async fn manually_validate_parent() { let parent = rig.import_block(Payload::Syncing).await; let child = rig.import_block(Payload::Syncing).await; - assert!(rig.execution_status(parent).is_optimistic()); - assert!(rig.execution_status(child).is_optimistic()); + assert!(rig.execution_status(parent).is_strictly_optimistic()); + assert!(rig.execution_status(child).is_strictly_optimistic()); rig.validate_manually(parent); assert!(rig.execution_status(parent).is_valid_and_post_bellatrix()); - assert!(rig.execution_status(child).is_optimistic()); + assert!(rig.execution_status(child).is_strictly_optimistic()); } #[tokio::test] @@ -1124,7 +1126,7 @@ async fn attesting_to_optimistic_head() { "the head should be the latest imported block" ); assert!( - rig.execution_status(root).is_optimistic(), + rig.execution_status(root).is_strictly_optimistic(), "the head should be optimistic" ); @@ -1371,7 +1373,7 @@ async fn 
build_optimistic_chain( .chain .canonical_head .fork_choice_read_lock() - .is_optimistic_block(&post_transition_block_root) + .is_optimistic_or_invalid_block(&post_transition_block_root) .unwrap(), "the transition block should be imported optimistically" ); @@ -1636,7 +1638,7 @@ async fn optimistic_transition_block_invalid_unfinalized_syncing_ee() { // It should still be marked as optimistic. assert!(rig .execution_status(post_transition_block_root) - .is_optimistic()); + .is_strictly_optimistic()); // the optimistic merge transition block should NOT have been removed from the database let otbs = load_optimistic_transition_blocks(&rig.harness.chain) @@ -1913,8 +1915,9 @@ async fn recover_from_invalid_head_after_persist_and_reboot() { .chain .canonical_head .fork_choice_read_lock() - .is_optimistic_block(&resumed_head.head_block_root()) - .unwrap(), + .get_block_execution_status(&resumed_head.head_block_root()) + .unwrap() + .is_strictly_optimistic(), "the invalid block should have become optimistic" ); } diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 6805d7104c..9febae5b19 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -68,7 +68,7 @@ fn cached_attestation_duties<T: BeaconChainTypes>( duties, request_indices, dependent_root, - execution_status.is_optimistic(), + execution_status.is_optimistic_or_invalid(), chain, ) } @@ -95,7 +95,7 @@ fn compute_historic_attester_duties<T: BeaconChainTypes>( head.beacon_state_root(), head.beacon_state .clone_with(CloneConfig::committee_caches_only()), - execution_status.is_optimistic(), + execution_status.is_optimistic_or_invalid(), )) } else { None diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 91425e2f10..e418849040 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -33,7 +33,7 @@ impl BlockId { 
.map_err(warp_utils::reject::beacon_chain_error)?; Ok(( cached_head.head_block_root(), - execution_status.is_optimistic(), + execution_status.is_optimistic_or_invalid(), )) } CoreBlockId::Genesis => Ok((chain.genesis_block_root, false)), @@ -53,7 +53,7 @@ impl BlockId { } CoreBlockId::Slot(slot) => { let execution_optimistic = chain - .is_optimistic_head() + .is_optimistic_or_invalid_head() .map_err(warp_utils::reject::beacon_chain_error)?; let root = chain .block_root_at_slot(*slot, WhenSlotSkipped::None) @@ -85,7 +85,7 @@ impl BlockId { let execution_optimistic = chain .canonical_head .fork_choice_read_lock() - .is_optimistic_block(root) + .is_optimistic_or_invalid_block(root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)?; Ok((*root, execution_optimistic)) @@ -112,7 +112,7 @@ impl BlockId { .map_err(warp_utils::reject::beacon_chain_error)?; Ok(( cached_head.snapshot.beacon_block.clone_as_blinded(), - execution_status.is_optimistic(), + execution_status.is_optimistic_or_invalid(), )) } CoreBlockId::Slot(slot) => { @@ -167,7 +167,7 @@ impl BlockId { .map_err(warp_utils::reject::beacon_chain_error)?; Ok(( cached_head.snapshot.beacon_block.clone(), - execution_status.is_optimistic(), + execution_status.is_optimistic_or_invalid(), )) } CoreBlockId::Slot(slot) => { diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index a1b23c7f03..a8e305f3c1 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -894,7 +894,7 @@ pub fn serve<T: BeaconChainTypes>( ( cached_head.head_block_root(), cached_head.snapshot.beacon_block.clone_as_blinded(), - execution_status.is_optimistic(), + execution_status.is_optimistic_or_invalid(), ) } // Only the parent root parameter, do a forwards-iterator lookup. 
@@ -1608,7 +1608,7 @@ pub fn serve<T: BeaconChainTypes>( chain .canonical_head .fork_choice_read_lock() - .is_optimistic_block(&root) + .is_optimistic_or_invalid_block(&root) .ok() } else { return Err(unsupported_version_rejection(endpoint_version)); @@ -1699,7 +1699,7 @@ pub fn serve<T: BeaconChainTypes>( let sync_distance = current_slot - head_slot; let is_optimistic = chain - .is_optimistic_head() + .is_optimistic_or_invalid_head() .map_err(warp_utils::reject::beacon_chain_error)?; let syncing_data = api_types::SyncingData { diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 13788a07b2..877d64e20f 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -62,7 +62,7 @@ pub fn proposer_duties<T: BeaconChainTypes>( chain, request_epoch, dependent_root, - execution_status.is_optimistic(), + execution_status.is_optimistic_or_invalid(), proposers, ) } else if request_epoch @@ -104,7 +104,7 @@ fn try_proposer_duties_from_cache<T: BeaconChainTypes>( .map_err(warp_utils::reject::beacon_state_error)?; let head_epoch = head_block.slot().epoch(T::EthSpec::slots_per_epoch()); let execution_optimistic = chain - .is_optimistic_head_block(head_block) + .is_optimistic_or_invalid_head_block(head_block) .map_err(warp_utils::reject::beacon_chain_error)?; let dependent_root = match head_epoch.cmp(&request_epoch) { @@ -168,7 +168,7 @@ fn compute_and_cache_proposer_duties<T: BeaconChainTypes>( chain, current_epoch, dependent_root, - execution_status.is_optimistic(), + execution_status.is_optimistic_or_invalid(), indices, ) } @@ -194,7 +194,7 @@ fn compute_historic_proposer_duties<T: BeaconChainTypes>( head.beacon_state_root(), head.beacon_state .clone_with(CloneConfig::committee_caches_only()), - execution_status.is_optimistic(), + execution_status.is_optimistic_or_invalid(), )) } else { None diff --git a/beacon_node/http_api/src/state_id.rs 
b/beacon_node/http_api/src/state_id.rs index af47c242d6..051789c953 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -28,7 +28,7 @@ impl StateId { .map_err(warp_utils::reject::beacon_chain_error)?; return Ok(( cached_head.head_state_root(), - execution_status.is_optimistic(), + execution_status.is_optimistic_or_invalid(), )); } CoreStateId::Genesis => return Ok((chain.genesis_state_root, false)), @@ -45,7 +45,7 @@ impl StateId { CoreStateId::Slot(slot) => ( *slot, chain - .is_optimistic_head() + .is_optimistic_or_invalid_head() .map_err(warp_utils::reject::beacon_chain_error)?, ), CoreStateId::Root(root) => { @@ -58,7 +58,7 @@ impl StateId { let execution_optimistic = chain .canonical_head .fork_choice_read_lock() - .is_optimistic_block_no_fallback(&hot_summary.latest_block_root) + .is_optimistic_or_invalid_block_no_fallback(&hot_summary.latest_block_root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)?; return Ok((*root, execution_optimistic)); @@ -74,7 +74,7 @@ impl StateId { .finalized_checkpoint .root; let execution_optimistic = fork_choice - .is_optimistic_block_no_fallback(&finalized_root) + .is_optimistic_or_invalid_block_no_fallback(&finalized_root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)?; return Ok((*root, execution_optimistic)); @@ -133,7 +133,7 @@ impl StateId { .snapshot .beacon_state .clone_with_only_committee_caches(), - execution_status.is_optimistic(), + execution_status.is_optimistic_or_invalid(), )); } CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)), @@ -198,7 +198,7 @@ impl StateId { .map_err(warp_utils::reject::beacon_chain_error)?; return func( &head.snapshot.beacon_state, - execution_status.is_optimistic(), + execution_status.is_optimistic_or_invalid(), ); } _ => self.state(chain)?, @@ -241,7 +241,7 @@ pub fn checkpoint_slot_and_execution_optimistic<T: BeaconChainTypes>( }; let 
execution_optimistic = fork_choice - .is_optimistic_block_no_fallback(root) + .is_optimistic_or_invalid_block_no_fallback(root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)?; diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index 54a3e075d3..77becef7df 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -40,7 +40,7 @@ pub fn sync_committee_duties<T: BeaconChainTypes>( // Even when computing duties from state, any block roots pulled using the request epoch are // still dependent on the head. So using `is_optimistic_head` is fine for both cases. let execution_optimistic = chain - .is_optimistic_head() + .is_optimistic_or_invalid_head() .map_err(warp_utils::reject::beacon_chain_error)?; // Try using the head's sync committees to satisfy the request. This should be sufficient for diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index c17c46a777..5438aaf62b 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1269,34 +1269,40 @@ where .is_descendant(self.fc_store.finalized_checkpoint().root, block_root) } - /// Returns `Ok(true)` if `block_root` has been imported optimistically. That is, the - /// execution payload has not been verified. + /// Returns `Ok(true)` if `block_root` has been imported optimistically or deemed invalid. /// - /// Returns `Ok(false)` if `block_root`'s execution payload has been verfied, if it is a - /// pre-Bellatrix block or if it is before the PoW terminal block. + /// Returns `Ok(false)` if `block_root`'s execution payload has been elected as fully VALID, if + /// it is a pre-Bellatrix block or if it is before the PoW terminal block. /// /// In the case where the block could not be found in fork-choice, it returns the /// `execution_status` of the current finalized block. 
/// /// This function assumes the `block_root` exists. - pub fn is_optimistic_block(&self, block_root: &Hash256) -> Result<bool, Error<T::Error>> { + pub fn is_optimistic_or_invalid_block( + &self, + block_root: &Hash256, + ) -> Result<bool, Error<T::Error>> { if let Some(status) = self.get_block_execution_status(block_root) { - Ok(status.is_optimistic()) + Ok(status.is_optimistic_or_invalid()) } else { - Ok(self.get_finalized_block()?.execution_status.is_optimistic()) + Ok(self + .get_finalized_block()? + .execution_status + .is_optimistic_or_invalid()) } } /// The same as `is_optimistic_block` but does not fallback to `self.get_finalized_block` /// when the block cannot be found. /// - /// Intended to be used when checking if the head has been imported optimistically. - pub fn is_optimistic_block_no_fallback( + /// Intended to be used when checking if the head has been imported optimistically or is + /// invalid. + pub fn is_optimistic_or_invalid_block_no_fallback( &self, block_root: &Hash256, ) -> Result<bool, Error<T::Error>> { if let Some(status) = self.get_block_execution_status(block_root) { - Ok(status.is_optimistic()) + Ok(status.is_optimistic_or_invalid()) } else { Err(Error::MissingProtoArrayBlock(*block_root)) } diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 3ecdc68a2e..306c986018 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -89,10 +89,22 @@ impl ExecutionStatus { /// /// - Has execution enabled, AND /// - Has a payload that has not yet been verified by an EL. - pub fn is_optimistic(&self) -> bool { + pub fn is_strictly_optimistic(&self) -> bool { matches!(self, ExecutionStatus::Optimistic(_)) } + /// Returns `true` if the block: + /// + /// - Has execution enabled, AND + /// - Has a payload that has not yet been verified by an EL, OR. 
+ /// - Has a payload that has been deemed invalid by an EL. + pub fn is_optimistic_or_invalid(&self) -> bool { + matches!( + self, + ExecutionStatus::Optimistic(_) | ExecutionStatus::Invalid(_) + ) + } + /// Returns `true` if the block: /// /// - Has execution enabled, AND From 2983235650811437b44199f9c94e517e948a1e9b Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Mon, 1 Aug 2022 03:41:08 +0000 Subject: [PATCH 105/184] v2.5.0 (#3392) ## Issue Addressed NA ## Proposed Changes Bump versions. ## Additional Info - ~~Blocked on #3383~~ - ~~Awaiting further testing.~~ --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a93bd7fd5e..c42dd38ac6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -436,7 +436,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.4.0" +version = "2.5.0" dependencies = [ "beacon_chain", "clap", @@ -593,7 +593,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.4.0" +version = "2.5.0" dependencies = [ "beacon_node", "clap", @@ -3102,7 +3102,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.4.0" +version = "2.5.0" dependencies = [ "account_utils", "bls", @@ -3601,7 +3601,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.4.0" +version = "2.5.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 73e5ad65cc..ef430c2bc3 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.4.0" +version = "2.5.0" authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"] edition = "2021" diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml 
index 88651023f2..8523237c69 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "2.4.0" +version = "2.5.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 5e2862951e..7ba1afac60 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.4.0-", - fallback = "Lighthouse/v2.4.0" + prefix = "Lighthouse/v2.5.0-", + fallback = "Lighthouse/v2.5.0" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index ddf0cdc8cb..dfc8aac7bd 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "2.4.0" +version = "2.5.0" authors = ["Paul Hauner <paul@paulhauner.com>"] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 8c6f7524b9..da4ca81884 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "2.4.0" +version = "2.5.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" autotests = false From 18383a63b275e456774c2c8e03822c22111dd613 Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Mon, 1 Aug 2022 07:20:43 +0000 Subject: [PATCH 106/184] Tidy eth1/deposit contract logging (#3397) ## Issue Addressed Fixes an issue identified by @remyroy whereby we were logging a recommendation to use `--eth1-endpoints` on merge-ready setups (when the execution layer was out of sync). 
## Proposed Changes I took the opportunity to clean up the other eth1-related logs, replacing "eth1" by "deposit contract" or "execution" as appropriate. I've downgraded the severity of the `CRIT` log to `ERRO` and removed most of the recommendation text. The reason being that users lacking an execution endpoint will be informed by the new `WARN Not merge ready` log pre-Bellatrix, or the regular errors from block verification post-Bellatrix. --- beacon_node/client/src/builder.rs | 2 +- beacon_node/client/src/notifier.rs | 4 +- beacon_node/eth1/src/service.rs | 63 +++++++++++++----------------- book/src/faq.md | 6 +-- common/fallback/src/lib.rs | 2 +- 5 files changed, 35 insertions(+), 42 deletions(-) diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 4de28d8368..d4c41244d2 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -851,7 +851,7 @@ where .runtime_context .as_ref() .ok_or("caching_eth1_backend requires a runtime_context")? 
- .service_context("eth1_rpc".into()); + .service_context("deposit_contract_rpc".into()); let beacon_chain_builder = self .beacon_chain_builder .ok_or("caching_eth1_backend requires a beacon_chain_builder")?; diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 74947c16f5..9f82cd2012 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -441,14 +441,14 @@ fn eth1_logging<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>, log: &Logger warn!( log, - "Syncing eth1 block cache"; + "Syncing deposit contract block cache"; "est_blocks_remaining" => distance, ); } } else { error!( log, - "Unable to determine eth1 sync status"; + "Unable to determine deposit contract sync status"; ); } } diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 6f40015fac..a4d4e5e254 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -14,7 +14,7 @@ use futures::future::TryFutureExt; use parking_lot::{RwLock, RwLockReadGuard}; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; -use slog::{crit, debug, error, info, trace, warn, Logger}; +use slog::{debug, error, info, trace, warn, Logger}; use std::fmt::Debug; use std::future::Future; use std::ops::{Range, RangeInclusive}; @@ -39,8 +39,6 @@ const GET_BLOCK_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS; /// Timeout when doing an eth_getLogs to read the deposit contract logs. const GET_DEPOSIT_LOG_TIMEOUT_MILLIS: u64 = 60_000; -const WARNING_MSG: &str = "BLOCK PROPOSALS WILL FAIL WITHOUT VALID, SYNCED ETH1 CONNECTION"; - /// Number of blocks to download if the node detects it is lagging behind due to an inaccurate /// relationship between block-number-based follow distance and time-based follow distance. 
const CATCHUP_BATCH_SIZE: u64 = 128; @@ -202,7 +200,7 @@ async fn endpoint_state( if chain_id == Eth1Id::Custom(0) { warn!( log, - "Remote eth1 node is not synced"; + "Remote execution node is not synced"; "endpoint" => %endpoint, "action" => "trying fallbacks" ); @@ -211,11 +209,11 @@ async fn endpoint_state( if &chain_id != config_chain_id { warn!( log, - "Invalid eth1 chain id. Please switch to correct chain id on endpoint"; + "Invalid execution chain ID. Please switch to correct chain ID on endpoint"; "endpoint" => %endpoint, "action" => "trying fallbacks", - "expected" => format!("{:?}",config_chain_id), - "received" => format!("{:?}", chain_id), + "expected" => ?config_chain_id, + "received" => ?chain_id, ); Err(EndpointError::WrongChainId) } else { @@ -252,7 +250,7 @@ async fn get_remote_head_and_new_block_ranges( if remote_head_block.timestamp + node_far_behind_seconds < now { warn!( service.log, - "Eth1 endpoint is not synced"; + "Execution endpoint is not synced"; "endpoint" => %endpoint, "last_seen_block_unix_timestamp" => remote_head_block.timestamp, "action" => "trying fallback" @@ -264,7 +262,7 @@ async fn get_remote_head_and_new_block_ranges( if let SingleEndpointError::RemoteNotSynced { .. } = e { warn!( service.log, - "Eth1 endpoint is not synced"; + "Execution endpoint is not synced"; "endpoint" => %endpoint, "action" => "trying fallbacks" ); @@ -749,15 +747,11 @@ impl Service { .iter() .all(|error| matches!(error, SingleEndpointError::EndpointError(_))) { - crit!( + error!( self.log, - "Could not connect to a suitable eth1 node. Please ensure that you have \ - an eth1 http server running locally on http://localhost:8545 or specify \ - one or more (remote) endpoints using \ - `--eth1-endpoints <COMMA-SEPARATED-SERVER-ADDRESSES>`. 
\ - Also ensure that `eth` and `net` apis are enabled on the eth1 http \ - server"; - "warning" => WARNING_MSG + "No synced execution endpoint"; + "advice" => "ensure you have an execution node configured via \ + --execution-endpoint or if pre-merge, --eth1-endpoints" ); } } @@ -778,12 +772,7 @@ impl Service { get_remote_head_and_new_block_ranges(e, self, node_far_behind_seconds).await }) .await - .map_err(|e| { - format!( - "Failed to update Eth1 service: {:?}", - process_single_err(&e) - ) - })?; + .map_err(|e| format!("{:?}", process_single_err(&e)))?; if num_errors > 0 { info!(self.log, "Fetched data from fallback"; "fallback_number" => num_errors); @@ -815,16 +804,15 @@ impl Service { deposit_cache.last_processed_block = deposit_cache.cache.latest_block_number(); } - let outcome = outcome_result.map_err(|e| { - format!("Failed to update eth1 deposit cache: {:?}", process_err(e)) - })?; + let outcome = outcome_result + .map_err(|e| format!("Failed to update deposit cache: {:?}", process_err(e)))?; trace!( self.log, - "Updated eth1 deposit cache"; + "Updated deposit cache"; "cached_deposits" => self.inner.deposit_cache.read().cache.len(), "logs_imported" => outcome.logs_imported, - "last_processed_eth1_block" => self.inner.deposit_cache.read().last_processed_block, + "last_processed_execution_block" => self.inner.deposit_cache.read().last_processed_block, ); Ok::<_, String>(outcome) }; @@ -833,11 +821,16 @@ impl Service { let outcome = self .update_block_cache(Some(new_block_numbers_block_cache), &endpoints) .await - .map_err(|e| format!("Failed to update eth1 block cache: {:?}", process_err(e)))?; + .map_err(|e| { + format!( + "Failed to update deposit contract block cache: {:?}", + process_err(e) + ) + })?; trace!( self.log, - "Updated eth1 block cache"; + "Updated deposit contract block cache"; "cached_blocks" => self.inner.block_cache.read().len(), "blocks_imported" => outcome.blocks_imported, "head_block" => outcome.head_block_number, @@ -890,13 +883,13 @@ 
impl Service { match update_result { Err(e) => error!( self.log, - "Failed to update eth1 cache"; + "Error updating deposit contract cache"; "retry_millis" => update_interval.as_millis(), "error" => e, ), Ok((deposit, block)) => debug!( self.log, - "Updated eth1 cache"; + "Updated deposit contract cache"; "retry_millis" => update_interval.as_millis(), "blocks" => format!("{:?}", block), "deposits" => format!("{:?}", deposit), @@ -1180,7 +1173,7 @@ impl Service { debug!( self.log, - "Downloading eth1 blocks"; + "Downloading execution blocks"; "first" => ?required_block_numbers.first(), "last" => ?required_block_numbers.last(), ); @@ -1243,7 +1236,7 @@ impl Service { if blocks_imported > 0 { debug!( self.log, - "Imported eth1 block(s)"; + "Imported execution block(s)"; "latest_block_age" => latest_block_mins, "latest_block" => block_cache.highest_block_number(), "total_cached_blocks" => block_cache.len(), @@ -1252,7 +1245,7 @@ impl Service { } else { debug!( self.log, - "No new eth1 blocks imported"; + "No new execution blocks imported"; "latest_block" => block_cache.highest_block_number(), "cached_blocks" => block_cache.len(), ); diff --git a/book/src/faq.md b/book/src/faq.md index e14947fb05..6692d61495 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -6,7 +6,7 @@ - [What should I do if I lose my slashing protection database?](#what-should-i-do-if-i-lose-my-slashing-protection-database) - [How do I update lighthouse?](#how-do-i-update-lighthouse) - [I can't compile lighthouse](#i-cant-compile-lighthouse) -- [What is "Syncing eth1 block cache"](#what-is-syncing-eth1-block-cache) +- [What is "Syncing deposit contract block cache"](#what-is-syncing-deposit-contract-block-cache) - [Can I use redundancy in my staking setup?](#can-i-use-redundancy-in-my-staking-setup) - [How can I monitor my validators](#how-can-i-monitor-my-validators) @@ -154,10 +154,10 @@ You will just also need to make sure the code you have checked out is up to date See 
[here.](./installation-source.md#troubleshooting) -### What is "Syncing eth1 block cache" +### What is "Syncing deposit contract block cache" ``` -Nov 30 21:04:28.268 WARN Syncing eth1 block cache est_blocks_remaining: initializing deposits, service: slot_notifier +Nov 30 21:04:28.268 WARN Syncing deposit contract block cache est_blocks_remaining: initializing deposits, service: slot_notifier ``` This log indicates that your beacon node is downloading blocks and deposits diff --git a/common/fallback/src/lib.rs b/common/fallback/src/lib.rs index d91de09be0..70f327d204 100644 --- a/common/fallback/src/lib.rs +++ b/common/fallback/src/lib.rs @@ -45,7 +45,7 @@ impl<T> Fallback<T> { { match error { FallbackError::AllErrored(v) => format!( - "All fallback errored: {}", + "All fallbacks errored: {}", join( zip(self.servers.iter().map(f), v.iter()) .map(|(server, error)| format!("{} => {:?}", server, error)), From 3b056232d8eb90a26485fcde59d4f699654fc7b5 Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Tue, 2 Aug 2022 00:58:23 +0000 Subject: [PATCH 107/184] Add list of DB migrations to docs (#3399) ## Proposed Changes Add a list of schema version changes to the book. I hope this will be helpful for users upgrading to v2.5.0, to know that they can downgrade to schema v9 to run v2.3.0/v2.4.0 or to schema v8 to run v2.2.0/v2.1.0. --- book/src/database-migrations.md | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index ce7ff21328..6bbe1345d3 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -12,15 +12,29 @@ command for applying database downgrades. **Everything on this page applies to the Lighthouse _beacon node_, not to the validator client or the slasher**. +## List of schema versions + +| Lighthouse version | Release date | Schema version | Downgrade available? 
| +|--------------------|--------------|----------------|----------------------| +| v2.0.0 | Oct 2021 | v5 | no | +| v2.1.0 | Jan 2022 | v8 | no | +| v2.2.0 | Apr 2022 | v8 | no | +| v2.3.0 | May 2022 | v9 | yes (pre Bellatrix) | +| v2.4.0 | Jul 2022 | v9 | yes (pre Bellatrix) | +| v2.5.0 | Aug 2022 | v11 | yes | + +> **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release +> (e.g. v2.3.0). + ## How to apply a database downgrade To apply a downgrade you need to use the `lighthouse db migrate` command with the correct parameters. 1. Make sure you have a copy of the latest version of Lighthouse. This will be the version that knows about the latest schema change, and has the ability to revert it. -2. Work out the schema version you would like to downgrade to by checking the Lighthouse release - notes. E.g. if you want to downgrade from v2.3.0, which upgraded the version from v8 to v9, then - you'll want to _downgrade_ to v8 in order to run v2.2.x or earlier. +2. Work out the schema version you would like to downgrade to by checking the table above, or the + Lighthouse release notes. E.g. if you want to downgrade from v2.3.0, which upgraded the version + from v8 to v9, then you'll want to _downgrade_ to v8 in order to run v2.2.x or earlier. 3. **Ensure that downgrading is feasible**. Not all schema upgrades can be reverted, and some of them are time-sensitive. The release notes will state whether a downgrade is available and whether any caveats apply to it. From 807bc8b0b317580885f20f4de3abcd2914f1e74e Mon Sep 17 00:00:00 2001 From: Justin Traglia <jtraglia@pm.me> Date: Tue, 2 Aug 2022 00:58:24 +0000 Subject: [PATCH 108/184] Fix a few typos in option help strings (#3401) ## Proposed Changes Fixes a typo I noticed while looking at options. 
--- beacon_node/src/cli.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 5f205feeac..3515263878 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -148,7 +148,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { If a DNS address is provided, the enr-address is set to the IP address it resolves to and \ does not auto-update based on PONG responses in discovery. \ Set this only if you are sure other nodes can connect to your local node on this address. \ - Discovery will automatically find your external address,if possible.") + Discovery will automatically find your external address, if possible.") .requires("enr-udp-port") .takes_value(true), ) @@ -441,7 +441,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .alias("jwt-id") .help("Used by the beacon node to communicate a unique identifier to execution nodes \ during JWT authentication. It corresponds to the 'id' field in the JWT claims object.\ - Set to empty by deafult") + Set to empty by default") .takes_value(true) ) .arg( @@ -451,7 +451,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .alias("jwt-version") .help("Used by the beacon node to communicate a client version to execution nodes \ during JWT authentication. It corresponds to the 'clv' field in the JWT claims object.\ - Set to empty by deafult") + Set to empty by default") .takes_value(true) ) .arg( From d23437f726f88249ff735215e2a2f3767ddb5d6f Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Tue, 2 Aug 2022 00:58:25 +0000 Subject: [PATCH 109/184] Ensure FC uses the current slot from the store (#3402) ## Issue Addressed NA ## Proposed Changes Ensure that we read the current slot from the `fc_store` rather than the slot clock. This is because the `fc_store` will never allow the slot to go backwards, even if the system clock does. The `ProtoArray::find_head` function assumes a non-decreasing slot. 
This issue can cause logs like this: ``` ERRO Error whist recomputing head, error: ForkChoiceError(ProtoArrayError("find_head failed: InvalidBestNode(InvalidBestNodeInfo { start_root: 0xb22655aa2ae23075a60bd40797b3ba220db33d6fb86fa7910f0ed48e34bda72f, justified_checkpoint: Checkpoint { epoch: Epoch(111569), root: 0xb22655aa2ae23075a60bd40797b3ba220db33d6fb86fa7910f0ed48e34bda72f }, finalized_checkpoint: Checkpoint { epoch: Epoch(111568), root: 0x6140797e40c587b0d3f159483bbc603accb7b3af69891979d63efac437f9896f }, head_root: 0xb22655aa2ae23075a60bd40797b3ba220db33d6fb86fa7910f0ed48e34bda72f, head_justified_checkpoint: Some(Checkpoint { epoch: Epoch(111568), root: 0x6140797e40c587b0d3f159483bbc603accb7b3af69891979d63efac437f9896f }), head_finalized_checkpoint: Some(Checkpoint { epoch: Epoch(111567), root: 0x59b913d37383a158a9ea5546a572acc79e2cdfbc904c744744789d2c3814c570 }) })")), service: beacon, module: beacon_chain::canonical_head:499 ``` We expect nodes to automatically recover from this issue within seconds without any major impact. However, having *any* errors in the path of fork choice is undesirable and should be avoided. 
## Additional Info NA --- consensus/fork_choice/src/fork_choice.rs | 19 ++++++++++++------- consensus/proto_array/src/error.rs | 3 ++- consensus/proto_array/src/proto_array.rs | 1 + 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 5438aaf62b..c8d119a99b 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -485,10 +485,13 @@ where /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#get_head pub fn get_head( &mut self, - current_slot: Slot, + system_time_current_slot: Slot, spec: &ChainSpec, ) -> Result<Hash256, Error<T::Error>> { - self.update_time(current_slot, spec)?; + // Provide the slot (as per the system clock) to the `fc_store` and then return its view of + // the current slot. The `fc_store` will ensure that the `current_slot` is never + // decreasing, a property which we must maintain. + let current_slot = self.update_time(system_time_current_slot, spec)?; let store = &mut self.fc_store; @@ -639,7 +642,7 @@ where #[allow(clippy::too_many_arguments)] pub fn on_block<Payload: ExecPayload<E>>( &mut self, - current_slot: Slot, + system_time_current_slot: Slot, block: BeaconBlockRef<E, Payload>, block_root: Hash256, block_delay: Duration, @@ -648,7 +651,10 @@ where spec: &ChainSpec, count_unrealized: CountUnrealized, ) -> Result<(), Error<T::Error>> { - let current_slot = self.update_time(current_slot, spec)?; + // Provide the slot (as per the system clock) to the `fc_store` and then return its view of + // the current slot. The `fc_store` will ensure that the `current_slot` is never + // decreasing, a property which we must maintain. + let current_slot = self.update_time(system_time_current_slot, spec)?; // Parent block must be known. let parent_block = self @@ -1051,13 +1057,12 @@ where /// will not be run here. 
pub fn on_attestation( &mut self, - current_slot: Slot, + system_time_current_slot: Slot, attestation: &IndexedAttestation<E>, is_from_block: AttestationFromBlock, spec: &ChainSpec, ) -> Result<(), Error<T::Error>> { - // Ensure the store is up-to-date. - self.update_time(current_slot, spec)?; + self.update_time(system_time_current_slot, spec)?; // Ignore any attestations to the zero hash. // diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index 79b4cb2d80..826bf6c3a7 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -1,4 +1,4 @@ -use types::{Checkpoint, Epoch, ExecutionBlockHash, Hash256}; +use types::{Checkpoint, Epoch, ExecutionBlockHash, Hash256, Slot}; #[derive(Clone, PartialEq, Debug)] pub enum Error { @@ -52,6 +52,7 @@ pub enum Error { #[derive(Clone, PartialEq, Debug)] pub struct InvalidBestNodeInfo { + pub current_slot: Slot, pub start_root: Hash256, pub justified_checkpoint: Checkpoint, pub finalized_checkpoint: Checkpoint, diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 962408513e..390eb902a7 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -659,6 +659,7 @@ impl ProtoArray { // Perform a sanity check that the node is indeed valid to be the head. 
if !self.node_is_viable_for_head::<E>(best_node, current_slot) { return Err(Error::InvalidBestNode(Box::new(InvalidBestNodeInfo { + current_slot, start_root: *justified_root, justified_checkpoint: self.justified_checkpoint, finalized_checkpoint: self.finalized_checkpoint, From d0beecca20803445b19a68993af210fc4d7fc99f Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Tue, 2 Aug 2022 07:58:42 +0000 Subject: [PATCH 110/184] Make fork choice prune again (#3408) ## Issue Addressed NA ## Proposed Changes There was a regression in #3244 (released in v2.4.0) which stopped pruning fork choice (see [here](https://github.com/sigp/lighthouse/pull/3244#discussion_r935187485)). This would form a very slow memory leak, using ~100mb per month. The release has been out for ~11 days, so users should not be seeing a dangerous increase in memory, *yet*. Credits to @michaelsproul for noticing this :tada: ## Additional Info NA --- beacon_node/beacon_chain/src/canonical_head.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 709382f05b..6559487980 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -719,6 +719,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { drop(old_cached_head); // If the finalized checkpoint changed, perform some updates. + // + // The `after_finalization` function will take a write-lock on `fork_choice`, therefore it + // is a dead-lock risk to hold any other lock on fork choice at this point. if new_view.finalized_checkpoint != old_view.finalized_checkpoint { if let Err(e) = self.after_finalization(&new_cached_head, new_view, finalized_proto_block) @@ -878,6 +881,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// Perform updates to caches and other components after the finalized checkpoint has been /// changed. 
+ /// + /// This function will take a write-lock on `canonical_head.fork_choice`, therefore it would be + /// unwise to hold any lock on fork choice while calling this function. fn after_finalization( self: &Arc<Self>, new_cached_head: &CachedHead<T::EthSpec>, @@ -966,6 +972,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { self.head_tracker.clone(), )?; + // Take a write-lock on the canonical head and signal for it to prune. + self.canonical_head.fork_choice_write_lock().prune()?; + Ok(()) } From e24552d61ad34e388ddc14744dd66f2a68a8c428 Mon Sep 17 00:00:00 2001 From: Mac L <mjladson@pm.me> Date: Tue, 2 Aug 2022 23:20:51 +0000 Subject: [PATCH 111/184] Restore backwards compatibility when using older BNs (#3410) ## Issue Addressed https://github.com/status-im/nimbus-eth2/issues/3930 ## Proposed Changes We can trivially support beacon nodes which do not provide the `is_optimistic` field by wrapping the field in an `Option`. --- beacon_node/http_api/src/lib.rs | 2 +- beacon_node/http_api/tests/tests.rs | 2 +- common/eth2/src/types.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index a8e305f3c1..0152f20e98 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1704,7 +1704,7 @@ pub fn serve<T: BeaconChainTypes>( let syncing_data = api_types::SyncingData { is_syncing: network_globals.sync_state.read().is_syncing(), - is_optimistic, + is_optimistic: Some(is_optimistic), head_slot, sync_distance, }; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 38c06848cf..cc0281e454 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1300,7 +1300,7 @@ impl ApiTester { let expected = SyncingData { is_syncing: false, - is_optimistic: false, + is_optimistic: Some(false), head_slot, sync_distance, }; diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 
3e480e0827..340d38b85a 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -552,7 +552,7 @@ pub struct VersionData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SyncingData { pub is_syncing: bool, - pub is_optimistic: bool, + pub is_optimistic: Option<bool>, pub head_slot: Slot, pub sync_distance: Slot, } From 553a7949942b74df45357e0a699c1e3006252326 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Tue, 2 Aug 2022 23:20:52 +0000 Subject: [PATCH 112/184] Ignore RUSTSEC-2022-0040 - `owning_ref` soundness (#3415) ## Issue Addressed NA ## Proposed Changes We are unaffected by this issue: https://github.com/sigp/lighthouse/pull/3410#issuecomment-1203244792 ## Additional Info NA --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 53fd4143d9..55e987be8b 100644 --- a/Makefile +++ b/Makefile @@ -169,7 +169,7 @@ arbitrary-fuzz: # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: cargo install --force cargo-audit - cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159 + cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159 --ignore RUSTSEC-2022-0040 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. 
vendor: From df51a73272489fe154bd10995c96199062b6c3f7 Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Wed, 3 Aug 2022 04:23:09 +0000 Subject: [PATCH 113/184] Release v2.5.1 (#3406) ## Issue Addressed Patch release to address fork choice issues in the presence of clock drift: https://github.com/sigp/lighthouse/pull/3402 --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c42dd38ac6..1160609bec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -436,7 +436,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.5.0" +version = "2.5.1" dependencies = [ "beacon_chain", "clap", @@ -593,7 +593,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.5.0" +version = "2.5.1" dependencies = [ "beacon_node", "clap", @@ -3102,7 +3102,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.5.0" +version = "2.5.1" dependencies = [ "account_utils", "bls", @@ -3601,7 +3601,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.5.0" +version = "2.5.1" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index ef430c2bc3..9c6385e8ed 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.5.0" +version = "2.5.1" authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"] edition = "2021" diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 8523237c69..b53b8a5fd6 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "2.5.0" +version = "2.5.1" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" diff 
--git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 7ba1afac60..f5d4d44878 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.5.0-", - fallback = "Lighthouse/v2.5.0" + prefix = "Lighthouse/v2.5.1-", + fallback = "Lighthouse/v2.5.1" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index dfc8aac7bd..e54d9d8c95 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "2.5.0" +version = "2.5.1" authors = ["Paul Hauner <paul@paulhauner.com>"] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index da4ca81884..7792ad074e 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "2.5.0" +version = "2.5.1" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" autotests = false From fe6af05bf6672bb7f8e2577d0a571eee7a83bc67 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Wed, 3 Aug 2022 17:13:14 +0000 Subject: [PATCH 114/184] Use latest Geth release in EE integration tests (#3395) ## Issue Addressed NA ## Proposed Changes This PR reverts #3382 and adds the `--syncmode=full` as described here: https://github.com/sigp/lighthouse/pull/3382#issuecomment-1197680345 ## Additional Info - Blocked on #3392 --- testing/execution_engine_integration/src/geth.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index ae5210b2a3..467fd8b430 100644 --- a/testing/execution_engine_integration/src/geth.rs 
+++ b/testing/execution_engine_integration/src/geth.rs @@ -7,7 +7,7 @@ use std::{env, fs::File}; use tempfile::TempDir; use unused_port::unused_tcp_port; -// const GETH_BRANCH: &str = "master"; +const GETH_BRANCH: &str = "master"; const GETH_REPO_URL: &str = "https://github.com/ethereum/go-ethereum"; pub fn build_result(repo_dir: &Path) -> Output { @@ -26,13 +26,8 @@ pub fn build(execution_clients_dir: &Path) { build_utils::clone_repo(execution_clients_dir, GETH_REPO_URL).unwrap(); } - // TODO: this should be set back to the latest release once the following issue is resolved: - // - // - https://github.com/ethereum/go-ethereum/issues/25427 - // // Get the latest tag on the branch - // let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); - let last_release = "v1.10.20"; + let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap(); // Build geth @@ -105,6 +100,11 @@ impl GenericExecutionEngine for GethEngine { .arg("--allow-insecure-unlock") .arg("--authrpc.jwtsecret") .arg(jwt_secret_path.as_path().to_str().unwrap()) + // This flag is required to help Geth perform reliably when feeding it blocks + // one-by-one. For more information, see: + // + // https://github.com/sigp/lighthouse/pull/3382#issuecomment-1197680345 + .arg("--syncmode=full") .stdout(build_utils::build_stdio()) .stderr(build_utils::build_stdio()) .spawn() From 43ce0de73f992d0f184c722e58ab203b7086fead Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Wed, 3 Aug 2022 17:13:15 +0000 Subject: [PATCH 115/184] Downgrade log for 204 from builder (#3411) ## Issue Addressed A 204 from the connected builder just indicates there's no payload available from the builder, not that there's an issue. So I don't actually think this should be a warn. During the merge transition when we are pre-finalization a 204 will actually be expected. 
And maybe even longer if the relay chooses to delay providing payloads for a longer period post-merge. Co-authored-by: realbigsean <sean@sigmaprime.io> --- beacon_node/execution_layer/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index aea952a57d..59c8f009fa 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -605,7 +605,7 @@ impl<T: EthSpec> ExecutionLayer<T> { Ok(local) } (Ok(None), Ok(local)) => { - warn!( + info!( self.log(), "No payload provided by connected builder. \ Attempting to propose through local execution engine" From 386ced1aed4c493d8e8cbbd357d94e8cd949e588 Mon Sep 17 00:00:00 2001 From: Ramana Kumar <ramana@member.fsf.org> Date: Fri, 5 Aug 2022 01:51:39 +0000 Subject: [PATCH 116/184] Include validator indices in attestation logs (#3393) ## Issue Addressed Fixes #2967 ## Proposed Changes Collect validator indices alongside attestations when creating signed attestations (and aggregates) for inclusion in the logs. ## Additional Info This is my first time looking at Lighthouse source code and using Rust, so newbie feedback appreciated! 
--- validator_client/src/attestation_service.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index 95500fc947..cdc9b88f68 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -389,12 +389,13 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { ) .await { - Ok(()) => Some(attestation), + Ok(()) => Some((attestation, duty.validator_index)), Err(e) => { crit!( log, "Failed to sign attestation"; "error" => ?e, + "validator" => ?duty.pubkey, "committee_index" => committee_index, "slot" => slot.as_u64(), ); @@ -404,11 +405,11 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { }); // Execute all the futures in parallel, collecting any successful results. - let attestations = &join_all(signing_futures) + let (ref attestations, ref validator_indices): (Vec<_>, Vec<_>) = join_all(signing_futures) .await .into_iter() .flatten() - .collect::<Vec<Attestation<E>>>(); + .unzip(); // Post the attestations to the BN. 
match self @@ -428,6 +429,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { log, "Successfully published attestations"; "count" => attestations.len(), + "validator_indices" => ?validator_indices, "head_block" => ?attestation_data.beacon_block_root, "committee_index" => attestation_data.index, "slot" => attestation_data.slot.as_u64(), @@ -549,7 +551,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { let attestation = &signed_aggregate_and_proof.message.aggregate; info!( log, - "Successfully published attestations"; + "Successfully published attestation"; "aggregator" => signed_aggregate_and_proof.message.aggregator_index, "signatures" => attestation.aggregation_bits.num_set_bits(), "head_block" => format!("{:?}", attestation.data.beacon_block_root), @@ -566,6 +568,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { log, "Failed to publish attestation"; "error" => %e, + "aggregator" => signed_aggregate_and_proof.message.aggregator_index, "committee_index" => attestation.data.index, "slot" => attestation.data.slot.as_u64(), "type" => "aggregated", From 5d317779bb1f77d4aa563f70736b763993ed7ab8 Mon Sep 17 00:00:00 2001 From: Mac L <mjladson@pm.me> Date: Fri, 5 Aug 2022 06:46:58 +0000 Subject: [PATCH 117/184] Ensure `validator/blinded_blocks/{slot}` endpoint conforms to spec (#3429) ## Issue Addressed #3418 ## Proposed Changes - Remove `eth/v2/validator/blinded_blocks/{slot}` as this endpoint does not exist in the spec. - Return `version` in the `eth/v1/validator/blinded_blocks/{slot}` endpoint. ## Additional Info Since this removes the `v2` endpoint, this is *technically* a breaking change, but as this does not exist in the spec users may or may not be relying on this. Depending on what we feel is appropriate, I'm happy to edit this so we keep the `v2` endpoint for now but simply bring the `v1` endpoint in line with `v2`. 
--- beacon_node/http_api/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 0152f20e98..ba1dd01cc3 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1988,7 +1988,7 @@ pub fn serve<T: BeaconChainTypes>( ); // GET validator/blinded_blocks/{slot} - let get_validator_blinded_blocks = any_version + let get_validator_blinded_blocks = eth_v1 .and(warp::path("validator")) .and(warp::path("blinded_blocks")) .and(warp::path::param::<Slot>().or_else(|_| async { @@ -2001,8 +2001,7 @@ pub fn serve<T: BeaconChainTypes>( .and(warp::query::<api_types::ValidatorBlocksQuery>()) .and(chain_filter.clone()) .and_then( - |endpoint_version: EndpointVersion, - slot: Slot, + |slot: Slot, query: api_types::ValidatorBlocksQuery, chain: Arc<BeaconChain<T>>| async move { let randao_reveal = query.randao_reveal.as_ref().map_or_else( @@ -2044,7 +2043,8 @@ pub fn serve<T: BeaconChainTypes>( .to_ref() .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; - fork_versioned_response(endpoint_version, fork_name, block) + // Pose as a V2 endpoint so we return the fork `version`. + fork_versioned_response(V2, fork_name, block) .map(|response| warp::reply::json(&response)) }, ); From 83666e04fd0aaff31249f94c7c01383a20927f58 Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Fri, 5 Aug 2022 06:46:59 +0000 Subject: [PATCH 118/184] Expand merge migration docs (#3430) ## Issue Addressed Resolves #3424 ## Proposed Changes This PR expands the merge migration docs to include (hopefully) clearer guidance on the steps required. It's inspired by @winksaville's work in #3426 but takes a more drastic approach to rewriting large sections. 
* Add a prominent _When?_ section * Add links to execution engine configuration guides * Add links to community guides * Fix the location of the _Strict fee recipient_ section --- book/src/merge-migration.md | 96 ++++++++++++++++++++++++----- book/src/suggested-fee-recipient.md | 41 +++++++----- 2 files changed, 108 insertions(+), 29 deletions(-) diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index 6ed6a9977a..e2d54ea0aa 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -1,20 +1,69 @@ # Merge Migration -This document provides detail for users who have been running a Lighthouse node *before* the merge -and are now preparing their node for the merge transition. +This document provides detail for users who want to run a merge-ready Lighthouse node. -## "Pre-Merge" and "Post-Merge" +> If you are running a testnet node, this configuration is necessary _now_. -As of [v2.4.0](https://github.com/sigp/lighthouse/releases/tag/v2.4.0) Lighthouse can be considered -to have two modes: +## Necessary Configuration -- "Pre-merge": `--execution-endpoint` flag *is not* provided. -- "Post-merge": `--execution-endpoint` flag *is* provided. +There are two configuration changes required for a Lighthouse node to operate correctly throughout +the merge: -A "pre-merge" node, by definition, will fail to transition through the merge. Such a node *must* be -upgraded before the Bellatrix upgrade. +1. You *must* run your own execution engine such as Geth or Nethermind alongside Lighthouse. + You *must* update your Lighthouse configuration to connect to the execution engine using new + flags which are documented on this page in the + [Connecting to an execution engine](#connecting-to-an-execution-engine) section. +2. If your Lighthouse node has validators attached you *must* nominate an Ethereum address to + receive transactions tips from blocks proposed by your validators. 
This is covered on the + [Suggested fee recipient](./suggested-fee-recipient.md) page. -## Migration +Additionally, you _must_ update Lighthouse to a merge-compatible release in the weeks before +the merge. Merge releases are available now for all testnets. + +## When? + +You must configure your node to be merge-ready before the Bellatrix fork occurs on the network +on which your node is operating. + +* **Mainnet**: the Bellatrix fork epoch has not yet been announced. It's possible to set up a + merge-ready node now, but some execution engines will require additional configuration. Please see + the section on [Execution engine configuration](#execution-engine-configuration) below. + +* **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**: you must have a merge-ready configuration + right now. + +## Connecting to an execution engine + +The Lighthouse beacon node must connect to an execution engine in order to validate the transactions +present in post-merge blocks. Two new flags are used to configure this connection: + +- `--execution-endpoint <URL>`: the URL of the execution engine API. Often this will be + `http://localhost:8551`. +- `--execution-jwt <FILE>`: the path to the file containing the JWT secret shared by Lighthouse and the + execution engine. + +If you set up an execution engine with `--execution-endpoint` then you *must* provide a JWT secret +using `--execution-jwt`. This is a mandatory form of authentication that ensures that Lighthouse +has authority to control the execution engine. + +### Execution engine configuration + +Each execution engine has its own flags for configuring the engine API and JWT. 
Please consult +the relevant page for your execution engine for the required flags: + +- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients) +- [Nethermind: Running Nethermind Post Merge](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) +- [Besu: Prepare For The Merge](https://besu.hyperledger.org/en/stable/HowTo/Upgrade/Prepare-for-The-Merge/) + +Once you have configured your execution engine to open up the engine API (usually on port 8551) you +should add the URL to your `lighthouse bn` flags with `--execution-endpoint <URL>`, as well as +the path to the JWT secret with `--execution-jwt <FILE>`. + +> NOTE: Geth v1.10.21 or earlier requires a manual TTD override to communicate with Lighthouse over +> the engine API on mainnet. We recommend waiting for a compatible Geth release before configuring +> Lighthouse-Geth on mainnet. + +### Example Let us look at an example of the command line arguments for a pre-merge production staking BN: @@ -60,10 +109,7 @@ merge are ensuring that `--execution-endpoint` and `--execution-jwt` flags are p you can even leave the `--eth1-endpoints` flag there, it will be ignored. This is not recommended as a deprecation warning will be logged and Lighthouse *may* remove these flags in the future. -There are no changes required for the validator client, apart from ensure it has been updated to the -same version as the beacon node. Check the version with `lighthouse --version`. - -## The relationship between `--eth1-endpoints` and `--execution-endpoint` +### The relationship between `--eth1-endpoints` and `--execution-endpoint` Pre-merge users will be familiar with the `--eth1-endpoints` flag. This provides a list of Ethereum "eth1" nodes (e.g., Geth, Nethermind, etc). 
Each beacon node (BN) can have multiple eth1 endpoints @@ -90,7 +136,16 @@ contains the transaction history of the Ethereum chain, there is no longer a nee be used for all such queries. Therefore we can say that where `--execution-endpoint` is included `--eth1-endpoints` should be omitted. -## What about multiple execution endpoints? +## FAQ + +### Can I use `http://localhost:8545` for the execution endpoint? + +Most execution nodes use port `8545` for the Ethereum JSON-RPC API. Unless custom configuration is +used, an execution node _will not_ provide the necessary engine API on port `8545`. You should +not attempt to use `http://localhost:8545` as your engine URL and should instead use +`http://localhost:8551`. + +### What about multiple execution endpoints? Since an execution engine can only have one connected BN, the value of having multiple execution engines connected to the same BN is very low. An execution engine cannot be shared between BNs to @@ -99,3 +154,14 @@ reduce costs. Whilst having multiple execution engines connected to a single BN might be useful for advanced testing scenarios, Lighthouse (and other consensus clients) have decided to support *only one* execution endpoint. Such scenarios could be resolved with a custom-made HTTP proxy. + +## Additional Resources + +There are several community-maintained guides which provide more background information, as well as +guidance for specific setups. + +- [Ethereum.org: The Merge](https://ethereum.org/en/upgrades/merge/) +- [Ethereum Staking Launchpad: Merge Readiness](https://launchpad.ethereum.org/en/merge-readiness). 
+- [CoinCashew: Ethereum Merge Upgrade Checklist](https://www.coincashew.com/coins/overview-eth/ethereum-merge-upgrade-checklist-for-home-stakers-and-validators) +- [EthDocker: Merge Preparation](https://eth-docker.net/docs/About/MergePrep/) +- [Remy Roy: How to join the Goerli/Prater merge testnet](https://github.com/remyroy/ethstaker/blob/main/merge-goerli-prater.md) diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md index a584be306f..d862cf1a6c 100644 --- a/book/src/suggested-fee-recipient.md +++ b/book/src/suggested-fee-recipient.md @@ -1,8 +1,10 @@ # Suggested Fee Recipient -*Note: these documents are not relevant until the Bellatrix (Merge) upgrade has occurred.* +The _fee recipient_ is an Ethereum address nominated by a beacon chain validator to receive +tips from user transactions. If you run validators on a network that has already merged +or is due to merge soon then you should nominate a fee recipient for your validators. -## Fee recipient trust assumptions +## Background During post-merge block production, the Beacon Node (BN) will provide a `suggested_fee_recipient` to the execution node. This is a 20-byte Ethereum address which the EL might choose to set as the @@ -13,32 +15,25 @@ it may use any address it chooses. It is assumed that an honest execution node * `suggested_fee_recipient`, but users should note this trust assumption. Check out the [strict fee recipient](#strict-fee-recipient) section for how to mitigate this assumption. -The `suggested_fee_recipient` can be provided to the VC, who will transmit it to the BN. The BN also +The `suggested_fee_recipient` can be provided to the VC, which will transmit it to the BN. The BN also has a choice regarding the fee recipient it passes to the execution node, creating another noteworthy trust assumption. To be sure *you* control your fee recipient value, run your own BN and execution node (don't use third-party services). 
-The Lighthouse VC provides three methods for setting the `suggested_fee_recipient` (also known +## How to configure a suggested fee recipient + +The Lighthouse VC provides two methods for setting the `suggested_fee_recipient` (also known simply as the "fee recipient") to be passed to the execution layer during block production. The Lighthouse BN also provides a method for defining this value, should the VC not transmit a value. -Assuming trustworthy nodes, the priority for the four methods is: +Assuming trustworthy nodes, the priority for the three methods is: 1. `validator_definitions.yml` 1. `--suggested-fee-recipient` provided to the VC. 1. `--suggested-fee-recipient` provided to the BN. -## Strict Fee Recipient - -If the flag `--strict-fee-recipient` is set in the validator client, Lighthouse will refuse to sign any block whose -`fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. This applies to both the normal -block proposal flow and block proposals through the builder API. Proposals through the builder API are more likely -to have a discrepancy in `fee_recipient` so you should be aware of how your connected relay sends proposer payments before -using this flag. If this flag is used, a fee recipient mismatch in the builder API flow will result in a fallback to the -local execution engine for payload construction, where a strict fee recipient check will still be applied. - ### 1. Setting the fee recipient in the `validator_definitions.yml` Users can set the fee recipient in `validator_definitions.yml` with the `suggested_fee_recipient` @@ -168,4 +163,22 @@ curl -X DELETE \ null ``` +## Strict Fee Recipient +If the flag `--strict-fee-recipient` is set in the validator client, Lighthouse will refuse to sign any block whose +`fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. This applies to both the normal +block proposal flow and block proposals through the builder API. 
Proposals through the builder API are more likely +to have a discrepancy in `fee_recipient` so you should be aware of how your connected relay sends proposer payments before +using this flag. If this flag is used, a fee recipient mismatch in the builder API flow will result in a fallback to the +local execution engine for payload construction, where a strict fee recipient check will still be applied. + +## FAQ + +### Why do I have to nominate an Ethereum address as the fee recipient? + +You might wonder why the validator can't just accumulate transactions fees in the same way that it +accumulates other staking rewards. The reason for this is that transaction fees are computed and +validated by the execution node, and therefore need to be paid to an address that exists on the +execution chain. Validators use BLS keys which do not correspond to Ethereum addresses, so they +have no "presence" on the execution chain. Therefore it's necessary for each validator to nominate +a separate fee recipient address. From 6bc4a2cc91193438db698006740747a4b83664ef Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Fri, 5 Aug 2022 23:41:09 +0000 Subject: [PATCH 119/184] Update invalid head tests (#3400) ## Proposed Changes Update the invalid head tests so that they work with the current default fork choice configuration. Thanks @realbigsean for fixing the persistence test and the EF tests. 
Co-authored-by: realbigsean <sean@sigmaprime.io> --- beacon_node/beacon_chain/src/chain_config.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 11 +- .../beacon_chain/tests/block_verification.rs | 28 ++-- .../tests/payload_invalidation.rs | 144 +++++++++--------- testing/ef_tests/src/cases/fork_choice.rs | 2 +- 5 files changed, 101 insertions(+), 86 deletions(-) diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 2c43ca53ed..aa7ff02af1 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -51,7 +51,7 @@ impl Default for ChainConfig { builder_fallback_skips_per_epoch: 8, builder_fallback_epochs_since_finalization: 3, builder_fallback_disable_checks: false, - count_unrealized: false, + count_unrealized: true, } } } diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 6771861dfd..411bd7b1fd 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -157,6 +157,7 @@ pub struct Builder<T: BeaconChainTypes> { execution_layer: Option<ExecutionLayer<T::EthSpec>>, mock_execution_layer: Option<MockExecutionLayer<T::EthSpec>>, mock_builder: Option<TestingBuilder<T::EthSpec>>, + testing_slot_clock: Option<TestingSlotClock>, runtime: TestRuntime, log: Logger, } @@ -289,6 +290,7 @@ where execution_layer: None, mock_execution_layer: None, mock_builder: None, + testing_slot_clock: None, runtime, log, } @@ -435,6 +437,11 @@ where self } + pub fn testing_slot_clock(mut self, slot_clock: TestingSlotClock) -> Self { + self.testing_slot_clock = Some(slot_clock); + self + } + pub fn build(self) -> BeaconChainHarness<BaseHarnessType<E, Hot, Cold>> { let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1); @@ -475,7 +482,9 @@ where }; // Initialize the slot clock only if it hasn't already been initialized. 
- builder = if builder.get_slot_clock().is_none() { + builder = if let Some(testing_slot_clock) = self.testing_slot_clock { + builder.slot_clock(testing_slot_clock) + } else if builder.get_slot_clock().is_none() { builder .testing_slot_clock(Duration::from_secs(seconds_per_slot)) .expect("should configure testing slot clock") diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 88d6914036..c2283321cb 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -327,6 +327,9 @@ async fn assert_invalid_signature( item ); + // Call fork choice to update cached head (including finalization). + harness.chain.recompute_head_at_current_slot().await; + // Ensure the block will be rejected if imported on its own (without gossip checking). let ancestor_blocks = chain_segment .iter() @@ -339,19 +342,20 @@ async fn assert_invalid_signature( .chain .process_chain_segment(ancestor_blocks, CountUnrealized::True) .await; + harness.chain.recompute_head_at_current_slot().await; + + let process_res = harness + .chain + .process_block( + snapshots[block_index].beacon_block.clone(), + CountUnrealized::True, + ) + .await; assert!( - matches!( - harness - .chain - .process_block( - snapshots[block_index].beacon_block.clone(), - CountUnrealized::True - ) - .await, - Err(BlockError::InvalidSignature) - ), - "should not import individual block with an invalid {} signature", - item + matches!(process_res, Err(BlockError::InvalidSignature)), + "should not import individual block with an invalid {} signature, got: {:?}", + item, + process_res ); // NOTE: we choose not to check gossip verification here. 
It only checks one signature diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 5e03ef2335..7728b319d9 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -179,7 +179,7 @@ impl InvalidPayloadRig { /// Import a block while setting the newPayload and forkchoiceUpdated responses to `is_valid`. async fn import_block(&mut self, is_valid: Payload) -> Hash256 { - self.import_block_parametric(is_valid, is_valid, |error| { + self.import_block_parametric(is_valid, is_valid, None, |error| { matches!( error, BlockError::ExecutionPayloadError( @@ -210,13 +210,14 @@ impl InvalidPayloadRig { &mut self, new_payload_response: Payload, forkchoice_response: Payload, + slot_override: Option<Slot>, evaluate_error: F, ) -> Hash256 { let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); let head = self.harness.chain.head_snapshot(); let state = head.beacon_state.clone_with_only_committee_caches(); - let slot = state.slot() + 1; + let slot = slot_override.unwrap_or(state.slot() + 1); let (block, post_state) = self.harness.make_block(state, slot).await; let block_root = block.canonical_root(); @@ -445,9 +446,12 @@ async fn immediate_forkchoice_update_invalid_test( // Import a block which returns syncing when supplied via newPayload, and then // invalid when the forkchoice update is sent. - rig.import_block_parametric(Payload::Syncing, invalid_payload(latest_valid_hash), |_| { - false - }) + rig.import_block_parametric( + Payload::Syncing, + invalid_payload(latest_valid_hash), + None, + |_| false, + ) .await; // The head should be the latest valid block. 
@@ -497,7 +501,7 @@ async fn justified_checkpoint_becomes_invalid() { let is_valid = Payload::Invalid { latest_valid_hash: Some(parent_hash_of_justified), }; - rig.import_block_parametric(is_valid, is_valid, |error| { + rig.import_block_parametric(is_valid, is_valid, None, |error| { matches!( error, // The block import should fail since the beacon chain knows the justified payload @@ -1757,11 +1761,11 @@ async fn optimistic_transition_block_invalid_finalized() { ); } -/// Helper for running tests where we generate a chain with an invalid head and then some -/// `fork_blocks` to recover it. +/// Helper for running tests where we generate a chain with an invalid head and then a +/// `fork_block` to recover it. struct InvalidHeadSetup { rig: InvalidPayloadRig, - fork_blocks: Vec<Arc<SignedBeaconBlock<E>>>, + fork_block: Arc<SignedBeaconBlock<E>>, invalid_head: CachedHead<E>, } @@ -1776,11 +1780,59 @@ impl InvalidHeadSetup { rig.import_block(Payload::Syncing).await; } + let slots_per_epoch = E::slots_per_epoch(); + let start_slot = rig.cached_head().head_slot() + 1; + let mut opt_fork_block = None; + + assert_eq!(start_slot % slots_per_epoch, 1); + for i in 0..slots_per_epoch - 1 { + let slot = start_slot + i; + let slot_offset = slot.as_u64() % slots_per_epoch; + + rig.harness.set_current_slot(slot); + + if slot_offset == slots_per_epoch - 1 { + // Optimistic head block right before epoch boundary. + let is_valid = Payload::Syncing; + rig.import_block_parametric(is_valid, is_valid, Some(slot), |error| { + matches!( + error, + BlockError::ExecutionPayloadError( + ExecutionPayloadError::RejectedByExecutionEngine { .. } + ) + ) + }) + .await; + } else if 3 * slot_offset < 2 * slots_per_epoch { + // Valid block in previous epoch. + rig.import_block(Payload::Valid).await; + } else if slot_offset == slots_per_epoch - 2 { + // Fork block one slot prior to invalid head, not applied immediately. 
+ let parent_state = rig + .harness + .chain + .state_at_slot(slot - 1, StateSkipConfig::WithStateRoots) + .unwrap(); + let (fork_block, _) = rig.harness.make_block(parent_state, slot).await; + opt_fork_block = Some(Arc::new(fork_block)); + } else { + // Skipped slot. + }; + } + let invalid_head = rig.cached_head(); + assert_eq!( + invalid_head.head_slot() % slots_per_epoch, + slots_per_epoch - 1 + ); + + // Advance clock to new epoch to realize the justification of soon-to-be-invalid head block. + rig.harness.set_current_slot(invalid_head.head_slot() + 1); // Invalidate the head block. rig.invalidate_manually(invalid_head.head_block_root()) .await; + assert!(rig .canonical_head() .head_execution_status() @@ -1790,27 +1842,9 @@ impl InvalidHeadSetup { // Finding a new head should fail since the only possible head is not valid. rig.assert_get_head_error_contains("InvalidBestNode"); - // Build three "fork" blocks that conflict with the current canonical head. Don't apply them to - // the chain yet. - let mut fork_blocks = vec![]; - let mut parent_state = rig - .harness - .chain - .state_at_slot( - invalid_head.head_slot() - 3, - StateSkipConfig::WithStateRoots, - ) - .unwrap(); - for _ in 0..3 { - let slot = parent_state.slot() + 1; - let (fork_block, post_state) = rig.harness.make_block(parent_state, slot).await; - parent_state = post_state; - fork_blocks.push(Arc::new(fork_block)) - } - Self { rig, - fork_blocks, + fork_block: opt_fork_block.unwrap(), invalid_head, } } @@ -1820,57 +1854,22 @@ impl InvalidHeadSetup { async fn recover_from_invalid_head_by_importing_blocks() { let InvalidHeadSetup { rig, - fork_blocks, - invalid_head, + fork_block, + invalid_head: _, } = InvalidHeadSetup::new().await; - // Import the first two blocks, they should not become the head. - for i in 0..2 { - if i == 0 { - // The first block should be `VALID` during import. 
- rig.harness - .mock_execution_layer - .as_ref() - .unwrap() - .server - .all_payloads_valid_on_new_payload(); - } else { - // All blocks after the first block should return `SYNCING`. - rig.harness - .mock_execution_layer - .as_ref() - .unwrap() - .server - .all_payloads_syncing_on_new_payload(true); - } - - rig.harness - .chain - .process_block(fork_blocks[i].clone(), CountUnrealized::True) - .await - .unwrap(); - rig.recompute_head().await; - rig.assert_get_head_error_contains("InvalidBestNode"); - let new_head = rig.cached_head(); - assert_eq!( - new_head.head_block_root(), - invalid_head.head_block_root(), - "the head should not change" - ); - } - - // Import the third block, it should become the head. + // Import the fork block, it should become the head. rig.harness .chain - .process_block(fork_blocks[2].clone(), CountUnrealized::True) + .process_block(fork_block.clone(), CountUnrealized::True) .await .unwrap(); rig.recompute_head().await; let new_head = rig.cached_head(); assert_eq!( new_head.head_block_root(), - fork_blocks[2].canonical_root(), - "the third block should become the head" + fork_block.canonical_root(), + "the fork block should become the head" ); let manual_get_head = rig @@ -1880,17 +1879,19 @@ async fn recover_from_invalid_head_by_importing_blocks() { .fork_choice_write_lock() .get_head(rig.harness.chain.slot().unwrap(), &rig.harness.chain.spec) .unwrap(); - assert_eq!(manual_get_head, new_head.head_block_root(),); + assert_eq!(manual_get_head, new_head.head_block_root()); } #[tokio::test] async fn recover_from_invalid_head_after_persist_and_reboot() { let InvalidHeadSetup { rig, - fork_blocks: _, + fork_block: _, invalid_head, } = InvalidHeadSetup::new().await; + let slot_clock = rig.harness.chain.slot_clock.clone(); + // Forcefully persist the head and fork choice. 
rig.harness.chain.persist_head_and_fork_choice().unwrap(); @@ -1899,6 +1900,7 @@ async fn recover_from_invalid_head_after_persist_and_reboot() { .deterministic_keypairs(VALIDATOR_COUNT) .resumed_ephemeral_store(rig.harness.chain.store.clone()) .mock_execution_layer() + .testing_slot_clock(slot_clock) .build(); // Forget the original rig so we don't accidentally use it again. diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 65872efbe9..9efb7ada12 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -341,7 +341,7 @@ impl<E: EthSpec> Tester<E> { let result = self.block_on_dangerous( self.harness .chain - .process_block(block.clone(), CountUnrealized::True), + .process_block(block.clone(), CountUnrealized::False), )?; if result.is_ok() != valid { return Err(Error::DidntFail(format!( From aba52251479100a5900f88ff9308139435a0a625 Mon Sep 17 00:00:00 2001 From: Kirill <septengineering@pm.me> Date: Mon, 8 Aug 2022 23:56:59 +0000 Subject: [PATCH 120/184] `crypto/bls`: make `blst` dependency optional (#3387) ## Issue Addressed #3386 ## Proposed Changes * make `blst` crate `optional` * include `blst` dependency into `supranational` feature * hide `blst`-related code with `supranational` feature Co-authored-by: Kirill <kirill@aurora.dev> --- crypto/bls/Cargo.toml | 4 ++-- crypto/bls/src/impls/mod.rs | 1 + crypto/bls/src/lib.rs | 4 ++++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index 912f49c6f0..9ac468d227 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -17,12 +17,12 @@ eth2_hashing = "0.3.0" ethereum-types = "0.12.1" arbitrary = { version = "1.0", features = ["derive"], optional = true } zeroize = { version = "1.4.2", features = ["zeroize_derive"] } -blst = "0.3.3" +blst = { version = "0.3.3", optional = true } [features] default = ["supranational"] fake_crypto = [] milagro = 
["milagro_bls"] -supranational = [] +supranational = ["blst"] supranational-portable = ["supranational", "blst/portable"] supranational-force-adx = ["supranational", "blst/force-adx"] diff --git a/crypto/bls/src/impls/mod.rs b/crypto/bls/src/impls/mod.rs index 7a99798be3..b3f2da77b1 100644 --- a/crypto/bls/src/impls/mod.rs +++ b/crypto/bls/src/impls/mod.rs @@ -1,3 +1,4 @@ +#[cfg(feature = "supranational")] pub mod blst; pub mod fake_crypto; #[cfg(feature = "milagro")] diff --git a/crypto/bls/src/lib.rs b/crypto/bls/src/lib.rs index 8a31a90a14..eacbc2b268 100644 --- a/crypto/bls/src/lib.rs +++ b/crypto/bls/src/lib.rs @@ -41,6 +41,7 @@ pub use generic_signature::{INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN}; pub use get_withdrawal_credentials::get_withdrawal_credentials; pub use zeroize_hash::ZeroizeHash; +#[cfg(feature = "supranational")] use blst::BLST_ERROR as BlstError; #[cfg(feature = "milagro")] use milagro_bls::AmclError; @@ -53,6 +54,7 @@ pub enum Error { #[cfg(feature = "milagro")] MilagroError(AmclError), /// An error was raised from the Supranational BLST BLS library. + #[cfg(feature = "supranational")] BlstError(BlstError), /// The provided bytes were an incorrect length. InvalidByteLength { got: usize, expected: usize }, @@ -71,6 +73,7 @@ impl From<AmclError> for Error { } } +#[cfg(feature = "supranational")] impl From<BlstError> for Error { fn from(e: BlstError) -> Error { Error::BlstError(e) @@ -130,6 +133,7 @@ macro_rules! 
define_mod { #[cfg(feature = "milagro")] define_mod!(milagro_implementations, crate::impls::milagro::types); +#[cfg(feature = "supranational")] define_mod!(blst_implementations, crate::impls::blst::types); #[cfg(feature = "fake_crypto")] define_mod!( From e26004461ff20eac1379edd5ce9020c0f5e8f8d6 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Mon, 8 Aug 2022 23:57:00 +0000 Subject: [PATCH 121/184] Don't attempt to register validators that are pre-activation (#3441) ## Issue Addressed https://github.com/sigp/lighthouse/issues/3440 ## Proposed Changes Don't consider pre-activation validators for validator registration. Co-authored-by: sean <seananderson33@gmail.com> Co-authored-by: Michael Sproul <micsproul@gmail.com> --- validator_client/src/preparation_service.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index b138d3e4ee..6dc8e7d56e 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -285,6 +285,9 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { fn collect_validator_registration_keys(&self) -> Vec<ValidatorRegistrationKey> { self.collect_proposal_data(|pubkey, proposal_data| { + // Ignore fee recipients for keys without indices, they are inactive. + proposal_data.validator_index?; + // We don't log for missing fee recipients here because this will be logged more // frequently in `collect_preparation_data`. 
proposal_data.fee_recipient.and_then(|fee_recipient| { From 68bd7cae21b6890236f5d892f5847363fefd8307 Mon Sep 17 00:00:00 2001 From: kayla-henrie <109097759+kayla-henrie@users.noreply.github.com> Date: Tue, 9 Aug 2022 02:27:04 +0000 Subject: [PATCH 122/184] [Contribution docs] Add GitPOAP Badge to Display Number of Minted GitPOAPs for Contributors (#3343) ## Issue Addressed - N/A ## Proposed Changes Adding badge to contribution docs that shows the number of minted GitPOAPs ## Additional Info Hey all, this PR adds a [GitPOAP Badge](https://docs.gitpoap.io/api#get-v1repoownernamebadge) to the contribution docs that displays the number of minted GitPOAPs for this repository by contributors to this repo. You can see an example of this in [our Documentation repository](https://github.com/gitpoap/gitpoap-docs#gitpoap-docs). This should help would-be contributors as well as existing contributors find out that they will/have received GitPOAPs for their contributions. CC: @colfax23 @kayla-henrie Replaces: https://github.com/sigp/lighthouse/pull/3330 Co-authored-by: Michael Sproul <micsproul@gmail.com> --- CONTRIBUTING.md | 1 + book/src/contributing.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 72f5e73920..489d12eb88 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,5 @@ # Contributors Guide +[![GitPOAP badge](https://public-api.gitpoap.io/v1/repo/sigp/lighthouse/badge)](https://www.gitpoap.io/gh/sigp/lighthouse) Lighthouse is an open-source Ethereum 2.0 client. We're community driven and welcome all contribution. 
We aim to provide a constructive, respectful and fun diff --git a/book/src/contributing.md b/book/src/contributing.md index 4b21d1ecf2..6b84843a69 100644 --- a/book/src/contributing.md +++ b/book/src/contributing.md @@ -1,6 +1,7 @@ # Contributing to Lighthouse [![Chat Badge]][Chat Link] +[![GitPOAP Badge](https://public-api.gitpoap.io/v1/repo/sigp/lighthouse/badge)](https://www.gitpoap.io/gh/sigp/lighthouse) [Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da [Chat Link]: https://discord.gg/cyAszAh From a6886219191e2553177a9f45ad2a046faa1674d3 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Tue, 9 Aug 2022 06:05:13 +0000 Subject: [PATCH 123/184] Add support for beaconAPI in `lcli` functions (#3252) ## Issue Addressed NA ## Proposed Changes Modifies `lcli skip-slots` and `lcli transition-blocks` allow them to source blocks/states from a beaconAPI and also gives them some more features to assist with benchmarking. ## Additional Info Breaks the current `lcli skip-slots` and `lcli transition-blocks` APIs by changing some flag names. It should be simple enough to figure out the changes via `--help`. Currently blocked on #3263. 
--- Cargo.lock | 2 + beacon_node/beacon_chain/src/lib.rs | 2 +- .../src/validator_pubkey_cache.rs | 5 + common/eth2/src/lib.rs | 2 +- common/sensitive_url/src/lib.rs | 15 + .../src/per_block_processing.rs | 2 +- lcli/Cargo.toml | 2 + lcli/src/main.rs | 150 +++++-- lcli/src/skip_slots.rs | 160 +++++-- lcli/src/transition_blocks.rs | 410 ++++++++++++++---- 10 files changed, 607 insertions(+), 143 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1160609bec..3702b95485 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3105,6 +3105,7 @@ name = "lcli" version = "2.5.1" dependencies = [ "account_utils", + "beacon_chain", "bls", "clap", "clap_utils", @@ -3128,6 +3129,7 @@ dependencies = [ "serde_yaml", "snap", "state_processing", + "store", "tree_hash", "types", "validator_dir", diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index ed6c2459eb..481b1ae736 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -41,7 +41,7 @@ pub mod sync_committee_verification; pub mod test_utils; mod timeout_rw_lock; pub mod validator_monitor; -mod validator_pubkey_cache; +pub mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index beb8da8b64..60fdb607c8 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -156,6 +156,11 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> { pub fn len(&self) -> usize { self.indices.len() } + + /// Returns `true` if there are no validators in the cache. + pub fn is_empty(&self) -> bool { + self.indices.is_empty() + } } /// Wrapper for a public key stored in the database. 
diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 8cd138e980..21608ba6dd 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -23,7 +23,7 @@ use lighthouse_network::PeerId; pub use reqwest; use reqwest::{IntoUrl, RequestBuilder, Response}; pub use reqwest::{StatusCode, Url}; -use sensitive_url::SensitiveUrl; +pub use sensitive_url::SensitiveUrl; use serde::{de::DeserializeOwned, Serialize}; use std::convert::TryFrom; use std::fmt; diff --git a/common/sensitive_url/src/lib.rs b/common/sensitive_url/src/lib.rs index 7a3cbae20c..aac4cb5500 100644 --- a/common/sensitive_url/src/lib.rs +++ b/common/sensitive_url/src/lib.rs @@ -1,5 +1,6 @@ use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use std::fmt; +use std::str::FromStr; use url::Url; #[derive(Debug)] @@ -9,6 +10,12 @@ pub enum SensitiveError { RedactError(String), } +impl fmt::Display for SensitiveError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + // Wrapper around Url which provides a custom `Display` implementation to protect user secrets. #[derive(Clone, PartialEq)] pub struct SensitiveUrl { @@ -54,6 +61,14 @@ impl<'de> Deserialize<'de> for SensitiveUrl { } } +impl FromStr for SensitiveUrl { + type Err = SensitiveError; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + Self::parse(s) + } +} + impl SensitiveUrl { pub fn parse(url: &str) -> Result<Self, SensitiveError> { let surl = Url::parse(url).map_err(SensitiveError::ParseError)?; diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 89cb76e0a1..e409372ddd 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -40,7 +40,7 @@ use arbitrary::Arbitrary; /// The strategy to be used when validating the block's signatures. 
#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] -#[derive(PartialEq, Clone, Copy)] +#[derive(PartialEq, Clone, Copy, Debug)] pub enum BlockSignatureStrategy { /// Do not validate any signature. Use with caution. NoVerification, diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index e54d9d8c95..5d94a50461 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -38,3 +38,5 @@ eth1_test_rig = { path = "../testing/eth1_test_rig" } sensitive_url = { path = "../common/sensitive_url" } eth2 = { path = "../common/eth2" } snap = "1.0.1" +beacon_chain = { path = "../beacon_node/beacon_chain" } +store = { path = "../beacon_node/store" } diff --git a/lcli/src/main.rs b/lcli/src/main.rs index c440f50008..2fd0538850 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -22,7 +22,6 @@ use parse_ssz::run_parse_ssz; use std::path::PathBuf; use std::process; use std::str::FromStr; -use transition_blocks::run_transition_blocks; use types::{EthSpec, EthSpecId}; fn main() { @@ -57,52 +56,128 @@ fn main() { "Performs a state transition from some state across some number of skip slots", ) .arg( - Arg::with_name("pre-state") - .value_name("BEACON_STATE") + Arg::with_name("output-path") + .long("output-path") + .value_name("PATH") .takes_value(true) - .required(true) + .help("Path to output a SSZ file."), + ) + .arg( + Arg::with_name("pre-state-path") + .long("pre-state-path") + .value_name("PATH") + .takes_value(true) + .conflicts_with("beacon-url") .help("Path to a SSZ file of the pre-state."), ) .arg( - Arg::with_name("slots") - .value_name("SLOT_COUNT") + Arg::with_name("beacon-url") + .long("beacon-url") + .value_name("URL") .takes_value(true) - .required(true) - .help("Number of slots to skip before outputting a state.."), + .help("URL to a beacon-API provider."), ) .arg( - Arg::with_name("output") - .value_name("SSZ_FILE") + Arg::with_name("state-id") + .long("state-id") + .value_name("STATE_ID") .takes_value(true) - .required(true) - .default_value("./output.ssz") - 
.help("Path to output a SSZ file."), - ), + .requires("beacon-url") + .help("Identifier for a state as per beacon-API standards (slot, root, etc.)"), + ) + .arg( + Arg::with_name("runs") + .long("runs") + .value_name("INTEGER") + .takes_value(true) + .default_value("1") + .help("Number of repeat runs, useful for benchmarking."), + ) + .arg( + Arg::with_name("state-root") + .long("state-root") + .value_name("HASH256") + .takes_value(true) + .help("Tree hash root of the provided state, to avoid computing it."), + ) + .arg( + Arg::with_name("slots") + .long("slots") + .value_name("INTEGER") + .takes_value(true) + .help("Number of slots to skip forward."), + ) + .arg( + Arg::with_name("partial-state-advance") + .long("partial-state-advance") + .takes_value(false) + .help("If present, don't compute state roots when skipping forward."), + ) ) .subcommand( SubCommand::with_name("transition-blocks") .about("Performs a state transition given a pre-state and block") .arg( - Arg::with_name("pre-state") - .value_name("BEACON_STATE") + Arg::with_name("pre-state-path") + .long("pre-state-path") + .value_name("PATH") .takes_value(true) - .required(true) - .help("Path to a SSZ file of the pre-state."), + .conflicts_with("beacon-url") + .requires("block-path") + .help("Path to load a BeaconState from file as SSZ."), ) .arg( - Arg::with_name("block") - .value_name("BEACON_BLOCK") + Arg::with_name("block-path") + .long("block-path") + .value_name("PATH") .takes_value(true) - .required(true) - .help("Path to a SSZ file of the block to apply to pre-state."), + .conflicts_with("beacon-url") + .requires("pre-state-path") + .help("Path to load a SignedBeaconBlock from file as SSZ."), ) .arg( - Arg::with_name("output") - .value_name("SSZ_FILE") + Arg::with_name("post-state-output-path") + .long("post-state-output-path") + .value_name("PATH") .takes_value(true) - .required(true) - .default_value("./output.ssz") - .help("Path to output a SSZ file."), + .help("Path to output the 
post-state."), + ) + .arg( + Arg::with_name("pre-state-output-path") + .long("pre-state-output-path") + .value_name("PATH") + .takes_value(true) + .help("Path to output the pre-state, useful when used with --beacon-url."), + ) + .arg( + Arg::with_name("block-output-path") + .long("block-output-path") + .value_name("PATH") + .takes_value(true) + .help("Path to output the block, useful when used with --beacon-url."), + ) + .arg( + Arg::with_name("beacon-url") + .long("beacon-url") + .value_name("URL") + .takes_value(true) + .help("URL to a beacon-API provider."), + ) + .arg( + Arg::with_name("block-id") + .long("block-id") + .value_name("BLOCK_ID") + .takes_value(true) + .requires("beacon-url") + .help("Identifier for a block as per beacon-API standards (slot, root, etc.)"), + ) + .arg( + Arg::with_name("runs") + .long("runs") + .value_name("INTEGER") + .takes_value(true) + .default_value("1") + .help("Number of repeat runs, useful for benchmarking."), ) .arg( Arg::with_name("no-signature-verification") @@ -110,6 +185,20 @@ fn main() { .takes_value(false) .help("Disable signature verification.") ) + .arg( + Arg::with_name("exclude-cache-builds") + .long("exclude-cache-builds") + .takes_value(false) + .help("If present, pre-build the committee and tree-hash caches without \ + including them in the timings."), + ) + .arg( + Arg::with_name("exclude-post-block-thc") + .long("exclude-post-block-thc") + .takes_value(false) + .help("If present, don't rebuild the tree-hash-cache after applying \ + the block."), + ) ) .subcommand( SubCommand::with_name("pretty-ssz") @@ -673,10 +762,11 @@ fn run<T: EthSpec>( )?; match matches.subcommand() { - ("transition-blocks", Some(matches)) => run_transition_blocks::<T>(testnet_dir, matches) + ("transition-blocks", Some(matches)) => transition_blocks::run::<T>(env, matches) .map_err(|e| format!("Failed to transition blocks: {}", e)), - ("skip-slots", Some(matches)) => skip_slots::run::<T>(testnet_dir, matches) - .map_err(|e| 
format!("Failed to skip slots: {}", e)), + ("skip-slots", Some(matches)) => { + skip_slots::run::<T>(env, matches).map_err(|e| format!("Failed to skip slots: {}", e)) + } ("pretty-ssz", Some(matches)) => { run_parse_ssz::<T>(matches).map_err(|e| format!("Failed to pretty print hex: {}", e)) } diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index cb502d37ae..28310f7683 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -1,58 +1,150 @@ +//! # Skip-Slots +//! +//! Use this tool to process a `BeaconState` through empty slots. Useful for benchmarking or +//! troubleshooting consensus failures. +//! +//! It can load states from file or pull them from a beaconAPI. States pulled from a beaconAPI can +//! be saved to disk to reduce future calls to that server. +//! +//! ## Examples +//! +//! ### Example 1. +//! +//! Download a state from a HTTP endpoint and skip forward an epoch, twice (the initial state is +//! advanced 32 slots twice, rather than it being advanced 64 slots): +//! +//! ```ignore +//! lcli skip-slots \ +//! --beacon-url http://localhost:5052 \ +//! --state-id 0x3cdc33cd02713d8d6cc33a6dbe2d3a5bf9af1d357de0d175a403496486ff845e \\ +//! --slots 32 \ +//! --runs 2 +//! ``` +//! +//! ### Example 2. +//! +//! Download a state to a SSZ file (without modifying it): +//! +//! ```ignore +//! lcli skip-slots \ +//! --beacon-url http://localhost:5052 \ +//! --state-id 0x3cdc33cd02713d8d6cc33a6dbe2d3a5bf9af1d357de0d175a403496486ff845e \ +//! --slots 0 \ +//! --runs 0 \ +//! --output-path /tmp/state-0x3cdc.ssz +//! ``` +//! +//! ### Example 3. +//! +//! Do two runs over the state that was downloaded in the previous example: +//! +//! ```ignore +//! lcli skip-slots \ +//! --pre-state-path /tmp/state-0x3cdc.ssz \ +//! --slots 32 \ +//! --runs 2 +//! 
``` use crate::transition_blocks::load_from_ssz_with; use clap::ArgMatches; -use eth2_network_config::Eth2NetworkConfig; +use clap_utils::{parse_optional, parse_required}; +use environment::Environment; +use eth2::{types::StateId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; use ssz::Encode; -use state_processing::per_slot_processing; +use state_processing::state_advance::{complete_state_advance, partial_state_advance}; use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; -use types::{BeaconState, EthSpec}; +use std::time::{Duration, Instant}; +use types::{BeaconState, CloneConfig, EthSpec, Hash256}; -pub fn run<T: EthSpec>(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { - let pre_state_path = matches - .value_of("pre-state") - .ok_or("No pre-state file supplied")? - .parse::<PathBuf>() - .map_err(|e| format!("Failed to parse pre-state path: {}", e))?; +const HTTP_TIMEOUT: Duration = Duration::from_secs(10); - let slots = matches - .value_of("slots") - .ok_or("No slots supplied")? - .parse::<usize>() - .map_err(|e| format!("Failed to parse slots: {}", e))?; +pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches) -> Result<(), String> { + let spec = &T::default_spec(); + let executor = env.core_context().executor; - let output_path = matches - .value_of("output") - .ok_or("No output file supplied")? 
- .parse::<PathBuf>() - .map_err(|e| format!("Failed to parse output path: {}", e))?; + let output_path: Option<PathBuf> = parse_optional(matches, "output-path")?; + let state_path: Option<PathBuf> = parse_optional(matches, "pre-state-path")?; + let beacon_url: Option<SensitiveUrl> = parse_optional(matches, "beacon-url")?; + let runs: usize = parse_required(matches, "runs")?; + let slots: u64 = parse_required(matches, "slots")?; + let cli_state_root: Option<Hash256> = parse_optional(matches, "state-root")?; + let partial: bool = matches.is_present("partial-state-advance"); info!("Using {} spec", T::spec_name()); - info!("Pre-state path: {:?}", pre_state_path); - info!("Slots: {:?}", slots); + info!("Advancing {} slots", slots); + info!("Doing {} runs", runs); - let eth2_network_config = Eth2NetworkConfig::load(testnet_dir)?; - let spec = ð2_network_config.chain_spec::<T>()?; + let (mut state, state_root) = match (state_path, beacon_url) { + (Some(state_path), None) => { + info!("State path: {:?}", state_path); + let state = load_from_ssz_with(&state_path, spec, BeaconState::from_ssz_bytes)?; + (state, None) + } + (None, Some(beacon_url)) => { + let state_id: StateId = parse_required(matches, "state-id")?; + let client = BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(HTTP_TIMEOUT)); + let state = executor + .handle() + .ok_or("shutdown in progress")? + .block_on(async move { + client + .get_debug_beacon_states::<T>(state_id) + .await + .map_err(|e| format!("Failed to download state: {:?}", e)) + }) + .map_err(|e| format!("Failed to complete task: {:?}", e))? + .ok_or_else(|| format!("Unable to locate state at {:?}", state_id))? 
+ .data; + let state_root = match state_id { + StateId::Root(root) => Some(root), + _ => None, + }; + (state, state_root) + } + _ => return Err("must supply either --state-path or --beacon-url".into()), + }; - let mut state: BeaconState<T> = - load_from_ssz_with(&pre_state_path, spec, BeaconState::from_ssz_bytes)?; + let initial_slot = state.slot(); + let target_slot = initial_slot + slots; state .build_all_caches(spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; - // Transition the parent state to the block slot. - for i in 0..slots { - per_slot_processing(&mut state, None, spec) - .map_err(|e| format!("Failed to advance slot on iteration {}: {:?}", i, e))?; + let state_root = if let Some(root) = cli_state_root.or(state_root) { + root + } else { + state + .update_tree_hash_cache() + .map_err(|e| format!("Unable to build THC: {:?}", e))? + }; + + for i in 0..runs { + let mut state = state.clone_with(CloneConfig::committee_caches_only()); + + let start = Instant::now(); + + if partial { + partial_state_advance(&mut state, Some(state_root), target_slot, spec) + .map_err(|e| format!("Unable to perform partial advance: {:?}", e))?; + } else { + complete_state_advance(&mut state, Some(state_root), target_slot, spec) + .map_err(|e| format!("Unable to perform complete advance: {:?}", e))?; + } + + let duration = Instant::now().duration_since(start); + info!("Run {}: {:?}", i, duration); } - let mut output_file = - File::create(output_path).map_err(|e| format!("Unable to create output file: {:?}", e))?; + if let Some(output_path) = output_path { + let mut output_file = File::create(output_path) + .map_err(|e| format!("Unable to create output file: {:?}", e))?; - output_file - .write_all(&state.as_ssz_bytes()) - .map_err(|e| format!("Unable to write to output file: {:?}", e))?; + output_file + .write_all(&state.as_ssz_bytes()) + .map_err(|e| format!("Unable to write to output file: {:?}", e))?; + } Ok(()) } diff --git a/lcli/src/transition_blocks.rs 
b/lcli/src/transition_blocks.rs index 74be1e6284..793bdb6422 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -1,125 +1,387 @@ +//! # Transition Blocks +//! +//! Use this tool to apply a `SignedBeaconBlock` to a `BeaconState`. Useful for benchmarking or +//! troubleshooting consensus failures. +//! +//! It can load states and blocks from file or pull them from a beaconAPI. Objects pulled from a +//! beaconAPI can be saved to disk to reduce future calls to that server. +//! +//! ## Examples +//! +//! ### Run using a block from a beaconAPI +//! +//! Download the 0x6c69 block and its pre-state (the state from its parent block) from the +//! beaconAPI. Advance the pre-state to the slot of the 0x6c69 block and apply that block to the +//! pre-state. +//! +//! ```ignore +//! lcli transition-blocks \ +//! --beacon-url http://localhost:5052 \ +//! --block-id 0x6c69cf50a451f1ec905e954bf1fa22970f371a72a5aa9f8e3a43a18fdd980bec \ +//! --runs 10 +//! ``` +//! +//! ### Download a block and pre-state from a beaconAPI to the filesystem +//! +//! Download a block and pre-state to the filesystem, without performing any transitions: +//! +//! ```ignore +//! lcli transition-blocks \ +//! --beacon-url http://localhost:5052 \ +//! --block-id 0x6c69cf50a451f1ec905e954bf1fa22970f371a72a5aa9f8e3a43a18fdd980bec \ +//! --runs 0 \ +//! --block-output-path /tmp/block-0x6c69.ssz \ +//! --pre-state-output-path /tmp/pre-state-0x6c69.ssz +//! ``` +//! +//! ### Use a block and pre-state from the filesystem +//! +//! Do one run over the block and pre-state downloaded in the previous example and save the post +//! state to file: +//! +//! ```ignore +//! lcli transition-blocks \ +//! --block-path /tmp/block-0x6c69.ssz \ +//! --pre-state-path /tmp/pre-state-0x6c69.ssz +//! --post-state-output-path /tmp/post-state-0x6c69.ssz +//! ``` +//! +//! ### Isolate block processing for benchmarking +//! +//! 
Try to isolate block processing as much as possible for benchmarking: +//! +//! ```ignore +//! lcli transition-blocks \ +//! --block-path /tmp/block-0x6c69.ssz \ +//! --pre-state-path /tmp/pre-state-0x6c69.ssz \ +//! --runs 10 \ +//! --exclude-cache-builds \ +//! --exclude-post-block-thc +//! ``` +use beacon_chain::{ + test_utils::EphemeralHarnessType, validator_pubkey_cache::ValidatorPubkeyCache, +}; use clap::ArgMatches; -use eth2_network_config::Eth2NetworkConfig; +use clap_utils::{parse_optional, parse_required}; +use environment::{null_logger, Environment}; +use eth2::{ + types::{BlockId, StateId}, + BeaconNodeHttpClient, SensitiveUrl, Timeouts, +}; use ssz::Encode; use state_processing::{ - per_block_processing, per_slot_processing, BlockSignatureStrategy, VerifyBlockRoot, + block_signature_verifier::BlockSignatureVerifier, per_block_processing, per_slot_processing, + BlockSignatureStrategy, VerifyBlockRoot, }; +use std::borrow::Cow; use std::fs::File; use std::io::prelude::*; use std::path::{Path, PathBuf}; -use std::time::Instant; -use types::{BeaconState, ChainSpec, EthSpec, SignedBeaconBlock}; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use store::HotColdDB; +use types::{BeaconState, ChainSpec, CloneConfig, EthSpec, Hash256, SignedBeaconBlock}; -pub fn run_transition_blocks<T: EthSpec>( - testnet_dir: PathBuf, - matches: &ArgMatches, -) -> Result<(), String> { - let pre_state_path = matches - .value_of("pre-state") - .ok_or("No pre-state file supplied")? - .parse::<PathBuf>() - .map_err(|e| format!("Failed to parse pre-state path: {}", e))?; +const HTTP_TIMEOUT: Duration = Duration::from_secs(10); - let block_path = matches - .value_of("block") - .ok_or("No block file supplied")? 
- .parse::<PathBuf>() - .map_err(|e| format!("Failed to parse block path: {}", e))?; +#[derive(Debug)] +struct Config { + no_signature_verification: bool, + exclude_cache_builds: bool, + exclude_post_block_thc: bool, +} - let output_path = matches - .value_of("output") - .ok_or("No output file supplied")? - .parse::<PathBuf>() - .map_err(|e| format!("Failed to parse output path: {}", e))?; +pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches) -> Result<(), String> { + let spec = &T::default_spec(); + let executor = env.core_context().executor; - let no_signature_verification = matches.is_present("no-signature-verification"); - let signature_strategy = if no_signature_verification { - BlockSignatureStrategy::NoVerification - } else { - BlockSignatureStrategy::VerifyIndividual + /* + * Parse (most) CLI arguments. + */ + + let pre_state_path: Option<PathBuf> = parse_optional(matches, "pre-state-path")?; + let block_path: Option<PathBuf> = parse_optional(matches, "block-path")?; + let post_state_output_path: Option<PathBuf> = + parse_optional(matches, "post-state-output-path")?; + let pre_state_output_path: Option<PathBuf> = parse_optional(matches, "pre-state-output-path")?; + let block_output_path: Option<PathBuf> = parse_optional(matches, "block-output-path")?; + let beacon_url: Option<SensitiveUrl> = parse_optional(matches, "beacon-url")?; + let runs: usize = parse_required(matches, "runs")?; + let config = Config { + no_signature_verification: matches.is_present("no-signature-verification"), + exclude_cache_builds: matches.is_present("exclude-cache-builds"), + exclude_post_block_thc: matches.is_present("exclude-post-block-thc"), }; info!("Using {} spec", T::spec_name()); - info!("Pre-state path: {:?}", pre_state_path); - info!("Block path: {:?}", block_path); + info!("Doing {} runs", runs); + info!("{:?}", &config); - let eth2_network_config = Eth2NetworkConfig::load(testnet_dir)?; - let spec = ð2_network_config.chain_spec::<T>()?; + /* + * Load 
the block and pre-state from disk or beaconAPI URL. + */ - let pre_state: BeaconState<T> = - load_from_ssz_with(&pre_state_path, spec, BeaconState::from_ssz_bytes)?; - let block: SignedBeaconBlock<T> = - load_from_ssz_with(&block_path, spec, SignedBeaconBlock::from_ssz_bytes)?; + let (mut pre_state, mut state_root_opt, block) = match (pre_state_path, block_path, beacon_url) + { + (Some(pre_state_path), Some(block_path), None) => { + info!("Block path: {:?}", pre_state_path); + info!("Pre-state path: {:?}", block_path); + let pre_state = load_from_ssz_with(&pre_state_path, spec, BeaconState::from_ssz_bytes)?; + let block = load_from_ssz_with(&block_path, spec, SignedBeaconBlock::from_ssz_bytes)?; + (pre_state, None, block) + } + (None, None, Some(beacon_url)) => { + let block_id: BlockId = parse_required(matches, "block-id")?; + let client = BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(HTTP_TIMEOUT)); + executor + .handle() + .ok_or("shutdown in progress")? + .block_on(async move { + let block = client + .get_beacon_blocks(block_id) + .await + .map_err(|e| format!("Failed to download block: {:?}", e))? + .ok_or_else(|| format!("Unable to locate block at {:?}", block_id))? + .data; - let t = Instant::now(); - let post_state = do_transition(pre_state, block, signature_strategy, spec)?; - println!("Total transition time: {}ms", t.elapsed().as_millis()); + if block.slot() == spec.genesis_slot { + return Err("Cannot run on the genesis block".to_string()); + } - let mut output_file = - File::create(output_path).map_err(|e| format!("Unable to create output file: {:?}", e))?; + let parent_block: SignedBeaconBlock<T> = client + .get_beacon_blocks(BlockId::Root(block.parent_root())) + .await + .map_err(|e| format!("Failed to download parent block: {:?}", e))? + .ok_or_else(|| format!("Unable to locate parent block at {:?}", block_id))? 
+ .data; - output_file - .write_all(&post_state.as_ssz_bytes()) - .map_err(|e| format!("Unable to write to output file: {:?}", e))?; + let state_root = parent_block.state_root(); + let state_id = StateId::Root(state_root); + let pre_state = client + .get_debug_beacon_states::<T>(state_id) + .await + .map_err(|e| format!("Failed to download state: {:?}", e))? + .ok_or_else(|| format!("Unable to locate state at {:?}", state_id))? + .data; + + Ok((pre_state, Some(state_root), block)) + }) + .map_err(|e| format!("Failed to complete task: {:?}", e))? + } + _ => { + return Err( + "must supply *both* --pre-state-path and --block-path *or* only --beacon-url" + .into(), + ) + } + }; + + // Compute the block root. + let block_root = block.canonical_root(); + + /* + * Create a `BeaconStore` and `ValidatorPubkeyCache` for block signature verification. + */ + + let store = HotColdDB::open_ephemeral( + <_>::default(), + spec.clone(), + null_logger().map_err(|e| format!("Failed to create null_logger: {:?}", e))?, + ) + .map_err(|e| format!("Failed to create ephemeral store: {:?}", e))?; + let store = Arc::new(store); + + debug!("Building pubkey cache (might take some time)"); + let validator_pubkey_cache = ValidatorPubkeyCache::new(&pre_state, store) + .map_err(|e| format!("Failed to create pubkey cache: {:?}", e))?; + + /* + * If cache builds are excluded from the timings, build them early so they are available for + * each run. + */ + + if config.exclude_cache_builds { + pre_state + .build_all_caches(spec) + .map_err(|e| format!("Unable to build caches: {:?}", e))?; + let state_root = pre_state + .update_tree_hash_cache() + .map_err(|e| format!("Unable to build THC: {:?}", e))?; + + if state_root_opt.map_or(false, |expected| expected != state_root) { + return Err(format!( + "State root mismatch! Expected {}, computed {}", + state_root_opt.unwrap(), + state_root + )); + } + state_root_opt = Some(state_root); + } + + /* + * Perform the core "runs". 
+ */ + + let mut output_post_state = None; + for i in 0..runs { + let pre_state = pre_state.clone_with(CloneConfig::all()); + let block = block.clone(); + + let start = Instant::now(); + + let post_state = do_transition( + pre_state, + block_root, + block, + state_root_opt, + &config, + &validator_pubkey_cache, + spec, + )?; + + let duration = Instant::now().duration_since(start); + info!("Run {}: {:?}", i, duration); + + if output_post_state.is_none() { + output_post_state = Some(post_state) + } + } + + /* + * Write artifacts to disk, if required. + */ + + if let Some(path) = post_state_output_path { + let output_post_state = output_post_state.ok_or_else(|| { + format!( + "Post state was not computed, cannot save to disk (runs = {})", + runs + ) + })?; + + let mut output_file = + File::create(path).map_err(|e| format!("Unable to create output file: {:?}", e))?; + + output_file + .write_all(&output_post_state.as_ssz_bytes()) + .map_err(|e| format!("Unable to write to output file: {:?}", e))?; + } + + if let Some(path) = pre_state_output_path { + let mut output_file = + File::create(path).map_err(|e| format!("Unable to create output file: {:?}", e))?; + + output_file + .write_all(&pre_state.as_ssz_bytes()) + .map_err(|e| format!("Unable to write to output file: {:?}", e))?; + } + + if let Some(path) = block_output_path { + let mut output_file = + File::create(path).map_err(|e| format!("Unable to create output file: {:?}", e))?; + + output_file + .write_all(&block.as_ssz_bytes()) + .map_err(|e| format!("Unable to write to output file: {:?}", e))?; + } Ok(()) } fn do_transition<T: EthSpec>( mut pre_state: BeaconState<T>, + block_root: Hash256, block: SignedBeaconBlock<T>, - signature_strategy: BlockSignatureStrategy, + mut state_root_opt: Option<Hash256>, + config: &Config, + validator_pubkey_cache: &ValidatorPubkeyCache<EphemeralHarnessType<T>>, spec: &ChainSpec, ) -> Result<BeaconState<T>, String> { - let t = Instant::now(); - pre_state - .build_all_caches(spec) - 
.map_err(|e| format!("Unable to build caches: {:?}", e))?; - println!("Build caches: {}ms", t.elapsed().as_millis()); + if !config.exclude_cache_builds { + let t = Instant::now(); + pre_state + .build_all_caches(spec) + .map_err(|e| format!("Unable to build caches: {:?}", e))?; + debug!("Build caches: {:?}", t.elapsed()); - let t = Instant::now(); - pre_state - .update_tree_hash_cache() - .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?; - println!("Initial tree hash: {}ms", t.elapsed().as_millis()); + let t = Instant::now(); + let state_root = pre_state + .update_tree_hash_cache() + .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?; + debug!("Initial tree hash: {:?}", t.elapsed()); + + if state_root_opt.map_or(false, |expected| expected != state_root) { + return Err(format!( + "State root mismatch! Expected {}, computed {}", + state_root_opt.unwrap(), + state_root + )); + } + state_root_opt = Some(state_root); + } + + let state_root = state_root_opt.ok_or("Failed to compute state root, internal error")?; // Transition the parent state to the block slot. 
let t = Instant::now(); for i in pre_state.slot().as_u64()..block.slot().as_u64() { - per_slot_processing(&mut pre_state, None, spec) + per_slot_processing(&mut pre_state, Some(state_root), spec) .map_err(|e| format!("Failed to advance slot on iteration {}: {:?}", i, e))?; } - println!("Slot processing: {}ms", t.elapsed().as_millis()); - - let t = Instant::now(); - pre_state - .update_tree_hash_cache() - .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?; - println!("Pre-block tree hash: {}ms", t.elapsed().as_millis()); + debug!("Slot processing: {:?}", t.elapsed()); let t = Instant::now(); pre_state .build_all_caches(spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; - println!("Build all caches (again): {}ms", t.elapsed().as_millis()); + debug!("Build all caches (again): {:?}", t.elapsed()); + + if !config.no_signature_verification { + let get_pubkey = move |validator_index| { + validator_pubkey_cache + .get(validator_index) + .map(Cow::Borrowed) + }; + + let decompressor = move |pk_bytes| { + // Map compressed pubkey to validator index. + let validator_index = validator_pubkey_cache.get_index(pk_bytes)?; + // Map validator index to pubkey (respecting guard on unknown validators). 
+ get_pubkey(validator_index) + }; + + let t = Instant::now(); + BlockSignatureVerifier::verify_entire_block( + &pre_state, + get_pubkey, + decompressor, + &block, + Some(block_root), + spec, + ) + .map_err(|e| format!("Invalid block signature: {:?}", e))?; + debug!("Batch verify block signatures: {:?}", t.elapsed()); + } let t = Instant::now(); per_block_processing( &mut pre_state, &block, None, - signature_strategy, + BlockSignatureStrategy::NoVerification, VerifyBlockRoot::True, spec, ) .map_err(|e| format!("State transition failed: {:?}", e))?; - println!("Process block: {}ms", t.elapsed().as_millis()); + debug!("Process block: {:?}", t.elapsed()); - let t = Instant::now(); - pre_state - .update_tree_hash_cache() - .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?; - println!("Post-block tree hash: {}ms", t.elapsed().as_millis()); + if !config.exclude_post_block_thc { + let t = Instant::now(); + pre_state + .update_tree_hash_cache() + .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?; + debug!("Post-block tree hash: {:?}", t.elapsed()); + } Ok(pre_state) } @@ -136,10 +398,6 @@ pub fn load_from_ssz_with<T>( .map_err(|e| format!("Unable to read from file {:?}: {:?}", path, e))?; let t = Instant::now(); let result = decoder(&bytes, spec).map_err(|e| format!("Ssz decode failed: {:?}", e)); - println!( - "SSZ decoding {}: {}ms", - path.display(), - t.elapsed().as_millis() - ); + debug!("SSZ decoding {}: {:?}", path.display(), t.elapsed()); result } From 5bb4aada92882ff25e2c33c31661cde91b651f13 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Tue, 9 Aug 2022 06:05:15 +0000 Subject: [PATCH 124/184] Update Prater ENRs (#3396) ## Issue Addressed NA ## Proposed Changes Update bootnodes for Prater. There are new IP addresses for the Sigma Prime nodes. Teku and Nimbus nodes were also added. 
## Additional Info Related: https://github.com/eth-clients/goerli/commit/24760cd4b46c4f2274cf333375bfbf6133f44401 --- .../built_in_network_configs/prater/boot_enr.yaml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml index fcb2d5342b..7000ff0bbc 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml @@ -7,4 +7,11 @@ # Prysm bootnode #1 - enr:-Ku4QFmUkNp0g9bsLX2PfVeIyT-9WO-PZlrqZBNtEyofOOfLMScDjaTzGxIb1Ns9Wo5Pm_8nlq-SZwcQfTH2cgO-s88Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDkvpOTAAAQIP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQLV_jMOIxKbjHFKgrkFvwDvpexo6Nd58TK5k7ss4Vt0IoN1ZHCCG1g # Lighthouse bootnode #1 -- enr:-LK4QLINdtobGquK7jukLDAKmsrH2ZuHM4k0TklY5jDTD4ZgfxR9weZmo5Jwu81hlKu3qPAvk24xHGBDjYs4o8f1gZ0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhDRN_P6Jc2VjcDI1NmsxoQJuNujTgsJUHUgVZML3pzrtgNtYg7rQ4K1tkWERgl0DdoN0Y3CCIyiDdWRwgiMo +- enr:-Ly4QFPk-cTMxZ3jWTafiNblEZkQIXGF2aVzCIGW0uHp6KaEAvBMoctE8S7YU0qZtuS7By0AA4YMfKoN9ls_GJRccVpFh2F0dG5ldHOI__________-EZXRoMpCC9KcrAgAQIIS2AQAAAAAAgmlkgnY0gmlwhKh3joWJc2VjcDI1NmsxoQKrxz8M1IHwJqRIpDqdVW_U1PeixMW5SfnBD-8idYIQrIhzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA +# Lighthouse bootnode #2 +- enr:-L64QJmwSDtaHVgGiqIxJWUtxWg6uLCipsms6j-8BdsOJfTWAs7CLF9HJnVqFE728O-JYUDCxzKvRdeMqBSauHVCMdaCAVWHYXR0bmV0c4j__________4RldGgykIL0pysCABAghLYBAAAAAACCaWSCdjSCaXCEQWxOdolzZWNwMjU2azGhA7Qmod9fK86WidPOzLsn5_8QyzL7ZcJ1Reca7RnD54vuiHN5bmNuZXRzD4N0Y3CCIyiDdWRwgiMo +# Nimbus bootstrap nodes +- enr:-LK4QMzPq4Q7w5R-rnGQDcI8BYky6oPVBGQTbS1JJLVtNi_8PzBLV7Bdzsoame9nJK5bcJYpGHn4SkaDN2CM6tR5G_4Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhAN4yvyJc2VjcDI1NmsxoQKa8Qnp_P2clLIP6VqLKOp_INvEjLszalEnW0LoBZo4YYN0Y3CCI4yDdWRwgiOM +- 
enr:-LK4QLM_pPHa78R8xlcU_s40Y3XhFjlb3kPddW9lRlY67N5qeFE2Wo7RgzDgRs2KLCXODnacVHMFw1SfpsW3R474RZEBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhANBY-yJc2VjcDI1NmsxoQNsZkFXgKbTzuxF7uwxlGauTGJelE6HD269CcFlZ_R7A4N0Y3CCI4yDdWRwgiOM +# Teku bootnode +- enr:-KK4QH0RsNJmIG0EX9LSnVxMvg-CAOr3ZFF92hunU63uE7wcYBjG1cFbUTvEa5G_4nDJkRhUq9q2ck9xY-VX1RtBsruBtIRldGgykIL0pysBABAg__________-CaWSCdjSCaXCEEnXQ0YlzZWNwMjU2azGhA1grTzOdMgBvjNrk-vqWtTZsYQIi0QawrhoZrsn5Hd56g3RjcIIjKIN1ZHCCIyg From 6f13727fbef75ff2501eb3248315842eaa83247b Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Tue, 9 Aug 2022 06:05:16 +0000 Subject: [PATCH 125/184] Don't use the builder network if the head is optimistic (#3412) ## Issue Addressed Resolves https://github.com/sigp/lighthouse/issues/3394 Adds a check in `is_healthy` about whether the head is optimistic when choosing whether to use the builder network. Co-authored-by: realbigsean <sean@sigmaprime.io> --- beacon_node/beacon_chain/src/beacon_chain.rs | 15 +++++- beacon_node/execution_layer/src/lib.rs | 4 ++ beacon_node/http_api/tests/tests.rs | 57 ++++++++++++++++++++ 3 files changed, 74 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index fec7fe25ff..d6503d687c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3333,7 +3333,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { pubkey, slot: state.slot(), chain_health: self - .is_healthy() + .is_healthy(&parent_root) .map_err(BlockProductionError::BeaconChain)?, }; @@ -4562,7 +4562,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// /// Since we are likely calling this during the slot we are going to propose in, don't take into /// account the current slot when accounting for skips. 
- pub fn is_healthy(&self) -> Result<ChainHealth, Error> { + pub fn is_healthy(&self, parent_root: &Hash256) -> Result<ChainHealth, Error> { // Check if the merge has been finalized. if let Some(finalized_hash) = self .canonical_head @@ -4577,6 +4577,17 @@ impl<T: BeaconChainTypes> BeaconChain<T> { return Ok(ChainHealth::PreMerge); }; + // Check that the parent is NOT optimistic. + if let Some(execution_status) = self + .canonical_head + .fork_choice_read_lock() + .get_block_execution_status(parent_root) + { + if execution_status.is_strictly_optimistic() { + return Ok(ChainHealth::Optimistic); + } + } + if self.config.builder_fallback_disable_checks { return Ok(ChainHealth::Healthy); } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 59c8f009fa..f56ea8f797 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -114,6 +114,7 @@ pub struct BuilderParams { pub enum ChainHealth { Healthy, Unhealthy(FailedCondition), + Optimistic, PreMerge, } @@ -695,6 +696,9 @@ impl<T: EthSpec> ExecutionLayer<T> { } // Intentional no-op, so we never attempt builder API proposals pre-merge. ChainHealth::PreMerge => (), + ChainHealth::Optimistic => info!(self.log(), "The local execution engine is syncing \ + so the builder network cannot safely be used. Attempting \ + to build a block with the local execution engine"), } } self.get_full_payload_caching( diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index cc0281e454..fa41102292 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -3044,6 +3044,55 @@ impl ApiTester { self } + pub async fn test_builder_chain_health_optimistic_head(self) -> Self { + // Make sure the next payload verification will return optimistic before advancing the chain. 
+ self.harness.mock_execution_layer.as_ref().map(|el| { + el.server.all_payloads_syncing(true); + el + }); + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness.advance_slot(); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!( + payload.execution_payload_header.fee_recipient, + expected_fee_recipient + ); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + + self + } + #[cfg(target_os = "linux")] pub async fn test_get_lighthouse_health(self) -> Self { self.client.get_lighthouse_health().await.unwrap(); @@ -4000,6 +4049,14 @@ async fn builder_chain_health_epochs_since_finalization() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_optimistic_head() { + ApiTester::new_mev_tester() + .await + .test_builder_chain_health_optimistic_head() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() From 052d5cf31f3028157cbf7812a092ccc409ba927a Mon Sep 17 00:00:00 2001 From: Brendan Timmons <brendantimmons@live.com.au> Date: Tue, 9 Aug 2022 06:05:17 +0000 Subject: [PATCH 126/184] fix: incorrectly formatted MEV link in Lighthouse Book (#3434) ## Issue Addressed N/A ## Proposed Changes Simply fix the incorrect formatting on markdown link. 
Co-authored-by: Michael Sproul <micsproul@gmail.com> --- book/src/builders.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/src/builders.md b/book/src/builders.md index 78a80899cc..1a034e0820 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -6,7 +6,7 @@ knowledge of the transactions included in the block. This enables Lighthouse to transaction gathering/ordering within a block to parties specialized in this particular task. For economic reasons, these parties will refuse to reveal the list of transactions to the validator before the validator has committed to (i.e. signed) the block. A primer on MEV can be found -[here]([MEV](https://ethereum.org/en/developers/docs/mev/)). +[here](https://ethereum.org/en/developers/docs/mev). Using the builder API is not known to introduce additional slashing risks, however a live-ness risk (i.e. the ability for the chain to produce valid blocks) is introduced because your node will be From 2de26b20f80579db6c794d5f25f6326783973372 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Wed, 10 Aug 2022 07:52:57 +0000 Subject: [PATCH 127/184] Don't return errors on HTTP API for already-known messages (#3341) ## Issue Addressed - Resolves #3266 ## Proposed Changes Return 200 OK rather than an error when a block, attestation or sync message is already known. Presently, we will log return an error which causes a BN to go "offline" from the VCs perspective which causes the fallback mechanism to do work to try and avoid and upcheck offline nodes. This can be observed as instability in the `vc_beacon_nodes_available_count` metric. The current behaviour also causes scary logs for the user. There's nothing to *actually* be concerned about when we see duplicate messages, this can happen on fallback systems (see code comments). 
## Additional Info NA --- beacon_node/http_api/src/lib.rs | 53 +++++++++++++++++++++ beacon_node/http_api/src/publish_blocks.rs | 25 +++++++++- beacon_node/http_api/src/sync_committees.rs | 30 +++++++++++- 3 files changed, 105 insertions(+), 3 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index ba1dd01cc3..ec34dd0663 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1168,12 +1168,46 @@ pub fn serve<T: BeaconChainTypes>( blocking_json_task(move || { let seen_timestamp = timestamp_now(); let mut failures = Vec::new(); + let mut num_already_known = 0; for (index, attestation) in attestations.as_slice().iter().enumerate() { let attestation = match chain .verify_unaggregated_attestation_for_gossip(attestation, None) { Ok(attestation) => attestation, + Err(AttnError::PriorAttestationKnown { .. }) => { + num_already_known += 1; + + // Skip to the next attestation since an attestation for this + // validator is already known in this epoch. + // + // There's little value for the network in validating a second + // attestation for another validator since it is either: + // + // 1. A duplicate. + // 2. Slashable. + // 3. Invalid. + // + // We are likely to get duplicates in the case where a VC is using + // fallback BNs. If the first BN actually publishes some/all of a + // batch of attestations but fails to respond in a timely fashion, + // the VC is likely to try publishing the attestations on another + // BN. That second BN may have already seen the attestations from + // the first BN and therefore indicate that the attestations are + // "already seen". An attestation that has already been seen has + // been published on the network so there's no actual error from + // the perspective of the user. + // + // It's better to prevent slashable attestations from ever + // appearing on the network than trying to slash validators, + // especially those validators connected to the local API. 
+ // + // There might be *some* value in determining that this attestation + // is invalid, but since a valid attestation already it exists it + // appears that this validator is capable of producing valid + // attestations and there's no immediate cause for concern. + continue; + } Err(e) => { error!(log, "Failure verifying attestation for gossip"; @@ -1240,6 +1274,15 @@ pub fn serve<T: BeaconChainTypes>( )); } } + + if num_already_known > 0 { + debug!( + log, + "Some unagg attestations already known"; + "count" => num_already_known + ); + } + if failures.is_empty() { Ok(()) } else { @@ -2234,6 +2277,16 @@ pub fn serve<T: BeaconChainTypes>( // identical aggregates, especially if they're using the same beacon // node. Err(AttnError::AttestationAlreadyKnown(_)) => continue, + // If we've already seen this aggregator produce an aggregate, just + // skip this one. + // + // We're likely to see this with VCs that use fallback BNs. The first + // BN might time-out *after* publishing the aggregate and then the + // second BN will indicate it's already seen the aggregate. + // + // There's no actual error for the user or the network since the + // aggregate has been successfully published by some other node. 
+ Err(AttnError::AggregatorAlreadyKnown(_)) => continue, Err(e) => { error!(log, "Failure verifying aggregate and proofs"; diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index b282e6f490..60ca8f2328 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -1,9 +1,9 @@ use crate::metrics; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; -use beacon_chain::{BeaconChain, BeaconChainTypes, CountUnrealized}; +use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized}; use lighthouse_network::PubsubMessage; use network::NetworkMessage; -use slog::{crit, error, info, Logger}; +use slog::{crit, error, info, warn, Logger}; use slot_clock::SlotClock; use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; @@ -86,6 +86,27 @@ pub async fn publish_block<T: BeaconChainTypes>( Ok(()) } + Err(BlockError::BlockIsAlreadyKnown) => { + info!( + log, + "Block from HTTP API already known"; + "block" => ?block.canonical_root(), + "slot" => block.slot(), + ); + Ok(()) + } + Err(BlockError::RepeatProposal { proposer, slot }) => { + warn!( + log, + "Block ignored due to repeat proposal"; + "msg" => "this can happen when a VC uses fallback BNs. 
\ + whilst this is not necessarily an error, it can indicate issues with a BN \ + or between the VC and BN.", + "slot" => slot, + "proposer" => proposer, + ); + Ok(()) + } Err(e) => { let msg = format!("{:?}", e); error!( diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index 77becef7df..a6acf308fa 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -11,7 +11,7 @@ use beacon_chain::{ use eth2::types::{self as api_types}; use lighthouse_network::PubsubMessage; use network::NetworkMessage; -use slog::{error, warn, Logger}; +use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; use std::cmp::max; use std::collections::HashMap; @@ -189,6 +189,24 @@ pub fn process_sync_committee_signatures<T: BeaconChainTypes>( verified_for_pool = Some(verified); } + // If this validator has already published a sync message, just ignore this message + // without returning an error. + // + // This is likely to happen when a VC uses fallback BNs. If the first BN publishes + // the message and then fails to respond in a timely fashion then the VC will move + // to the second BN. The BN will then report that this message has already been + // seen, which is not actually an error as far as the network or user are concerned. + Err(SyncVerificationError::PriorSyncCommitteeMessageKnown { + validator_index, + slot, + }) => { + debug!( + log, + "Ignoring already-known sync message"; + "slot" => slot, + "validator_index" => validator_index, + ); + } Err(e) => { error!( log, @@ -283,6 +301,16 @@ pub fn process_signed_contribution_and_proofs<T: BeaconChainTypes>( // If we already know the contribution, don't broadcast it or attempt to // further verify it. Return success. Err(SyncVerificationError::SyncContributionAlreadyKnown(_)) => continue, + // If we've already seen this aggregator produce an aggregate, just + // skip this one. 
+ // + // We're likely to see this with VCs that use fallback BNs. The first + // BN might time-out *after* publishing the aggregate and then the + // second BN will indicate it's already seen the aggregate. + // + // There's no actual error for the user or the network since the + // aggregate has been successfully published by some other node. + Err(SyncVerificationError::AggregatorAlreadyKnown(_)) => continue, Err(e) => { error!( log, From c25934956b30dc7beb915b860f02eb7f68ad3a24 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay <pawandhananjay@gmail.com> Date: Wed, 10 Aug 2022 07:52:58 +0000 Subject: [PATCH 128/184] Remove INVALID_TERMINAL_BLOCK (#3385) ## Issue Addressed Resolves #3379 ## Proposed Changes Remove instances of `InvalidTerminalBlock` in lighthouse and use `Invalid {latest_valid_hash: "0x0000000000000000000000000000000000000000000000000000000000000000"}` to represent that status. --- beacon_node/beacon_chain/src/beacon_chain.rs | 35 ++++++++----- .../beacon_chain/src/execution_payload.rs | 7 ++- .../tests/payload_invalidation.rs | 52 ++++++++++--------- beacon_node/execution_layer/src/engine_api.rs | 1 - .../src/engine_api/json_structures.rs | 7 --- .../execution_layer/src/payload_status.rs | 19 ------- .../execution_layer/src/test_utils/mod.rs | 4 +- 7 files changed, 58 insertions(+), 67 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d6503d687c..54c961e34d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4084,21 +4084,32 @@ impl<T: BeaconChainTypes> BeaconChain<T> { "Fork choice update invalidated payload"; "status" => ?status ); - // The execution engine has stated that all blocks between the - // `head_execution_block_hash` and `latest_valid_hash` are invalid. 
- self.process_invalid_execution_payload( - &InvalidationOperation::InvalidateMany { - head_block_root, - always_invalidate_head: true, - latest_valid_ancestor: latest_valid_hash, - }, - ) - .await?; + + // This implies that the terminal block was invalid. We are being explicit in + // invalidating only the head block in this case. + if latest_valid_hash == ExecutionBlockHash::zero() { + self.process_invalid_execution_payload( + &InvalidationOperation::InvalidateOne { + block_root: head_block_root, + }, + ) + .await?; + } else { + // The execution engine has stated that all blocks between the + // `head_execution_block_hash` and `latest_valid_hash` are invalid. + self.process_invalid_execution_payload( + &InvalidationOperation::InvalidateMany { + head_block_root, + always_invalidate_head: true, + latest_valid_ancestor: latest_valid_hash, + }, + ) + .await?; + } Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } - PayloadStatus::InvalidTerminalBlock { .. } - | PayloadStatus::InvalidBlockHash { .. } => { + PayloadStatus::InvalidBlockHash { .. } => { warn!( self.log, "Fork choice update invalidated payload"; diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 3c530aaac8..7af171b794 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -114,6 +114,11 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( PayloadStatus::Invalid { latest_valid_hash, .. } => { + // latest_valid_hash == 0 implies that this was the terminal block + // Hence, we don't need to run `BeaconChain::process_invalid_execution_payload`. + if latest_valid_hash == ExecutionBlockHash::zero() { + return Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()); + } // This block has not yet been applied to fork choice, so the latest block that was // imported to fork choice was the parent. 
let latest_root = block.parent_root(); @@ -127,7 +132,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()) } - PayloadStatus::InvalidTerminalBlock { .. } | PayloadStatus::InvalidBlockHash { .. } => { + PayloadStatus::InvalidBlockHash { .. } => { // Returning an error here should be sufficient to invalidate the block. We have no // information to indicate its parent is invalid, so no need to run // `BeaconChain::process_invalid_execution_payload`. diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 7728b319d9..027a708cfa 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -40,7 +40,6 @@ enum Payload { }, Syncing, InvalidBlockHash, - InvalidTerminalBlock, } struct InvalidPayloadRig { @@ -231,16 +230,20 @@ impl InvalidPayloadRig { Payload::Invalid { latest_valid_hash } => { let latest_valid_hash = latest_valid_hash .unwrap_or_else(|| self.block_hash(block.message().parent_root())); - mock_execution_layer - .server - .all_payloads_invalid_on_new_payload(latest_valid_hash) + if latest_valid_hash == ExecutionBlockHash::zero() { + mock_execution_layer + .server + .all_payloads_invalid_terminal_block_on_new_payload() + } else { + mock_execution_layer + .server + .all_payloads_invalid_on_new_payload(latest_valid_hash) + } } + Payload::InvalidBlockHash => mock_execution_layer .server .all_payloads_invalid_block_hash_on_new_payload(), - Payload::InvalidTerminalBlock => mock_execution_layer - .server - .all_payloads_invalid_terminal_block_on_new_payload(), }; let set_forkchoice_updated = |payload: Payload| match payload { Payload::Valid => mock_execution_layer @@ -252,16 +255,20 @@ impl InvalidPayloadRig { Payload::Invalid { latest_valid_hash } => { let latest_valid_hash = latest_valid_hash .unwrap_or_else(|| 
self.block_hash(block.message().parent_root())); - mock_execution_layer - .server - .all_payloads_invalid_on_forkchoice_updated(latest_valid_hash) + if latest_valid_hash == ExecutionBlockHash::zero() { + mock_execution_layer + .server + .all_payloads_invalid_terminal_block_on_forkchoice_updated() + } else { + mock_execution_layer + .server + .all_payloads_invalid_on_forkchoice_updated(latest_valid_hash) + } } + Payload::InvalidBlockHash => mock_execution_layer .server .all_payloads_invalid_block_hash_on_forkchoice_updated(), - Payload::InvalidTerminalBlock => mock_execution_layer - .server - .all_payloads_invalid_terminal_block_on_forkchoice_updated(), }; match (new_payload_response, forkchoice_response) { @@ -294,9 +301,7 @@ impl InvalidPayloadRig { match forkchoice_response { Payload::Syncing => assert!(execution_status.is_strictly_optimistic()), Payload::Valid => assert!(execution_status.is_valid_and_post_bellatrix()), - Payload::Invalid { .. } - | Payload::InvalidBlockHash - | Payload::InvalidTerminalBlock => unreachable!(), + Payload::Invalid { .. } | Payload::InvalidBlockHash => unreachable!(), } assert_eq!( @@ -310,14 +315,8 @@ impl InvalidPayloadRig { "block from db must match block imported" ); } - ( - Payload::Invalid { .. } | Payload::InvalidBlockHash | Payload::InvalidTerminalBlock, - _, - ) - | ( - _, - Payload::Invalid { .. } | Payload::InvalidBlockHash | Payload::InvalidTerminalBlock, - ) => { + (Payload::Invalid { .. } | Payload::InvalidBlockHash, _) + | (_, Payload::Invalid { .. 
} | Payload::InvalidBlockHash) => { set_new_payload(new_payload_response); set_forkchoice_updated(forkchoice_response); @@ -473,7 +472,10 @@ async fn immediate_forkchoice_update_payload_invalid_block_hash() { #[tokio::test] async fn immediate_forkchoice_update_payload_invalid_terminal_block() { - immediate_forkchoice_update_invalid_test(|_| Payload::InvalidTerminalBlock).await + immediate_forkchoice_update_invalid_test(|_| Payload::Invalid { + latest_valid_hash: Some(ExecutionBlockHash::zero()), + }) + .await } /// Ensure the client tries to exit when the justified checkpoint is invalidated. diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 4f957d6387..c370985ec0 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -78,7 +78,6 @@ pub enum PayloadStatusV1Status { Syncing, Accepted, InvalidBlockHash, - InvalidTerminalBlock, } #[derive(Clone, Debug, PartialEq)] diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 9ed38b61b0..31aa79f055 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -319,7 +319,6 @@ pub enum JsonPayloadStatusV1Status { Syncing, Accepted, InvalidBlockHash, - InvalidTerminalBlock, } #[derive(Debug, PartialEq, Serialize, Deserialize)] @@ -338,9 +337,6 @@ impl From<PayloadStatusV1Status> for JsonPayloadStatusV1Status { PayloadStatusV1Status::Syncing => JsonPayloadStatusV1Status::Syncing, PayloadStatusV1Status::Accepted => JsonPayloadStatusV1Status::Accepted, PayloadStatusV1Status::InvalidBlockHash => JsonPayloadStatusV1Status::InvalidBlockHash, - PayloadStatusV1Status::InvalidTerminalBlock => { - JsonPayloadStatusV1Status::InvalidTerminalBlock - } } } } @@ -352,9 +348,6 @@ impl From<JsonPayloadStatusV1Status> for PayloadStatusV1Status { 
JsonPayloadStatusV1Status::Syncing => PayloadStatusV1Status::Syncing, JsonPayloadStatusV1Status::Accepted => PayloadStatusV1Status::Accepted, JsonPayloadStatusV1Status::InvalidBlockHash => PayloadStatusV1Status::InvalidBlockHash, - JsonPayloadStatusV1Status::InvalidTerminalBlock => { - PayloadStatusV1Status::InvalidTerminalBlock - } } } } diff --git a/beacon_node/execution_layer/src/payload_status.rs b/beacon_node/execution_layer/src/payload_status.rs index 46917a0aa5..7db8e234d1 100644 --- a/beacon_node/execution_layer/src/payload_status.rs +++ b/beacon_node/execution_layer/src/payload_status.rs @@ -18,9 +18,6 @@ pub enum PayloadStatus { InvalidBlockHash { validation_error: Option<String>, }, - InvalidTerminalBlock { - validation_error: Option<String>, - }, } /// Processes the response from the execution engine. @@ -90,22 +87,6 @@ pub fn process_payload_status( validation_error: response.validation_error.clone(), }) } - PayloadStatusV1Status::InvalidTerminalBlock => { - // In the interests of being liberal with what we accept, only raise a - // warning here. - if response.latest_valid_hash.is_some() { - warn!( - log, - "Malformed response from execution engine"; - "msg" => "expected a null latest_valid_hash", - "status" => ?response.status - ) - } - - Ok(PayloadStatus::InvalidTerminalBlock { - validation_error: response.validation_error.clone(), - }) - } PayloadStatusV1Status::Syncing => { // In the interests of being liberal with what we accept, only raise a // warning here. 
diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 462e34e910..18612bf303 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -245,8 +245,8 @@ impl<T: EthSpec> MockServer<T> { fn invalid_terminal_block_status() -> PayloadStatusV1 { PayloadStatusV1 { - status: PayloadStatusV1Status::InvalidTerminalBlock, - latest_valid_hash: None, + status: PayloadStatusV1Status::Invalid, + latest_valid_hash: Some(ExecutionBlockHash::zero()), validation_error: Some("static response".into()), } } From 4e05f19fb526237fd7423801a8bdee3b7e0ec0b7 Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Wed, 10 Aug 2022 07:52:59 +0000 Subject: [PATCH 129/184] Serve Bellatrix preset in BN API (#3425) ## Issue Addressed Resolves #3388 Resolves #2638 ## Proposed Changes - Return the `BellatrixPreset` on `/eth/v1/config/spec` by default. - Allow users to opt out of this by providing `--http-spec-fork=altair` (unless there's a Bellatrix fork epoch set). - Add the Altair constants from #2638 and make serving the constants non-optional (the `http-disable-legacy-spec` flag is deprecated). - Modify the VC to only read the `Config` and not to log extra fields. This prevents it from having to muck around parsing the `ConfigAndPreset` fields it doesn't need. ## Additional Info This change is backwards-compatible for the VC and the BN, but is marked as a breaking change for the removal of `--http-disable-legacy-spec`. I tried making `Config` a `superstruct` too, but getting the automatic decoding to work was a huge pain and was going to require a lot of hacks, so I gave up in favour of keeping the default-based approach we have now. 
--- Cargo.lock | 1 + beacon_node/beacon_chain/src/test_utils.rs | 3 +- beacon_node/http_api/src/lib.rs | 13 +- beacon_node/http_api/tests/common.rs | 2 +- beacon_node/http_api/tests/tests.rs | 11 +- beacon_node/src/cli.rs | 10 +- beacon_node/src/config.rs | 9 +- common/eth2/src/lib.rs | 4 +- common/eth2/src/lighthouse_vc/http_client.rs | 4 +- common/eth2/src/mixin.rs | 6 +- consensus/types/Cargo.toml | 1 + consensus/types/src/chain_spec.rs | 21 +-- consensus/types/src/config_and_preset.rs | 143 ++++++++++--------- consensus/types/src/fork_name.rs | 12 +- consensus/types/src/lib.rs | 4 +- lighthouse/tests/beacon_node.rs | 17 ++- validator_client/src/beacon_node_fallback.rs | 30 ++-- validator_client/src/http_api/mod.rs | 3 +- validator_client/src/http_api/tests.rs | 15 +- 19 files changed, 167 insertions(+), 142 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3702b95485..a6b5f56374 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7073,6 +7073,7 @@ dependencies = [ "itertools", "lazy_static", "log", + "maplit", "parking_lot 0.12.1", "rand 0.8.5", "rand_xorshift", diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 411bd7b1fd..9b62590703 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -132,8 +132,7 @@ pub fn test_spec<E: EthSpec>() -> ChainSpec { FORK_NAME_ENV_VAR, e ) }); - let fork = ForkName::from_str(fork_name.as_str()) - .unwrap_or_else(|()| panic!("unknown FORK_NAME: {}", fork_name)); + let fork = ForkName::from_str(fork_name.as_str()).unwrap(); fork.make_genesis_spec(E::default_spec()) } else { E::default_spec() diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index ec34dd0663..bcd8788465 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -104,9 +104,9 @@ pub struct Config { pub listen_addr: IpAddr, pub listen_port: u16, pub allow_origin: Option<String>, - pub serve_legacy_spec: 
bool, pub tls_config: Option<TlsConfig>, pub allow_sync_stalled: bool, + pub spec_fork_name: Option<ForkName>, } impl Default for Config { @@ -116,9 +116,9 @@ impl Default for Config { listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), listen_port: 5052, allow_origin: None, - serve_legacy_spec: true, tls_config: None, allow_sync_stalled: false, + spec_fork_name: None, } } } @@ -1534,18 +1534,15 @@ pub fn serve<T: BeaconChainTypes>( }); // GET config/spec - let serve_legacy_spec = ctx.config.serve_legacy_spec; + let spec_fork_name = ctx.config.spec_fork_name; let get_config_spec = config_path .and(warp::path("spec")) .and(warp::path::end()) .and(chain_filter.clone()) .and_then(move |chain: Arc<BeaconChain<T>>| { blocking_json_task(move || { - let mut config_and_preset = - ConfigAndPreset::from_chain_spec::<T::EthSpec>(&chain.spec); - if serve_legacy_spec { - config_and_preset.make_backwards_compat(&chain.spec); - } + let config_and_preset = + ConfigAndPreset::from_chain_spec::<T::EthSpec>(&chain.spec, spec_fork_name); Ok(api_types::GenericResponse::from(config_and_preset)) }) }); diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index 8f9856991f..1dd7aea923 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -141,9 +141,9 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>( listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), listen_port: port, allow_origin: None, - serve_legacy_spec: true, tls_config: None, allow_sync_stalled: false, + spec_fork_name: None, }, chain: Some(chain.clone()), network_tx: Some(network_tx), diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index fa41102292..bd25450a47 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1253,10 +1253,13 @@ impl ApiTester { } pub async fn test_get_config_spec(self) -> Self { - let result = 
self.client.get_config_spec().await.unwrap().data; - - let mut expected = ConfigAndPreset::from_chain_spec::<E>(&self.chain.spec); - expected.make_backwards_compat(&self.chain.spec); + let result = self + .client + .get_config_spec::<ConfigAndPresetBellatrix>() + .await + .map(|res| ConfigAndPreset::Bellatrix(res.data)) + .unwrap(); + let expected = ConfigAndPreset::from_chain_spec::<E>(&self.chain.spec, None); assert_eq!(result, expected); diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 3515263878..edf79ad34f 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -229,8 +229,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("http-disable-legacy-spec") .long("http-disable-legacy-spec") - .help("Disable serving of legacy data on the /config/spec endpoint. May be \ - disabled by default in a future release.") + .hidden(true) + ) + .arg( + Arg::with_name("http-spec-fork") + .long("http-spec-fork") + .help("Serve the spec for a specific hard fork on /eth/v1/config/spec. It should \ + not be necessary to set this flag.") + .takes_value(true) ) .arg( Arg::with_name("http-enable-tls") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 6daee50de0..35d566d76e 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -116,7 +116,14 @@ pub fn get_config<E: EthSpec>( } if cli_args.is_present("http-disable-legacy-spec") { - client_config.http_api.serve_legacy_spec = false; + warn!( + log, + "The flag --http-disable-legacy-spec is deprecated and will be removed" + ); + } + + if let Some(fork_name) = clap_utils::parse_optional(cli_args, "http-spec-fork")? 
{ + client_config.http_api.spec_fork_name = Some(fork_name); } if cli_args.is_present("http-enable-tls") { diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 21608ba6dd..6317523fee 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -977,7 +977,9 @@ impl BeaconNodeHttpClient { } /// `GET config/spec` - pub async fn get_config_spec(&self) -> Result<GenericResponse<ConfigAndPreset>, Error> { + pub async fn get_config_spec<T: Serialize + DeserializeOwned>( + &self, + ) -> Result<GenericResponse<T>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index abed4fe5e7..5f83e81aa0 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -354,7 +354,9 @@ impl ValidatorClientHttpClient { } /// `GET lighthouse/spec` - pub async fn get_lighthouse_spec(&self) -> Result<GenericResponse<ConfigAndPreset>, Error> { + pub async fn get_lighthouse_spec<T: Serialize + DeserializeOwned>( + &self, + ) -> Result<GenericResponse<T>, Error> { let mut path = self.server.full.clone(); path.path_segments_mut() diff --git a/common/eth2/src/mixin.rs b/common/eth2/src/mixin.rs index 1de26961e6..a33cf8a40c 100644 --- a/common/eth2/src/mixin.rs +++ b/common/eth2/src/mixin.rs @@ -21,17 +21,17 @@ impl ResponseOptional for Result<Response, Error> { /// Trait for extracting the fork name from the headers of a response. 
pub trait ResponseForkName { #[allow(clippy::result_unit_err)] - fn fork_name_from_header(&self) -> Result<Option<ForkName>, ()>; + fn fork_name_from_header(&self) -> Result<Option<ForkName>, String>; } impl ResponseForkName for Response { - fn fork_name_from_header(&self) -> Result<Option<ForkName>, ()> { + fn fork_name_from_header(&self) -> Result<Option<ForkName>, String> { self.headers() .get(CONSENSUS_VERSION_HEADER) .map(|fork_name| { fork_name .to_str() - .map_err(|_| ()) + .map_err(|e| e.to_string()) .and_then(ForkName::from_str) }) .transpose() diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index c3e454fdfc..68fdbf7990 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -47,6 +47,7 @@ superstruct = "0.5.0" serde_json = "1.0.74" smallvec = "1.8.0" serde_with = "1.13.0" +maplit = "1.0.2" [dev-dependencies] criterion = "0.3.3" diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 3668d0524c..8d56ce2da9 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -803,6 +803,10 @@ impl Default for ChainSpec { } /// Exact implementation of the *config* object from the Ethereum spec (YAML/JSON). +/// +/// Fields relevant to hard forks after Altair should be optional so that we can continue +/// to parse Altair configs. This default approach turns out to be much simpler than trying to +/// make `Config` a superstruct because of the hassle of deserializing an untagged enum. 
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] #[serde(rename_all = "UPPERCASE")] pub struct Config { @@ -813,17 +817,13 @@ pub struct Config { #[serde(default)] pub preset_base: String, - // TODO(merge): remove this default #[serde(default = "default_terminal_total_difficulty")] #[serde(with = "eth2_serde_utils::quoted_u256")] pub terminal_total_difficulty: Uint256, - // TODO(merge): remove this default #[serde(default = "default_terminal_block_hash")] pub terminal_block_hash: ExecutionBlockHash, - // TODO(merge): remove this default #[serde(default = "default_terminal_block_hash_activation_epoch")] pub terminal_block_hash_activation_epoch: Epoch, - // TODO(merge): remove this default #[serde(default = "default_safe_slots_to_import_optimistically")] #[serde(with = "eth2_serde_utils::quoted_u64")] pub safe_slots_to_import_optimistically: u64, @@ -843,12 +843,10 @@ pub struct Config { #[serde(deserialize_with = "deserialize_fork_epoch")] pub altair_fork_epoch: Option<MaybeQuoted<Epoch>>, - // TODO(merge): remove this default #[serde(default = "default_bellatrix_fork_version")] #[serde(with = "eth2_serde_utils::bytes_4_hex")] bellatrix_fork_version: [u8; 4], - // TODO(merge): remove this default - #[serde(default = "default_bellatrix_fork_epoch")] + #[serde(default)] #[serde(serialize_with = "serialize_fork_epoch")] #[serde(deserialize_with = "deserialize_fork_epoch")] pub bellatrix_fork_epoch: Option<MaybeQuoted<Epoch>>, @@ -890,10 +888,6 @@ fn default_bellatrix_fork_version() -> [u8; 4] { [0xff, 0xff, 0xff, 0xff] } -fn default_bellatrix_fork_epoch() -> Option<MaybeQuoted<Epoch>> { - None -} - /// Placeholder value: 2^256-2^10 (115792089237316195423570985008687907853269984665640564039457584007913129638912). 
/// /// Taken from https://github.com/ethereum/consensus-specs/blob/d5e4828aecafaf1c57ef67a5f23c4ae7b08c5137/configs/mainnet.yaml#L15-L16 @@ -1335,10 +1329,7 @@ mod yaml_tests { default_safe_slots_to_import_optimistically() ); - assert_eq!( - chain_spec.bellatrix_fork_epoch, - default_bellatrix_fork_epoch() - ); + assert_eq!(chain_spec.bellatrix_fork_epoch, None); assert_eq!( chain_spec.bellatrix_fork_version, diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index 8b3a753bd5..e624afe2db 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -1,12 +1,21 @@ -use crate::{AltairPreset, BasePreset, BellatrixPreset, ChainSpec, Config, EthSpec}; +use crate::{ + consts::altair, AltairPreset, BasePreset, BellatrixPreset, ChainSpec, Config, EthSpec, ForkName, +}; +use maplit::hashmap; use serde_derive::{Deserialize, Serialize}; use serde_json::Value; use std::collections::HashMap; +use superstruct::superstruct; /// Fusion of a runtime-config with the compile-time preset values. /// /// Mostly useful for the API. +#[superstruct( + variants(Altair, Bellatrix), + variant_attributes(derive(Serialize, Deserialize, Debug, PartialEq, Clone)) +)] #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +#[serde(untagged)] pub struct ConfigAndPreset { #[serde(flatten)] pub config: Config, @@ -15,80 +24,75 @@ pub struct ConfigAndPreset { pub base_preset: BasePreset, #[serde(flatten)] pub altair_preset: AltairPreset, - // TODO(merge): re-enable - // #[serde(flatten)] - // pub bellatrix_preset: BellatrixPreset, + #[superstruct(only(Bellatrix))] + #[serde(flatten)] + pub bellatrix_preset: BellatrixPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. 
#[serde(flatten)] pub extra_fields: HashMap<String, Value>, } impl ConfigAndPreset { - pub fn from_chain_spec<T: EthSpec>(spec: &ChainSpec) -> Self { + pub fn from_chain_spec<T: EthSpec>(spec: &ChainSpec, fork_name: Option<ForkName>) -> Self { let config = Config::from_chain_spec::<T>(spec); let base_preset = BasePreset::from_chain_spec::<T>(spec); let altair_preset = AltairPreset::from_chain_spec::<T>(spec); - // TODO(merge): re-enable - let _bellatrix_preset = BellatrixPreset::from_chain_spec::<T>(spec); - let extra_fields = HashMap::new(); + let extra_fields = get_extra_fields(spec); - Self { - config, - base_preset, - altair_preset, - extra_fields, + if spec.bellatrix_fork_epoch.is_some() + || fork_name == None + || fork_name == Some(ForkName::Merge) + { + let bellatrix_preset = BellatrixPreset::from_chain_spec::<T>(spec); + + ConfigAndPreset::Bellatrix(ConfigAndPresetBellatrix { + config, + base_preset, + altair_preset, + bellatrix_preset, + extra_fields, + }) + } else { + ConfigAndPreset::Altair(ConfigAndPresetAltair { + config, + base_preset, + altair_preset, + extra_fields, + }) } } +} - /// Add fields that were previously part of the config but are now constants. 
- pub fn make_backwards_compat(&mut self, spec: &ChainSpec) { - let hex_string = |value: &[u8]| format!("0x{}", hex::encode(&value)); - let u32_hex = |v: u32| hex_string(&v.to_le_bytes()); - let u8_hex = |v: u8| hex_string(&v.to_le_bytes()); - let fields = vec![ - ( - "bls_withdrawal_prefix", - u8_hex(spec.bls_withdrawal_prefix_byte), - ), - ( - "domain_beacon_proposer", - u32_hex(spec.domain_beacon_proposer), - ), - ( - "domain_beacon_attester", - u32_hex(spec.domain_beacon_attester), - ), - ("domain_randao", u32_hex(spec.domain_randao)), - ("domain_deposit", u32_hex(spec.domain_deposit)), - ("domain_voluntary_exit", u32_hex(spec.domain_voluntary_exit)), - ( - "domain_selection_proof", - u32_hex(spec.domain_selection_proof), - ), - ( - "domain_aggregate_and_proof", - u32_hex(spec.domain_aggregate_and_proof), - ), - ( - "domain_application_mask", - u32_hex(spec.domain_application_mask), - ), - ( - "target_aggregators_per_committee", - spec.target_aggregators_per_committee.to_string(), - ), - ( - "random_subnets_per_validator", - spec.random_subnets_per_validator.to_string(), - ), - ( - "epochs_per_random_subnet_subscription", - spec.epochs_per_random_subnet_subscription.to_string(), - ), - ]; - for (key, value) in fields { - self.extra_fields.insert(key.to_uppercase(), value.into()); - } +/// Get a hashmap of constants to add to the `PresetAndConfig` +pub fn get_extra_fields(spec: &ChainSpec) -> HashMap<String, Value> { + let hex_string = |value: &[u8]| format!("0x{}", hex::encode(&value)).into(); + let u32_hex = |v: u32| hex_string(&v.to_le_bytes()); + let u8_hex = |v: u8| hex_string(&v.to_le_bytes()); + hashmap! 
{ + "bls_withdrawal_prefix".to_uppercase() => u8_hex(spec.bls_withdrawal_prefix_byte), + "domain_beacon_proposer".to_uppercase() => u32_hex(spec.domain_beacon_proposer), + "domain_beacon_attester".to_uppercase() => u32_hex(spec.domain_beacon_attester), + "domain_randao".to_uppercase()=> u32_hex(spec.domain_randao), + "domain_deposit".to_uppercase()=> u32_hex(spec.domain_deposit), + "domain_voluntary_exit".to_uppercase() => u32_hex(spec.domain_voluntary_exit), + "domain_selection_proof".to_uppercase() => u32_hex(spec.domain_selection_proof), + "domain_aggregate_and_proof".to_uppercase() => u32_hex(spec.domain_aggregate_and_proof), + "domain_application_mask".to_uppercase()=> u32_hex(spec.domain_application_mask), + "target_aggregators_per_committee".to_uppercase() => + spec.target_aggregators_per_committee.to_string().into(), + "random_subnets_per_validator".to_uppercase() => + spec.random_subnets_per_validator.to_string().into(), + "epochs_per_random_subnet_subscription".to_uppercase() => + spec.epochs_per_random_subnet_subscription.to_string().into(), + "domain_contribution_and_proof".to_uppercase() => + u32_hex(spec.domain_contribution_and_proof), + "domain_sync_committee".to_uppercase() => u32_hex(spec.domain_sync_committee), + "domain_sync_committee_selection_proof".to_uppercase() => + u32_hex(spec.domain_sync_committee_selection_proof), + "sync_committee_subnet_count".to_uppercase() => + altair::SYNC_COMMITTEE_SUBNET_COUNT.to_string().into(), + "target_aggregators_per_sync_subcommittee".to_uppercase() => + altair::TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE.to_string().into(), } } @@ -108,15 +112,16 @@ mod test { .open(tmp_file.as_ref()) .expect("error opening file"); let mainnet_spec = ChainSpec::mainnet(); - let mut yamlconfig = ConfigAndPreset::from_chain_spec::<MainnetEthSpec>(&mainnet_spec); + let mut yamlconfig = + ConfigAndPreset::from_chain_spec::<MainnetEthSpec>(&mainnet_spec, None); let (k1, v1) = ("SAMPLE_HARDFORK_KEY1", "123456789"); let (k2, v2) = 
("SAMPLE_HARDFORK_KEY2", "987654321"); let (k3, v3) = ("SAMPLE_HARDFORK_KEY3", 32); let (k4, v4) = ("SAMPLE_HARDFORK_KEY4", Value::Null); - yamlconfig.extra_fields.insert(k1.into(), v1.into()); - yamlconfig.extra_fields.insert(k2.into(), v2.into()); - yamlconfig.extra_fields.insert(k3.into(), v3.into()); - yamlconfig.extra_fields.insert(k4.into(), v4); + yamlconfig.extra_fields_mut().insert(k1.into(), v1.into()); + yamlconfig.extra_fields_mut().insert(k2.into(), v2.into()); + yamlconfig.extra_fields_mut().insert(k3.into(), v3.into()); + yamlconfig.extra_fields_mut().insert(k4.into(), v4); serde_yaml::to_writer(writer, &yamlconfig).expect("failed to write or serialize"); @@ -125,8 +130,8 @@ mod test { .write(false) .open(tmp_file.as_ref()) .expect("error while opening the file"); - let from: ConfigAndPreset = + let from: ConfigAndPresetBellatrix = serde_yaml::from_reader(reader).expect("error while deserializing"); - assert_eq!(from, yamlconfig); + assert_eq!(ConfigAndPreset::Bellatrix(from), yamlconfig); } } diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 4a2e762087..e97b08309b 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -106,14 +106,14 @@ macro_rules! 
map_fork_name_with { } impl FromStr for ForkName { - type Err = (); + type Err = String; - fn from_str(fork_name: &str) -> Result<Self, ()> { + fn from_str(fork_name: &str) -> Result<Self, String> { Ok(match fork_name.to_lowercase().as_ref() { "phase0" | "base" => ForkName::Base, "altair" => ForkName::Altair, "bellatrix" | "merge" => ForkName::Merge, - _ => return Err(()), + _ => return Err(format!("unknown fork name: {}", fork_name)), }) } } @@ -138,7 +138,7 @@ impl TryFrom<String> for ForkName { type Error = String; fn try_from(s: String) -> Result<Self, Self::Error> { - Self::from_str(&s).map_err(|()| format!("Invalid fork name: {}", s)) + Self::from_str(&s) } } @@ -178,8 +178,8 @@ mod test { assert_eq!(ForkName::from_str("AlTaIr"), Ok(ForkName::Altair)); assert_eq!(ForkName::from_str("altair"), Ok(ForkName::Altair)); - assert_eq!(ForkName::from_str("NO_NAME"), Err(())); - assert_eq!(ForkName::from_str("no_name"), Err(())); + assert!(ForkName::from_str("NO_NAME").is_err()); + assert!(ForkName::from_str("no_name").is_err()); } #[test] diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 7823ec223c..f05012c0b7 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -110,7 +110,9 @@ pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *}; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; -pub use crate::config_and_preset::ConfigAndPreset; +pub use crate::config_and_preset::{ + ConfigAndPreset, ConfigAndPresetAltair, ConfigAndPresetBellatrix, +}; pub use crate::contribution_and_proof::ContributionAndProof; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; pub use crate::deposit_data::DepositData; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 0236ba6589..9d952e5cc5 100644 --- a/lighthouse/tests/beacon_node.rs +++ 
b/lighthouse/tests/beacon_node.rs @@ -11,7 +11,7 @@ use std::process::Command; use std::str::FromStr; use std::string::ToString; use tempfile::TempDir; -use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, Hash256, MainnetEthSpec}; +use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec}; use unused_port::{unused_tcp_port, unused_udp_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; @@ -949,6 +949,21 @@ fn http_tls_flags() { }); } +#[test] +fn http_spec_fork_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.http_api.spec_fork_name, None)); +} + +#[test] +fn http_spec_fork_override() { + CommandLineTest::new() + .flag("http-spec-fork", Some("altair")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.http_api.spec_fork_name, Some(ForkName::Altair))); +} + // Tests for Metrics flags. #[test] fn metrics_flag() { diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index d4f7c6c874..0b808e71bb 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -7,7 +7,7 @@ use crate::http_metrics::metrics::{inc_counter_vec, ENDPOINT_ERRORS, ENDPOINT_RE use environment::RuntimeContext; use eth2::BeaconNodeHttpClient; use futures::future; -use slog::{debug, error, info, warn, Logger}; +use slog::{error, info, warn, Logger}; use slot_clock::SlotClock; use std::fmt; use std::fmt::Debug; @@ -16,7 +16,7 @@ use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; use tokio::{sync::RwLock, time::sleep}; -use types::{ChainSpec, EthSpec}; +use types::{ChainSpec, Config, EthSpec}; /// The number of seconds *prior* to slot start that we will try and update the state of fallback /// nodes. @@ -213,9 +213,9 @@ impl<E: EthSpec> CandidateBeaconNode<E> { /// Checks if the node has the correct specification. 
async fn is_compatible(&self, spec: &ChainSpec, log: &Logger) -> Result<(), CandidateError> { - let config_and_preset = self + let config = self .beacon_node - .get_config_spec() + .get_config_spec::<Config>() .await .map_err(|e| { error!( @@ -228,25 +228,15 @@ impl<E: EthSpec> CandidateBeaconNode<E> { })? .data; - let beacon_node_spec = - ChainSpec::from_config::<E>(&config_and_preset.config).ok_or_else(|| { - error!( - log, - "The minimal/mainnet spec type of the beacon node does not match the validator \ - client. See the --network command."; - "endpoint" => %self.beacon_node, - ); - CandidateError::Incompatible - })?; - - if !config_and_preset.extra_fields.is_empty() { - debug!( + let beacon_node_spec = ChainSpec::from_config::<E>(&config).ok_or_else(|| { + error!( log, - "Beacon spec includes unknown fields"; + "The minimal/mainnet spec type of the beacon node does not match the validator \ + client. See the --network command."; "endpoint" => %self.beacon_node, - "fields" => ?config_and_preset.extra_fields, ); - } + CandidateError::Incompatible + })?; if beacon_node_spec.genesis_fork_version != spec.genesis_fork_version { error!( diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index a5d8d0e71c..1e48e86c05 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -217,8 +217,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( .and(signer.clone()) .and_then(|spec: Arc<_>, signer| { blocking_signed_json_task(signer, move || { - let mut config = ConfigAndPreset::from_chain_spec::<E>(&spec); - config.make_backwards_compat(&spec); + let config = ConfigAndPreset::from_chain_spec::<E>(&spec, None); Ok(api_types::GenericResponse::from(config)) }) }); diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index e67a82634c..b121dda5b1 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -208,10 
+208,13 @@ impl ApiTester { } pub async fn test_get_lighthouse_spec(self) -> Self { - let result = self.client.get_lighthouse_spec().await.unwrap().data; - - let mut expected = ConfigAndPreset::from_chain_spec::<E>(&E::default_spec()); - expected.make_backwards_compat(&E::default_spec()); + let result = self + .client + .get_lighthouse_spec::<ConfigAndPresetBellatrix>() + .await + .map(|res| ConfigAndPreset::Bellatrix(res.data)) + .unwrap(); + let expected = ConfigAndPreset::from_chain_spec::<E>(&E::default_spec(), None); assert_eq!(result, expected); @@ -623,7 +626,9 @@ fn routes_with_invalid_auth() { .await .test_with_invalid_auth(|client| async move { client.get_lighthouse_health().await }) .await - .test_with_invalid_auth(|client| async move { client.get_lighthouse_spec().await }) + .test_with_invalid_auth(|client| async move { + client.get_lighthouse_spec::<types::Config>().await + }) .await .test_with_invalid_auth( |client| async move { client.get_lighthouse_validators().await }, From 4fc0cb121c6f90b12ff10a87d4d091cc11285270 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Wed, 10 Aug 2022 13:06:46 +0000 Subject: [PATCH 130/184] Remove some "wontfix" TODOs for the merge (#3449) ## Issue Addressed NA ## Proposed Changes Removes three types of TODOs: 1. `execution_layer/src/lib.rs`: It was [determined](https://github.com/ethereum/consensus-specs/issues/2636#issuecomment-988688742) that there is no action required here. 2. `beacon_processor/worker/gossip_methods.rs`: Removed TODOs relating to peer scoring that have already been addressed via `epe.penalize_peer()`. - It seems `cargo fmt` wanted to adjust some things here as well :shrug: 3. `proto_array_fork_choice.rs`: it would be nice to remove that useless `bool` for cleanliness, but I don't think it's something we need to do and the TODO just makes things look messier IMO. ## Additional Info There should be no functional changes to the code in this PR. 
There are still some TODOs lingering, those ones require actual changes or more thought. --- beacon_node/execution_layer/src/lib.rs | 8 -------- .../beacon_processor/worker/gossip_methods.rs | 16 +++++++++++----- .../proto_array/src/proto_array_fork_choice.rs | 2 +- 3 files changed, 12 insertions(+), 14 deletions(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index f56ea8f797..e7bbc6cd5e 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1267,14 +1267,6 @@ impl<T: EthSpec> ExecutionLayer<T> { } /// Maps to the `eth_getBlockByHash` JSON-RPC call. - /// - /// ## TODO(merge) - /// - /// This will return an execution block regardless of whether or not it was created by a PoW - /// miner (pre-merge) or a PoS validator (post-merge). It's not immediately clear if this is - /// correct or not, see the discussion here: - /// - /// https://github.com/ethereum/consensus-specs/issues/2636 async fn get_pow_block( &self, engine: &Engine, diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 12172e0e53..e6625e43f8 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -771,12 +771,15 @@ impl<T: BeaconChainTypes> Worker<T> { debug!(self.log, "Could not verify block for gossip, ignoring the block"; "error" => %e); // Prevent recurring behaviour by penalizing the peer slightly. - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError, "gossip_block_high"); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "gossip_block_high", + ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; } - // TODO(merge): reconsider peer scoring for this event. 
- Err(ref e @BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { + Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { debug!(self.log, "Could not verify block for gossip, ignoring the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -795,7 +798,6 @@ impl<T: BeaconChainTypes> Worker<T> { | Err(e @ BlockError::TooManySkippedSlots { .. }) | Err(e @ BlockError::WeakSubjectivityConflict) | Err(e @ BlockError::InconsistentFork(_)) - // TODO(merge): reconsider peer scoring for this event. | Err(e @ BlockError::ExecutionPayloadError(_)) // TODO(merge): reconsider peer scoring for this event. | Err(e @ BlockError::ParentExecutionPayloadInvalid { .. }) @@ -803,7 +805,11 @@ impl<T: BeaconChainTypes> Worker<T> { warn!(self.log, "Could not verify block for gossip, rejecting the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError, "gossip_block_low"); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_block_low", + ); return None; } }; diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 306c986018..9902ccb1cc 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -38,7 +38,7 @@ pub enum ExecutionStatus { /// /// This `bool` only exists to satisfy our SSZ implementation which requires all variants /// to have a value. It can be set to anything. - Irrelevant(bool), // TODO(merge): fix bool. 
+ Irrelevant(bool), } impl ExecutionStatus { From e0f86588e634c186c0ab493694a8e4804fcbbf93 Mon Sep 17 00:00:00 2001 From: Alex Wied <centromere@users.noreply.github.com> Date: Thu, 11 Aug 2022 07:50:32 +0000 Subject: [PATCH 131/184] lighthouse_version: Fix version string regex (#3451) ## Issue Addressed N/A ## Proposed Changes If the build tree is not a git repository, the unit test will fail. This PR fixes the issue. ## Additional Info N/A --- common/lighthouse_version/src/lib.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index f5d4d44878..c499bf0498 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -37,8 +37,9 @@ mod test { #[test] fn version_formatting() { - let re = Regex::new(r"^Lighthouse/v[0-9]+\.[0-9]+\.[0-9]+(-rc.[0-9])?-[[:xdigit:]]{7}\+?$") - .unwrap(); + let re = + Regex::new(r"^Lighthouse/v[0-9]+\.[0-9]+\.[0-9]+(-rc.[0-9])?(-[[:xdigit:]]{7})?\+?$") + .unwrap(); assert!( re.is_match(VERSION), "version doesn't match regex: {}", From a476ae490705eb3e7ea58a57bdf1a88ffec289f6 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Thu, 11 Aug 2022 10:08:36 +0000 Subject: [PATCH 132/184] Linkcheck fix (#3452) ## Issue Addressed I think we're running into this in our linkcheck, so I'm going to first verify linkcheck fails on the current version, and then try downgrading it to see if it passes https://github.com/chronotope/chrono/issues/755 Co-authored-by: realbigsean <sean@sigmaprime.io> --- .github/workflows/linkcheck.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml index c23ee8df36..30a891febf 100644 --- a/.github/workflows/linkcheck.yml +++ b/.github/workflows/linkcheck.yml @@ -21,7 +21,7 @@ jobs: run: docker network create book - name: Run mdbook server - run: docker run -v ${{ github.workspace
}}/book:/book --network book --name book -p 3000:3000 -d peaceiris/mdbook:latest serve --hostname 0.0.0.0 + run: docker run -v ${{ github.workspace }}/book:/book --network book --name book -p 3000:3000 -d peaceiris/mdbook:v0.4.20-rust serve --hostname 0.0.0.0 - name: Print logs run: docker logs book From f4ffa9e0b4acbe3cc3b50f9eeeb6b3d87e58a1a5 Mon Sep 17 00:00:00 2001 From: Divma <divma@protonmail.com> Date: Fri, 12 Aug 2022 00:56:38 +0000 Subject: [PATCH 133/184] Handle processing results of non faulty batches (#3439) ## Issue Addressed Solves #3390 So after checking some logs @pawanjay176 got, we conclude that this happened because we blacklisted a chain after trying it "too much". Now here, in all occurrences it seems that "too much" means we got too many download failures. This happened very slowly, exactly because the batch is allowed to stay alive for very long times after not counting penalties when the ee is offline. The error here then was not that the batch failed because of offline ee errors, but that we blacklisted a chain because of download errors, which we can't pin on the chain but on the peer. This PR fixes that. ## Proposed Changes Adds a missing piece of logic so that if a chain fails for errors that can't be attributed to an objectively bad behavior from the peer, it is not blacklisted. The issue at hand occurred when new peers arrived claiming a head that had been wrongfully blacklisted, even if the original peers participating in the chain were not penalized. Another notable change is that we need to consider a batch invalid if it processed correctly but its next non empty batch fails processing. Now since a batch can fail processing in non empty ways, there is no need to mark as invalid previous batches. Improves some logging as well. ## Additional Info We should do this regardless of pausing sync on ee offline/unsynced state.
This is because I think it's almost impossible to ensure a processing result will reach in a predictable order with a synced notification from the ee. Doing this handles what I think are inevitable data races when we actually pause sync. This also fixes a return that reports which batch failed and caused us some confusion checking the logs --- .../network/src/beacon_processor/mod.rs | 4 +- .../src/beacon_processor/worker/mod.rs | 2 +- .../beacon_processor/worker/sync_methods.rs | 65 +++--- .../network/src/sync/backfill_sync/mod.rs | 131 ++++++------ .../network/src/sync/block_lookups/mod.rs | 31 +-- .../network/src/sync/block_lookups/tests.rs | 20 +- beacon_node/network/src/sync/manager.rs | 16 +- beacon_node/network/src/sync/mod.rs | 2 +- .../network/src/sync/range_sync/batch.rs | 101 +++++---- .../network/src/sync/range_sync/chain.rs | 194 ++++++++++-------- .../network/src/sync/range_sync/mod.rs | 2 +- .../network/src/sync/range_sync/range.rs | 4 +- 12 files changed, 298 insertions(+), 274 deletions(-) diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index a08f34f707..e9a115904d 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -76,9 +76,7 @@ mod work_reprocessing_queue; mod worker; use crate::beacon_processor::work_reprocessing_queue::QueuedGossipBlock; -pub use worker::{ - ChainSegmentProcessId, FailureMode, GossipAggregatePackage, GossipAttestationPackage, -}; +pub use worker::{ChainSegmentProcessId, GossipAggregatePackage, GossipAttestationPackage}; /// The maximum size of the channel for work events to the `BeaconProcessor`.
/// diff --git a/beacon_node/network/src/beacon_processor/worker/mod.rs b/beacon_node/network/src/beacon_processor/worker/mod.rs index 04147245ea..f907c49b7d 100644 --- a/beacon_node/network/src/beacon_processor/worker/mod.rs +++ b/beacon_node/network/src/beacon_processor/worker/mod.rs @@ -10,7 +10,7 @@ mod rpc_methods; mod sync_methods; pub use gossip_methods::{GossipAggregatePackage, GossipAttestationPackage}; -pub use sync_methods::{ChainSegmentProcessId, FailureMode}; +pub use sync_methods::ChainSegmentProcessId; pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 3b2429ee9b..760896e0e9 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -34,15 +34,6 @@ struct ChainSegmentFailed { message: String, /// Used to penalize peers. peer_action: Option<PeerAction>, - /// Failure mode - mode: FailureMode, -} - -/// Represents if a block processing failure was on the consensus or execution side. 
-#[derive(Debug)] -pub enum FailureMode { - ExecutionLayer { pause_sync: bool }, - ConsensusLayer, } impl<T: BeaconChainTypes> Worker<T> { @@ -150,7 +141,9 @@ impl<T: BeaconChainTypes> Worker<T> { "last_block_slot" => end_slot, "processed_blocks" => sent_blocks, "service"=> "sync"); - BatchProcessResult::Success(sent_blocks > 0) + BatchProcessResult::Success { + was_non_empty: sent_blocks > 0, + } } (imported_blocks, Err(e)) => { debug!(self.log, "Batch processing failed"; @@ -161,11 +154,12 @@ impl<T: BeaconChainTypes> Worker<T> { "imported_blocks" => imported_blocks, "error" => %e.message, "service" => "sync"); - - BatchProcessResult::Failed { - imported_blocks: imported_blocks > 0, - peer_action: e.peer_action, - mode: e.mode, + match e.peer_action { + Some(penalty) => BatchProcessResult::FaultyFailure { + imported_blocks: imported_blocks > 0, + penalty, + }, + None => BatchProcessResult::NonFaultyFailure, } } } @@ -184,7 +178,9 @@ impl<T: BeaconChainTypes> Worker<T> { "last_block_slot" => end_slot, "processed_blocks" => sent_blocks, "service"=> "sync"); - BatchProcessResult::Success(sent_blocks > 0) + BatchProcessResult::Success { + was_non_empty: sent_blocks > 0, + } } (_, Err(e)) => { debug!(self.log, "Backfill batch processing failed"; @@ -193,10 +189,12 @@ impl<T: BeaconChainTypes> Worker<T> { "last_block_slot" => end_slot, "error" => %e.message, "service" => "sync"); - BatchProcessResult::Failed { - imported_blocks: false, - peer_action: e.peer_action, - mode: e.mode, + match e.peer_action { + Some(penalty) => BatchProcessResult::FaultyFailure { + imported_blocks: false, + penalty, + }, + None => BatchProcessResult::NonFaultyFailure, } } } @@ -216,15 +214,19 @@ impl<T: BeaconChainTypes> Worker<T> { { (imported_blocks, Err(e)) => { debug!(self.log, "Parent lookup failed"; "error" => %e.message); - BatchProcessResult::Failed { - imported_blocks: imported_blocks > 0, - peer_action: e.peer_action, - mode: e.mode, + match e.peer_action { + Some(penalty) => 
BatchProcessResult::FaultyFailure { + imported_blocks: imported_blocks > 0, + penalty, + }, + None => BatchProcessResult::NonFaultyFailure, } } (imported_blocks, Ok(_)) => { debug!(self.log, "Parent lookup processed successfully"); - BatchProcessResult::Success(imported_blocks > 0) + BatchProcessResult::Success { + was_non_empty: imported_blocks > 0, + } } } } @@ -307,7 +309,6 @@ impl<T: BeaconChainTypes> Worker<T> { message: String::from("mismatched_block_root"), // The peer is faulty if they send blocks with bad roots. peer_action: Some(PeerAction::LowToleranceError), - mode: FailureMode::ConsensusLayer, } } HistoricalBlockError::InvalidSignature @@ -322,7 +323,6 @@ impl<T: BeaconChainTypes> Worker<T> { message: "invalid_signature".into(), // The peer is faulty if they bad signatures. peer_action: Some(PeerAction::LowToleranceError), - mode: FailureMode::ConsensusLayer, } } HistoricalBlockError::ValidatorPubkeyCacheTimeout => { @@ -336,7 +336,6 @@ impl<T: BeaconChainTypes> Worker<T> { message: "pubkey_cache_timeout".into(), // This is an internal error, do not penalize the peer. peer_action: None, - mode: FailureMode::ConsensusLayer, } } HistoricalBlockError::NoAnchorInfo => { @@ -347,7 +346,6 @@ impl<T: BeaconChainTypes> Worker<T> { // There is no need to do a historical sync, this is not a fault of // the peer. peer_action: None, - mode: FailureMode::ConsensusLayer, } } HistoricalBlockError::IndexOutOfBounds => { @@ -360,7 +358,6 @@ impl<T: BeaconChainTypes> Worker<T> { message: String::from("logic_error"), // This should never occur, don't penalize the peer. peer_action: None, - mode: FailureMode::ConsensusLayer, } } HistoricalBlockError::BlockOutOfRange { .. } => { @@ -373,7 +370,6 @@ impl<T: BeaconChainTypes> Worker<T> { message: String::from("unexpected_error"), // This should never occur, don't penalize the peer. 
peer_action: None, - mode: FailureMode::ConsensusLayer, } } }, @@ -383,7 +379,6 @@ impl<T: BeaconChainTypes> Worker<T> { message: format!("{:?}", other), // This is an internal error, don't penalize the peer. peer_action: None, - mode: FailureMode::ConsensusLayer, } } }; @@ -404,7 +399,6 @@ impl<T: BeaconChainTypes> Worker<T> { message: format!("Block has an unknown parent: {}", block.parent_root()), // Peers are faulty if they send non-sequential blocks. peer_action: Some(PeerAction::LowToleranceError), - mode: FailureMode::ConsensusLayer, }) } BlockError::BlockIsAlreadyKnown => { @@ -442,7 +436,6 @@ impl<T: BeaconChainTypes> Worker<T> { ), // Peers are faulty if they send blocks from the future. peer_action: Some(PeerAction::LowToleranceError), - mode: FailureMode::ConsensusLayer, }) } BlockError::WouldRevertFinalizedSlot { .. } => { @@ -464,7 +457,6 @@ impl<T: BeaconChainTypes> Worker<T> { message: format!("Internal error whilst processing block: {:?}", e), // Do not penalize peers for internal errors. peer_action: None, - mode: FailureMode::ConsensusLayer, }) } ref err @ BlockError::ExecutionPayloadError(ref epe) => { @@ -480,7 +472,6 @@ impl<T: BeaconChainTypes> Worker<T> { message: format!("Execution layer offline. Reason: {:?}", err), // Do not penalize peers for internal errors. peer_action: None, - mode: FailureMode::ExecutionLayer { pause_sync: true }, }) } else { debug!(self.log, @@ -493,7 +484,6 @@ impl<T: BeaconChainTypes> Worker<T> { err ), peer_action: Some(PeerAction::LowToleranceError), - mode: FailureMode::ExecutionLayer { pause_sync: false }, }) } } @@ -508,7 +498,6 @@ impl<T: BeaconChainTypes> Worker<T> { message: format!("Peer sent invalid block. Reason: {:?}", other), // Do not penalize peers for internal errors. 
peer_action: None, - mode: FailureMode::ConsensusLayer, }) } } diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 7ff640065a..6767350ce3 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -8,10 +8,12 @@ //! If a batch fails, the backfill sync cannot progress. In this scenario, we mark the backfill //! sync as failed, log an error and attempt to retry once a new peer joins the node. -use crate::beacon_processor::{ChainSegmentProcessId, FailureMode, WorkEvent as BeaconWorkEvent}; +use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; use crate::sync::manager::{BatchProcessResult, Id}; use crate::sync::network_context::SyncNetworkContext; -use crate::sync::range_sync::{BatchConfig, BatchId, BatchInfo, BatchProcessingResult, BatchState}; +use crate::sync::range_sync::{ + BatchConfig, BatchId, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, +}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::types::{BackFillState, NetworkGlobals}; use lighthouse_network::{PeerAction, PeerId}; @@ -324,10 +326,10 @@ impl<T: BeaconChainTypes> BackFillSync<T> { for id in batch_ids { if let Some(batch) = self.batches.get_mut(&id) { match batch.download_failed(false) { - Ok(true) => { + Ok(BatchOperationOutcome::Failed { blacklist: _ }) => { self.fail_sync(BackFillError::BatchDownloadFailed(id))?; } - Ok(false) => {} + Ok(BatchOperationOutcome::Continue) => {} Err(e) => { self.fail_sync(BackFillError::BatchInvalidState(id, e.0))?; } @@ -371,8 +373,10 @@ impl<T: BeaconChainTypes> BackFillSync<T> { } match batch.download_failed(true) { Err(e) => self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0)), - Ok(true) => self.fail_sync(BackFillError::BatchDownloadFailed(batch_id)), - Ok(false) => self.retry_batch_download(network, batch_id), + Ok(BatchOperationOutcome::Failed { 
blacklist: _ }) => { + self.fail_sync(BackFillError::BatchDownloadFailed(batch_id)) + } + Ok(BatchOperationOutcome::Continue) => self.retry_batch_download(network, batch_id), } } else { // this could be an error for an old batch, removed when the chain advances @@ -439,7 +443,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { self.process_completed_batches(network) } Err(result) => { - let (expected_boundary, received_boundary, is_failed) = match result { + let (expected_boundary, received_boundary, outcome) = match result { Err(e) => { return self .fail_sync(BackFillError::BatchInvalidState(batch_id, e.0)) @@ -450,7 +454,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { warn!(self.log, "Batch received out of range blocks"; "expected_boundary" => expected_boundary, "received_boundary" => received_boundary, "peer_id" => %peer_id, batch); - if is_failed { + if let BatchOperationOutcome::Failed { blacklist: _ } = outcome { error!(self.log, "Backfill failed"; "epoch" => batch_id, "received_boundary" => received_boundary, "expected_boundary" => expected_boundary); return self .fail_sync(BackFillError::BatchDownloadFailed(batch_id)) @@ -547,16 +551,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { // blocks to continue, and the chain is expecting a processing result that won't // arrive. To mitigate this, (fake) fail this processing so that the batch is // re-downloaded. - self.on_batch_process_result( - network, - batch_id, - &BatchProcessResult::Failed { - imported_blocks: false, - // The beacon processor queue is full, no need to penalize the peer. - peer_action: None, - mode: FailureMode::ConsensusLayer, - }, - ) + self.on_batch_process_result(network, batch_id, &BatchProcessResult::NonFaultyFailure) } else { Ok(ProcessResult::Successful) } @@ -575,7 +570,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { // The first two cases are possible in regular sync, should not occur in backfill, but we // keep this logic for handling potential processing race conditions. 
// result - match &self.current_processing_batch { + let batch = match &self.current_processing_batch { Some(processing_id) if *processing_id != batch_id => { debug!(self.log, "Unexpected batch result"; "batch_epoch" => batch_id, "expected_batch_epoch" => processing_id); @@ -589,13 +584,9 @@ impl<T: BeaconChainTypes> BackFillSync<T> { _ => { // batch_id matches, continue self.current_processing_batch = None; - } - } - match result { - BatchProcessResult::Success(was_non_empty) => { - let batch = match self.batches.get_mut(&batch_id) { - Some(v) => v, + match self.batches.get_mut(&batch_id) { + Some(batch) => batch, None => { // This is an error. Fail the sync algorithm. return self @@ -605,8 +596,27 @@ impl<T: BeaconChainTypes> BackFillSync<T> { ))) .map(|_| ProcessResult::Successful); } - }; + } + } + }; + let peer = match batch.current_peer() { + Some(v) => *v, + None => { + return self + .fail_sync(BackFillError::BatchInvalidState( + batch_id, + String::from("Peer does not exist"), + )) + .map(|_| ProcessResult::Successful) + } + }; + + debug!(self.log, "Backfill batch processed"; "result" => ?result, &batch, + "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(&peer)); + + match result { + BatchProcessResult::Success { was_non_empty } => { if let Err(e) = batch.processing_completed(BatchProcessingResult::Success) { self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; } @@ -636,45 +646,17 @@ impl<T: BeaconChainTypes> BackFillSync<T> { self.process_completed_batches(network) } } - BatchProcessResult::Failed { + BatchProcessResult::FaultyFailure { imported_blocks, - peer_action, - mode: _, + penalty, } => { - let batch = match self.batches.get_mut(&batch_id) { - Some(v) => v, - None => { - return self - .fail_sync(BackFillError::InvalidSyncState(format!( - "Batch not found for current processing target {}", - batch_id - ))) - .map(|_| ProcessResult::Successful) - } - }; - - let peer = match batch.current_peer() { - Some(v) => 
*v, - None => { - return self - .fail_sync(BackFillError::BatchInvalidState( - batch_id, - String::from("Peer does not exist"), - )) - .map(|_| ProcessResult::Successful) - } - }; - debug!(self.log, "Batch processing failed"; "imported_blocks" => imported_blocks, - "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(&peer)); - match batch.processing_completed(BatchProcessingResult::Failed { - count_attempt: peer_action.is_some(), - }) { + match batch.processing_completed(BatchProcessingResult::FaultyFailure) { Err(e) => { // Batch was in the wrong state self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0)) .map(|_| ProcessResult::Successful) } - Ok(true) => { + Ok(BatchOperationOutcome::Failed { blacklist: _ }) => { // check that we have not exceeded the re-process retry counter // If a batch has exceeded the invalid batch lookup attempts limit, it means // that it is likely all peers are sending invalid batches @@ -683,23 +665,18 @@ impl<T: BeaconChainTypes> BackFillSync<T> { warn!( self.log, "Backfill batch failed to download. Penalizing peers"; - "score_adjustment" => %peer_action - .as_ref() - .map(ToString::to_string) - .unwrap_or_else(|| "None".into()), + "score_adjustment" => %penalty, "batch_epoch"=> batch_id ); - if let Some(peer_action) = peer_action { - for peer in self.participating_peers.drain() { - network.report_peer(peer, *peer_action, "backfill_batch_failed"); - } + for peer in self.participating_peers.drain() { + network.report_peer(peer, *penalty, "backfill_batch_failed"); } self.fail_sync(BackFillError::BatchProcessingFailed(batch_id)) .map(|_| ProcessResult::Successful) } - Ok(false) => { + Ok(BatchOperationOutcome::Continue) => { // chain can continue. 
Check if it can be progressed if *imported_blocks { // At least one block was successfully verified and imported, then we can be sure all @@ -713,6 +690,14 @@ impl<T: BeaconChainTypes> BackFillSync<T> { } } } + BatchProcessResult::NonFaultyFailure => { + if let Err(e) = batch.processing_completed(BatchProcessingResult::NonFaultyFailure) + { + self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; + } + self.retry_batch_download(network, batch_id) + .map(|_| ProcessResult::Successful) + } } } @@ -905,11 +890,11 @@ impl<T: BeaconChainTypes> BackFillSync<T> { .validation_failed() .map_err(|e| BackFillError::BatchInvalidState(batch_id, e.0))? { - true => { + BatchOperationOutcome::Failed { blacklist: _ } => { // Batch has failed and cannot be redownloaded. return self.fail_sync(BackFillError::BatchProcessingFailed(batch_id)); } - false => { + BatchOperationOutcome::Continue => { redownload_queue.push(*id); } } @@ -1010,8 +995,12 @@ impl<T: BeaconChainTypes> BackFillSync<T> { Err(e) => { self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))? } - Ok(true) => self.fail_sync(BackFillError::BatchDownloadFailed(batch_id))?, - Ok(false) => return self.retry_batch_download(network, batch_id), + Ok(BatchOperationOutcome::Failed { blacklist: _ }) => { + self.fail_sync(BackFillError::BatchDownloadFailed(batch_id))? 
+ } + Ok(BatchOperationOutcome::Continue) => { + return self.retry_batch_download(network, batch_id) + } } } } diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 2aa4acdb5a..9f2a5fdce7 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -11,7 +11,7 @@ use std::sync::Arc; use store::{Hash256, SignedBeaconBlock}; use tokio::sync::mpsc; -use crate::beacon_processor::{ChainSegmentProcessId, FailureMode, WorkEvent}; +use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent}; use crate::metrics; use self::{ @@ -610,35 +610,26 @@ impl<T: BeaconChainTypes> BlockLookups<T> { chain_hash ); #[cfg(not(debug_assertions))] - return crit!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); + return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); }; debug!(self.log, "Parent chain processed"; "chain_hash" => %chain_hash, "result" => ?result); match result { - BatchProcessResult::Success(_) => { + BatchProcessResult::Success { .. } => { // nothing to do. } - BatchProcessResult::Failed { + BatchProcessResult::FaultyFailure { imported_blocks: _, - peer_action, - mode, + penalty, } => { - if let FailureMode::ExecutionLayer { pause_sync: _ } = mode { - debug!( - self.log, - "Chain segment processing failed. 
Execution layer is offline"; - "chain_hash" => %chain_hash, - "error" => ?mode - ); - } else { - self.failed_chains.insert(parent_lookup.chain_hash()); - if let Some(peer_action) = peer_action { - for &peer_id in parent_lookup.used_peers() { - cx.report_peer(peer_id, peer_action, "parent_chain_failure") - } - } + self.failed_chains.insert(parent_lookup.chain_hash()); + for &peer_id in parent_lookup.used_peers() { + cx.report_peer(peer_id, penalty, "parent_chain_failure") } } + BatchProcessResult::NonFaultyFailure => { + // We might request this chain again if there is need but otherwise, don't try again + } } metrics::set_gauge( diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index b3afadda2c..2f2720fd1e 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -284,7 +284,10 @@ fn test_parent_lookup_happy_path() { // Processing succeeds, now the rest of the chain should be sent for processing. bl.parent_block_processed(chain_hash, BlockError::BlockIsAlreadyKnown.into(), &mut cx); rig.expect_parent_chain_process(); - bl.parent_chain_processed(chain_hash, BatchProcessResult::Success(true), &mut cx); + let process_result = BatchProcessResult::Success { + was_non_empty: true, + }; + bl.parent_chain_processed(chain_hash, process_result, &mut cx); assert_eq!(bl.parent_queue.len(), 0); } @@ -318,7 +321,10 @@ fn test_parent_lookup_wrong_response() { // Processing succeeds, now the rest of the chain should be sent for processing. 
bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); rig.expect_parent_chain_process(); - bl.parent_chain_processed(chain_hash, BatchProcessResult::Success(true), &mut cx); + let process_result = BatchProcessResult::Success { + was_non_empty: true, + }; + bl.parent_chain_processed(chain_hash, process_result, &mut cx); assert_eq!(bl.parent_queue.len(), 0); } @@ -347,7 +353,10 @@ fn test_parent_lookup_empty_response() { // Processing succeeds, now the rest of the chain should be sent for processing. bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); rig.expect_parent_chain_process(); - bl.parent_chain_processed(chain_hash, BatchProcessResult::Success(true), &mut cx); + let process_result = BatchProcessResult::Success { + was_non_empty: true, + }; + bl.parent_chain_processed(chain_hash, process_result, &mut cx); assert_eq!(bl.parent_queue.len(), 0); } @@ -375,7 +384,10 @@ fn test_parent_lookup_rpc_failure() { // Processing succeeds, now the rest of the chain should be sent for processing. 
bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); rig.expect_parent_chain_process(); - bl.parent_chain_processed(chain_hash, BatchProcessResult::Success(true), &mut cx); + let process_result = BatchProcessResult::Success { + was_non_empty: true, + }; + bl.parent_chain_processed(chain_hash, process_result, &mut cx); assert_eq!(bl.parent_queue.len(), 0); } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index fe27a33c5c..64755300c3 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -38,7 +38,7 @@ use super::block_lookups::BlockLookups; use super::network_context::SyncNetworkContext; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; -use crate::beacon_processor::{ChainSegmentProcessId, FailureMode, WorkEvent as BeaconWorkEvent}; +use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError}; @@ -139,13 +139,15 @@ pub enum BlockProcessResult<T: EthSpec> { #[derive(Debug)] pub enum BatchProcessResult { /// The batch was completed successfully. It carries whether the sent batch contained blocks. - Success(bool), - /// The batch processing failed. It carries whether the processing imported any block. - Failed { - imported_blocks: bool, - peer_action: Option<PeerAction>, - mode: FailureMode, + Success { + was_non_empty: bool, }, + /// The batch processing failed. It carries whether the processing imported any block. + FaultyFailure { + imported_blocks: bool, + penalty: PeerAction, + }, + NonFaultyFailure, } /// The primary object for handling and driving all the current syncing logic. 
It maintains the diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index 7a891de728..dc18a5c981 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -9,4 +9,4 @@ mod peer_sync_info; mod range_sync; pub use manager::{BatchProcessResult, SyncMessage}; -pub use range_sync::ChainId; +pub use range_sync::{BatchOperationOutcome, ChainId}; diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index c642d81db8..3eee7223db 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -70,12 +70,16 @@ impl BatchConfig for RangeSyncBatchConfig { // Such errors should never be encountered. pub struct WrongState(pub(crate) String); -/// Auxiliary type alias for readability. -type IsFailed = bool; +/// After batch operations, we use this to communicate whether a batch can continue or not +pub enum BatchOperationOutcome { + Continue, + Failed { blacklist: bool }, +} pub enum BatchProcessingResult { Success, - Failed { count_attempt: bool }, + FaultyFailure, + NonFaultyFailure, } /// A segment of a chain. @@ -87,7 +91,7 @@ pub struct BatchInfo<T: EthSpec, B: BatchConfig = RangeSyncBatchConfig> { /// The `Attempts` that have been made and failed to send us this batch. failed_processing_attempts: Vec<Attempt>, /// Number of processing attempts that have failed but we do not count. - other_failed_processing_attempts: u8, + non_faulty_processing_attempts: u8, /// The number of download retries this batch has undergone due to a failed request. failed_download_attempts: Vec<PeerId>, /// State of the batch. 
@@ -124,14 +128,6 @@ impl<T: EthSpec> BatchState<T> { pub fn poison(&mut self) -> BatchState<T> { std::mem::replace(self, BatchState::Poisoned) } - - pub fn is_failed(&self) -> IsFailed { - match self { - BatchState::Failed => true, - BatchState::Poisoned => unreachable!("Poisoned batch"), - _ => false, - } - } } impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> { @@ -151,7 +147,7 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> { end_slot, failed_processing_attempts: Vec::new(), failed_download_attempts: Vec::new(), - other_failed_processing_attempts: 0, + non_faulty_processing_attempts: 0, state: BatchState::AwaitingDownload, marker: std::marker::PhantomData, } @@ -175,7 +171,16 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> { peers } - /// Verifies if an incomming block belongs to this batch. + /// Return the number of times this batch has failed downloading and failed processing, in this + /// order. + pub fn failed_attempts(&self) -> (usize, usize) { + ( + self.failed_download_attempts.len(), + self.failed_processing_attempts.len(), + ) + } + + /// Verifies if an incoming block belongs to this batch. pub fn is_expecting_block(&self, peer_id: &PeerId, request_id: &Id) -> bool { if let BatchState::Downloading(expected_peer, _, expected_id) = &self.state { return peer_id == expected_peer && expected_id == request_id; @@ -203,6 +208,20 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> { } } + /// After different operations over a batch, this could be in a state that allows it to + /// continue, or in failed state. When the batch has failed, we check if it did mainly due to + /// processing failures. In this case the batch is considered failed and faulty. 
+ pub fn outcome(&self) -> BatchOperationOutcome { + match self.state { + BatchState::Poisoned => unreachable!("Poisoned batch"), + BatchState::Failed => BatchOperationOutcome::Failed { + blacklist: self.failed_processing_attempts.len() + > self.failed_download_attempts.len(), + }, + _ => BatchOperationOutcome::Continue, + } + } + pub fn state(&self) -> &BatchState<T> { &self.state } @@ -235,7 +254,10 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> { #[must_use = "Batch may have failed"] pub fn download_completed( &mut self, - ) -> Result<usize /* Received blocks */, Result<(Slot, Slot, IsFailed), WrongState>> { + ) -> Result< + usize, /* Received blocks */ + Result<(Slot, Slot, BatchOperationOutcome), WrongState>, + > { match self.state.poison() { BatchState::Downloading(peer, blocks, _request_id) => { // verify that blocks are in range @@ -264,7 +286,7 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> { BatchState::AwaitingDownload }; - return Err(Ok((expected, received, self.state.is_failed()))); + return Err(Ok((expected, received, self.outcome()))); } } @@ -289,7 +311,10 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> { /// THe `mark_failed` parameter, when set to false, does not increment the failed attempts of /// this batch and register the peer, rather attempts a re-download. 
#[must_use = "Batch may have failed"] - pub fn download_failed(&mut self, mark_failed: bool) -> Result<IsFailed, WrongState> { + pub fn download_failed( + &mut self, + mark_failed: bool, + ) -> Result<BatchOperationOutcome, WrongState> { match self.state.poison() { BatchState::Downloading(peer, _, _request_id) => { // register the attempt and check if the batch can be tried again @@ -304,7 +329,7 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> { // drop the blocks BatchState::AwaitingDownload }; - Ok(self.state.is_failed()) + Ok(self.outcome()) } BatchState::Poisoned => unreachable!("Poisoned batch"), other => { @@ -359,32 +384,31 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> { pub fn processing_completed( &mut self, procesing_result: BatchProcessingResult, - ) -> Result<IsFailed, WrongState> { + ) -> Result<BatchOperationOutcome, WrongState> { match self.state.poison() { BatchState::Processing(attempt) => { self.state = match procesing_result { BatchProcessingResult::Success => BatchState::AwaitingValidation(attempt), - BatchProcessingResult::Failed { count_attempt } => { - if count_attempt { - // register the failed attempt - self.failed_processing_attempts.push(attempt); + BatchProcessingResult::FaultyFailure => { + // register the failed attempt + self.failed_processing_attempts.push(attempt); - // check if the batch can be downloaded again - if self.failed_processing_attempts.len() - >= B::max_batch_processing_attempts() as usize - { - BatchState::Failed - } else { - BatchState::AwaitingDownload - } + // check if the batch can be downloaded again + if self.failed_processing_attempts.len() + >= B::max_batch_processing_attempts() as usize + { + BatchState::Failed } else { - self.other_failed_processing_attempts = - self.other_failed_processing_attempts.saturating_add(1); BatchState::AwaitingDownload } } + BatchProcessingResult::NonFaultyFailure => { + self.non_faulty_processing_attempts = + self.non_faulty_processing_attempts.saturating_add(1); + 
BatchState::AwaitingDownload + } }; - Ok(self.state.is_failed()) + Ok(self.outcome()) } BatchState::Poisoned => unreachable!("Poisoned batch"), other => { @@ -398,7 +422,7 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> { } #[must_use = "Batch may have failed"] - pub fn validation_failed(&mut self) -> Result<IsFailed, WrongState> { + pub fn validation_failed(&mut self) -> Result<BatchOperationOutcome, WrongState> { match self.state.poison() { BatchState::AwaitingValidation(attempt) => { self.failed_processing_attempts.push(attempt); @@ -411,7 +435,7 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> { } else { BatchState::AwaitingDownload }; - Ok(self.state.is_failed()) + Ok(self.outcome()) } BatchState::Poisoned => unreachable!("Poisoned batch"), other => { @@ -472,10 +496,7 @@ impl<T: EthSpec, B: BatchConfig> slog::KV for BatchInfo<T, B> { )?; serializer.emit_usize("downloaded", self.failed_download_attempts.len())?; serializer.emit_usize("processed", self.failed_processing_attempts.len())?; - serializer.emit_u8( - "processed_no_penalty", - self.other_failed_processing_attempts, - )?; + serializer.emit_u8("processed_no_penalty", self.non_faulty_processing_attempts)?; serializer.emit_arguments("state", &format_args!("{:?}", self.state))?; slog::Result::Ok(()) } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index caa08165a9..a54105f5cb 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1,7 +1,8 @@ use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; -use crate::beacon_processor::WorkEvent as BeaconWorkEvent; -use crate::beacon_processor::{ChainSegmentProcessId, FailureMode}; -use crate::sync::{manager::Id, network_context::SyncNetworkContext, BatchProcessResult}; +use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; +use crate::sync::{ + manager::Id, 
network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult, +}; use beacon_chain::{BeaconChainTypes, CountUnrealized}; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; @@ -37,7 +38,11 @@ pub type ProcessingResult = Result<KeepChain, RemoveChain>; pub enum RemoveChain { EmptyPeerPool, ChainCompleted, - ChainFailed(BatchId), + /// A chain has failed. This boolean signals whether the chain should be blacklisted. + ChainFailed { + blacklist: bool, + failing_batch: BatchId, + }, WrongBatchState(String), WrongChainState(String), } @@ -187,8 +192,13 @@ impl<T: BeaconChainTypes> SyncingChain<T> { // fail the batches for id in batch_ids { if let Some(batch) = self.batches.get_mut(&id) { - if batch.download_failed(true)? { - return Err(RemoveChain::ChainFailed(id)); + if let BatchOperationOutcome::Failed { blacklist } = + batch.download_failed(true)? + { + return Err(RemoveChain::ChainFailed { + blacklist, + failing_batch: id, + }); } self.retry_batch_download(network, id)?; } else { @@ -265,12 +275,15 @@ impl<T: BeaconChainTypes> SyncingChain<T> { self.process_completed_batches(network) } Err(result) => { - let (expected_boundary, received_boundary, is_failed) = result?; + let (expected_boundary, received_boundary, outcome) = result?; warn!(self.log, "Batch received out of range blocks"; "expected_boundary" => expected_boundary, "received_boundary" => received_boundary, "peer_id" => %peer_id, batch); - if is_failed { - return Err(RemoveChain::ChainFailed(batch_id)); + if let BatchOperationOutcome::Failed { blacklist } = outcome { + return Err(RemoveChain::ChainFailed { + blacklist, + failing_batch: batch_id, + }); } // this batch can't be used, so we need to request it again. self.retry_batch_download(network, batch_id) @@ -324,15 +337,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { // blocks to continue, and the chain is expecting a processing result that won't // arrive. 
To mitigate this, (fake) fail this processing so that the batch is // re-downloaded. - self.on_batch_process_result( - network, - batch_id, - &BatchProcessResult::Failed { - imported_blocks: false, - peer_action: None, - mode: FailureMode::ConsensusLayer, - }, - ) + self.on_batch_process_result(network, batch_id, &BatchProcessResult::NonFaultyFailure) } else { Ok(KeepChain) } @@ -448,7 +453,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { ) -> ProcessingResult { // the first two cases are possible if the chain advances while waiting for a processing // result - match &self.current_processing_batch { + let batch = match &self.current_processing_batch { Some(processing_id) if *processing_id != batch_id => { debug!(self.log, "Unexpected batch result"; "batch_epoch" => batch_id, "expected_batch_epoch" => processing_id); @@ -462,22 +467,35 @@ impl<T: BeaconChainTypes> SyncingChain<T> { _ => { // batch_id matches, continue self.current_processing_batch = None; - } - } - - match result { - BatchProcessResult::Success(was_non_empty) => { - let batch = self.batches.get_mut(&batch_id).ok_or_else(|| { + self.batches.get_mut(&batch_id).ok_or_else(|| { RemoveChain::WrongChainState(format!( "Current processing batch not found: {}", batch_id )) - })?; + })? + } + }; + let peer = batch.current_peer().cloned().ok_or_else(|| { + RemoveChain::WrongBatchState(format!( + "Processing target is in wrong state: {:?}", + batch.state(), + )) + })?; + + // Log the process result and the batch for debugging purposes. + debug!(self.log, "Batch processing result"; "result" => ?result, &batch, + "batch_epoch" => batch_id, "client" => %network.client_type(&peer)); + + // We consider three cases. Batch was successfully processed, Batch failed processing due + // to a faulty peer, or batch failed processing but the peer can't be deemed faulty. 
+ match result { + BatchProcessResult::Success { was_non_empty } => { batch.processing_completed(BatchProcessingResult::Success)?; - // If the processed batch was not empty, we can validate previous unvalidated - // blocks. + if *was_non_empty { + // If the processed batch was not empty, we can validate previous unvalidated + // blocks. self.advance_chain(network, batch_id); // we register so that on chain switching we don't try it again self.attempted_optimistic_starts.insert(batch_id); @@ -507,64 +525,56 @@ impl<T: BeaconChainTypes> SyncingChain<T> { self.process_completed_batches(network) } } - BatchProcessResult::Failed { + BatchProcessResult::FaultyFailure { imported_blocks, - peer_action, - mode: _, + penalty, } => { - let batch = self.batches.get_mut(&batch_id).ok_or_else(|| { - RemoveChain::WrongChainState(format!( - "Batch not found for current processing target {}", - batch_id - )) - })?; - let peer = batch.current_peer().cloned().ok_or_else(|| { - RemoveChain::WrongBatchState(format!( - "Processing target is in wrong state: {:?}", - batch.state(), - )) - })?; - debug!(self.log, "Batch processing failed"; "imported_blocks" => imported_blocks, "peer_penalty" => ?peer_action, - "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(&peer)); + // Penalize the peer appropiately. + network.report_peer(peer, *penalty, "faulty_batch"); - if batch.processing_completed(BatchProcessingResult::Failed { - count_attempt: peer_action.is_some(), - })? { - // check that we have not exceeded the re-process retry counter - // If a batch has exceeded the invalid batch lookup attempts limit, it means - // that it is likely all peers in this chain are are sending invalid batches - // repeatedly and are either malicious or faulty. We drop the chain and - // report all peers. - // There are some edge cases with forks that could land us in this situation. - // This should be unlikely, so we tolerate these errors, but not often. 
- warn!( - self.log, - "Batch failed to download. Dropping chain scoring peers"; - "score_adjustment" => %peer_action - .as_ref() - .map(ToString::to_string) - .unwrap_or_else(|| "None".into()), - "batch_epoch"=> batch_id - ); - - if let Some(peer_action) = peer_action { - for (peer, _) in self.peers.drain() { - network.report_peer(peer, *peer_action, "batch_failed"); + // Check if this batch is allowed to continue + match batch.processing_completed(BatchProcessingResult::FaultyFailure)? { + BatchOperationOutcome::Continue => { + // Chain can continue. Check if it can be moved forward. + if *imported_blocks { + // At least one block was successfully verified and imported, so we can be sure all + // previous batches are valid and we only need to download the current failed + // batch. + self.advance_chain(network, batch_id); } + // Handle this invalid batch, that is within the re-process retries limit. + self.handle_invalid_batch(network, batch_id) } - Err(RemoveChain::ChainFailed(batch_id)) - } else { - // chain can continue. Check if it can be moved forward - if *imported_blocks { - // At least one block was successfully verified and imported, so we can be sure all - // previous batches are valid and we only need to download the current failed - // batch. - self.advance_chain(network, batch_id); + BatchOperationOutcome::Failed { blacklist } => { + // Check that we have not exceeded the re-process retry counter, + // If a batch has exceeded the invalid batch lookup attempts limit, it means + // that it is likely all peers in this chain are are sending invalid batches + // repeatedly and are either malicious or faulty. We drop the chain and + // report all peers. + // There are some edge cases with forks that could land us in this situation. + // This should be unlikely, so we tolerate these errors, but not often. + warn!( + self.log, + "Batch failed to download. 
Dropping chain scoring peers"; + "score_adjustment" => %penalty, + "batch_epoch"=> batch_id, + ); + + for (peer, _) in self.peers.drain() { + network.report_peer(peer, *penalty, "faulty_chain"); + } + Err(RemoveChain::ChainFailed { + blacklist, + failing_batch: batch_id, + }) } - // Handle this invalid batch, that is within the re-process retries limit. - self.handle_invalid_batch(network, batch_id) } } + BatchProcessResult::NonFaultyFailure => { + batch.processing_completed(BatchProcessingResult::NonFaultyFailure)?; + // Simply redownload the batch. + self.retry_batch_download(network, batch_id) + } } } @@ -737,9 +747,12 @@ impl<T: BeaconChainTypes> SyncingChain<T> { let mut redownload_queue = Vec::new(); for (id, batch) in self.batches.range_mut(..batch_id) { - if batch.validation_failed()? { + if let BatchOperationOutcome::Failed { blacklist } = batch.validation_failed()? { // remove the chain early - return Err(RemoveChain::ChainFailed(batch_id)); + return Err(RemoveChain::ChainFailed { + blacklist, + failing_batch: *id, + }); } redownload_queue.push(*id); } @@ -836,8 +849,11 @@ impl<T: BeaconChainTypes> SyncingChain<T> { if let Some(active_requests) = self.peers.get_mut(peer_id) { active_requests.remove(&batch_id); } - if batch.download_failed(true)? { - return Err(RemoveChain::ChainFailed(batch_id)); + if let BatchOperationOutcome::Failed { blacklist } = batch.download_failed(true)? { + return Err(RemoveChain::ChainFailed { + blacklist, + failing_batch: batch_id, + }); } self.retry_batch_download(network, batch_id) } else { @@ -925,10 +941,16 @@ impl<T: BeaconChainTypes> SyncingChain<T> { self.peers .get_mut(&peer) .map(|request| request.remove(&batch_id)); - if batch.download_failed(true)? { - return Err(RemoveChain::ChainFailed(batch_id)); - } else { - return self.retry_batch_download(network, batch_id); + match batch.download_failed(true)? 
{ + BatchOperationOutcome::Failed { blacklist } => { + return Err(RemoveChain::ChainFailed { + blacklist, + failing_batch: batch_id, + }) + } + BatchOperationOutcome::Continue => { + return self.retry_batch_download(network, batch_id) + } } } } diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index 31122d59a1..f4db32bc96 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -8,7 +8,7 @@ mod chain_collection; mod range; mod sync_type; -pub use batch::{BatchConfig, BatchInfo, BatchProcessingResult, BatchState}; +pub use batch::{BatchConfig, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState}; pub use chain::{BatchId, ChainId, EPOCHS_PER_BATCH}; pub use range::RangeSync; pub use sync_type::RangeSyncType; diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index f08f8eb82a..4b29d31295 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -356,8 +356,8 @@ where debug!(self.log, "Chain removed"; "sync_type" => ?sync_type, &chain, "reason" => ?remove_reason, "op" => op); } - if let RemoveChain::ChainFailed(_) = remove_reason { - if RangeSyncType::Finalized == sync_type { + if let RemoveChain::ChainFailed { blacklist, .. } = remove_reason { + if RangeSyncType::Finalized == sync_type && blacklist { warn!(self.log, "Chain failed! Syncing to its head won't be retried for at least the next {} seconds", FAILED_CHAINS_EXPIRY_SECONDS; &chain); self.failed_chains.insert(chain.target_head_root); } From 71fd0b42f2ac06073aaf23c4148c7beb036ef555 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay <pawandhananjay@gmail.com> Date: Fri, 12 Aug 2022 00:56:39 +0000 Subject: [PATCH 134/184] Fix lints for Rust 1.63 (#3459) ## Issue Addressed N/A ## Proposed Changes Fix clippy lints for latest rust version 1.63. 
I have allowed the [derive_partial_eq_without_eq](https://rust-lang.github.io/rust-clippy/master/index.html#derive_partial_eq_without_eq) lint as satisfying this lint would result in more code that we might not want and I feel it's not required. Happy to fix this lint across lighthouse if required though. --- Makefile | 1 + beacon_node/http_api/src/block_id.rs | 4 ++-- beacon_node/store/src/iter.rs | 2 +- common/deposit_contract/build.rs | 10 ++++------ common/sensitive_url/src/lib.rs | 2 +- consensus/ssz_types/src/fixed_vector.rs | 2 +- consensus/ssz_types/src/variable_list.rs | 2 +- testing/state_transition_vectors/src/exit.rs | 1 + 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Makefile b/Makefile index 55e987be8b..df90ba3e76 100644 --- a/Makefile +++ b/Makefile @@ -142,6 +142,7 @@ lint: cargo clippy --workspace --tests -- \ -D clippy::fn_to_numeric_cast_any \ -D warnings \ + -A clippy::derive_partial_eq_without_eq \ -A clippy::from-over-into \ -A clippy::upper-case-acronyms \ -A clippy::vec-init-then-push diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index e418849040..5c785fe651 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -90,10 +90,10 @@ impl BlockId { .map_err(warp_utils::reject::beacon_chain_error)?; Ok((*root, execution_optimistic)) } else { - return Err(warp_utils::reject::custom_not_found(format!( + Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", root - ))); + ))) } } } diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 9109792478..07c99e5a4e 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -212,7 +212,7 @@ impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> RootsIterator<'a, T, (Err(BeaconStateError::SlotOutOfBounds), Err(BeaconStateError::SlotOutOfBounds)) => { // Read a `BeaconState` from the store that has access to prior historical roots. 
if let Some(beacon_state) = - next_historical_root_backtrack_state(&*self.store, &self.beacon_state) + next_historical_root_backtrack_state(self.store, &self.beacon_state) .handle_unavailable()? { self.beacon_state = Cow::Owned(beacon_state); diff --git a/common/deposit_contract/build.rs b/common/deposit_contract/build.rs index ac05a53e31..cae1d480c8 100644 --- a/common/deposit_contract/build.rs +++ b/common/deposit_contract/build.rs @@ -54,12 +54,10 @@ fn read_contract_file_from_url(url: Url) -> Result<Value, String> { .map_err(|e| format!("Respsonse is not a valid json {:?}", e))?; Ok(contract) } - Err(e) => { - return Err(format!( - "No abi file found. Failed to download from github: {:?}", - e - )) - } + Err(e) => Err(format!( + "No abi file found. Failed to download from github: {:?}", + e + )), } } } diff --git a/common/sensitive_url/src/lib.rs b/common/sensitive_url/src/lib.rs index aac4cb5500..b6705eb602 100644 --- a/common/sensitive_url/src/lib.rs +++ b/common/sensitive_url/src/lib.rs @@ -46,7 +46,7 @@ impl Serialize for SensitiveUrl { where S: Serializer, { - serializer.serialize_str(&self.full.to_string()) + serializer.serialize_str(self.full.as_ref()) } } diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs index ca5d40f14f..5f7a4af962 100644 --- a/consensus/ssz_types/src/fixed_vector.rs +++ b/consensus/ssz_types/src/fixed_vector.rs @@ -353,7 +353,7 @@ mod test { let vec = vec![0, 2, 4, 6]; let fixed: FixedVector<u64, U4> = FixedVector::from(vec); - assert_eq!(fixed.get(0), Some(&0)); + assert_eq!(fixed.first(), Some(&0)); assert_eq!(fixed.get(3), Some(&6)); assert_eq!(fixed.get(4), None); } diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs index 5acf74608a..49f8004b22 100644 --- a/consensus/ssz_types/src/variable_list.rs +++ b/consensus/ssz_types/src/variable_list.rs @@ -335,7 +335,7 @@ mod test { let vec = vec![0, 2, 4, 6]; let fixed: VariableList<u64, U4> = 
VariableList::from(vec); - assert_eq!(fixed.get(0), Some(&0)); + assert_eq!(fixed.first(), Some(&0)); assert_eq!(fixed.get(3), Some(&6)); assert_eq!(fixed.get(4), None); } diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index 3e4bb7bf3f..f485e1a268 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -15,6 +15,7 @@ struct ExitTest { validator_index: u64, exit_epoch: Epoch, state_epoch: Epoch, + #[allow(clippy::type_complexity)] state_modifier: Box<dyn FnOnce(&mut BeaconState<E>)>, #[allow(clippy::type_complexity)] block_modifier: From 92d597ad23e66a8c14fdc5394567336d74532a0a Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Mon, 15 Aug 2022 01:30:56 +0000 Subject: [PATCH 135/184] Modularise slasher backend (#3443) ## Proposed Changes Enable multiple database backends for the slasher, either MDBX (default) or LMDB. The backend can be selected using `--slasher-backend={lmdb,mdbx}`. ## Additional Info In order to abstract over the two libraries' different handling of database lifetimes I've used `Box::leak` to give the `Environment` type a `'static` lifetime. This was the only way I could think of using 100% safe code to construct a self-referential struct `SlasherDB`, where the `OpenDatabases` refers to the `Environment`. I think this is OK, as the `Environment` is expected to live for the life of the program, and both database engines leave the database in a consistent state after each write. The memory claimed for memory-mapping will be freed by the OS and appropriately flushed regardless of whether the `Environment` is actually dropped. We are depending on two `sigp` forks of `libmdbx-rs` and `lmdb-rs`, to give us greater control over MDBX OS support and LMDB's version.
--- .github/workflows/test-suite.yml | 10 ++ Cargo.lock | 32 +++- Makefile | 18 ++- beacon_node/Cargo.toml | 1 + beacon_node/src/cli.rs | 9 ++ beacon_node/src/config.rs | 4 + book/src/cross-compiling.md | 6 + book/src/installation-source.md | 17 ++ book/src/slasher.md | 34 +++- lighthouse/Cargo.toml | 6 + lighthouse/tests/beacon_node.rs | 26 ++++ slasher/Cargo.toml | 13 +- slasher/src/array.rs | 39 ++--- slasher/src/config.rs | 25 ++- slasher/src/database.rs | 216 +++++++------------------- slasher/src/database/interface.rs | 230 ++++++++++++++++++++++++++++ slasher/src/database/lmdb_impl.rs | 203 ++++++++++++++++++++++++ slasher/src/database/mdbx_impl.rs | 186 ++++++++++++++++++++++ slasher/src/error.rs | 20 ++- slasher/src/lib.rs | 16 +- slasher/src/utils.rs | 16 -- slasher/tests/attester_slashings.rs | 2 + slasher/tests/proposer_slashings.rs | 2 + slasher/tests/random.rs | 2 + slasher/tests/wrap_around.rs | 2 + 25 files changed, 905 insertions(+), 230 deletions(-) create mode 100644 slasher/src/database/interface.rs create mode 100644 slasher/src/database/lmdb_impl.rs create mode 100644 slasher/src/database/mdbx_impl.rs delete mode 100644 slasher/src/utils.rs diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index f26eadc398..3be8097ddf 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -102,6 +102,16 @@ jobs: run: rustup update stable - name: Run operation_pool tests for all known forks run: make test-op-pool + slasher-tests: + name: slasher-tests + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Run slasher tests for all supported backends + run: make test-slasher debug-tests-ubuntu: name: debug-tests-ubuntu runs-on: ubuntu-22.04 diff --git a/Cargo.lock b/Cargo.lock index a6b5f56374..a406df149f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -462,6 +462,7 @@ dependencies = [ 
"slasher", "slog", "store", + "strum", "task_executor", "types", "unused_port", @@ -3204,8 +3205,7 @@ checksum = "33a33a362ce288760ec6a508b94caaec573ae7d3bbbd91b87aa0bad4456839db" [[package]] name = "libmdbx" version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "002d7890ec770d222903165b6ba279b0fa3dba8e82610820833184066b006ce0" +source = "git+https://github.com/sigp/libmdbx-rs?tag=v0.1.4#096da80a83d14343f8df833006483f48075cd135" dependencies = [ "bitflags", "byteorder", @@ -3629,6 +3629,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", + "slasher", "slashing_protection", "slog", "sloggers", @@ -3712,6 +3713,27 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "lmdb-rkv" +version = "0.14.0" +source = "git+https://github.com/sigp/lmdb-rs?rev=f33845c6469b94265319aac0ed5085597862c27e#f33845c6469b94265319aac0ed5085597862c27e" +dependencies = [ + "bitflags", + "byteorder", + "libc", + "lmdb-rkv-sys", +] + +[[package]] +name = "lmdb-rkv-sys" +version = "0.11.2" +source = "git+https://github.com/sigp/lmdb-rs?rev=f33845c6469b94265319aac0ed5085597862c27e#f33845c6469b94265319aac0ed5085597862c27e" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + [[package]] name = "lock_api" version = "0.4.7" @@ -3830,8 +3852,7 @@ checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" [[package]] name = "mdbx-sys" version = "0.11.6-4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dde320ea35df4678486346065386943ed6c5920f2ab445dff8dd5d9c8cd04ad" +source = "git+https://github.com/sigp/libmdbx-rs?tag=v0.1.4#096da80a83d14343f8df833006483f48075cd135" dependencies = [ "bindgen", "cc", @@ -5965,6 +5986,8 @@ dependencies = [ "lazy_static", "libmdbx", "lighthouse_metrics", + "lmdb-rkv", + "lmdb-rkv-sys", "logging", "lru", "maplit", @@ -5976,6 +5999,7 @@ 
dependencies = [ "serde_derive", "slog", "sloggers", + "strum", "tempfile", "tree_hash", "tree_hash_derive", diff --git a/Makefile b/Makefile index df90ba3e76..6b5c6b3e5d 100644 --- a/Makefile +++ b/Makefile @@ -14,6 +14,9 @@ BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release" PINNED_NIGHTLY ?= nightly CLIPPY_PINNED_NIGHTLY=nightly-2022-05-19 +# List of features to use when cross-compiling. Can be overridden via the environment. +CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx + # List of all hard forks. This list is used to set env variables for several tests so that # they run for different forks. FORKS=phase0 altair merge @@ -42,13 +45,13 @@ install-lcli: # optimized CPU functions that may not be available on some systems. This # results in a more portable binary with ~20% slower BLS verification. build-x86_64: - cross build --release --bin lighthouse --target x86_64-unknown-linux-gnu --features modern,gnosis + cross build --release --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" build-x86_64-portable: - cross build --release --bin lighthouse --target x86_64-unknown-linux-gnu --features portable,gnosis + cross build --release --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" build-aarch64: - cross build --release --bin lighthouse --target aarch64-unknown-linux-gnu --features gnosis + cross build --release --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" build-aarch64-portable: - cross build --release --bin lighthouse --target aarch64-unknown-linux-gnu --features portable,gnosis + cross build --release --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" # Create a `.tar.gz` containing a binary for a specific target. define tarball_release_binary @@ -77,7 +80,7 @@ build-release-tarballs: # Runs the full workspace tests in **release**, without downloading any additional # test vectors. 
test-release: - cargo test --workspace --release --exclude ef_tests --exclude beacon_chain + cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher # Runs the full workspace tests in **debug**, without downloading any additional test # vectors. @@ -118,6 +121,11 @@ test-op-pool-%: --features 'beacon_chain/fork_from_env'\ -p operation_pool +# Run the tests in the `slasher` crate for all supported database backends. +test-slasher: + cargo test --release -p slasher --features mdbx + cargo test --release -p slasher --no-default-features --features lmdb + # Runs only the tests/state_transition_vectors tests. run-state-transition-tests: make -C $(STATE_TRANSITION_VECTORS) test diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 9c6385e8ed..417acf3d9e 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -41,3 +41,4 @@ monitoring_api = { path = "../common/monitoring_api" } sensitive_url = { path = "../common/sensitive_url" } http_api = { path = "http_api" } unused_port = { path = "../common/unused_port" } +strum = "0.24.1" diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index edf79ad34f..7a91530252 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1,4 +1,5 @@ use clap::{App, Arg}; +use strum::VariantNames; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new("beacon_node") @@ -628,6 +629,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { [disabled by default].") .requires("slasher") ) + .arg( + Arg::with_name("slasher-backend") + .long("slasher-backend") + .help("Set the database backend to be used by the slasher.") + .takes_value(true) + .possible_values(slasher::DatabaseBackend::VARIANTS) + .requires("slasher") + ) .arg( Arg::with_name("wss-checkpoint") .long("wss-checkpoint") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 35d566d76e..e885275b04 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -591,6 +591,10 @@ pub fn 
get_config<E: EthSpec>( slasher_config.broadcast = cli_args.is_present("slasher-broadcast"); + if let Some(backend) = clap_utils::parse_optional(cli_args, "slasher-backend")? { + slasher_config.backend = backend; + } + client_config.slasher = Some(slasher_config); } diff --git a/book/src/cross-compiling.md b/book/src/cross-compiling.md index 9b458078e2..8ccf23da9d 100644 --- a/book/src/cross-compiling.md +++ b/book/src/cross-compiling.md @@ -38,3 +38,9 @@ make build-aarch64 The `lighthouse` binary will be compiled inside a Docker container and placed in `lighthouse/target/aarch64-unknown-linux-gnu/release`. + +## Feature Flags + +When using the makefile the set of features used for building can be controlled with +the environment variable `CROSS_FEATURES`. See [Feature + Flags](./installation-source.md#feature-flags) for available features. diff --git a/book/src/installation-source.md b/book/src/installation-source.md index fc1ac4c092..1f8477260f 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -107,6 +107,23 @@ git checkout ${VERSION} make ``` +## Feature Flags + +You can customise the features that Lighthouse is built with using the `FEATURES` environment +variable. E.g. + +``` +env FEATURES="gnosis,slasher-lmdb" make +``` + +Commonly used features include: + +* `gnosis`: support for the Gnosis Beacon Chain. +* `portable`: support for legacy hardware. +* `modern`: support for exclusively modern hardware. +* `slasher-mdbx`: support for the MDBX slasher backend (enabled by default). +* `slasher-lmdb`: support for the LMDB slasher backend. + ## Troubleshooting ### Command is not found diff --git a/book/src/slasher.md b/book/src/slasher.md index 889f9c6cbc..61dc4b327f 100644 --- a/book/src/slasher.md +++ b/book/src/slasher.md @@ -43,6 +43,34 @@ By default the slasher stores data in the `slasher_db` directory inside the beac e.g. `~/.lighthouse/{network}/beacon/slasher_db`. You can use this flag to change that storage directory. 
+### Database Backend + +* Flag: `--slasher-backend NAME` +* Argument: one of `mdbx`, `lmdb` or `disabled` +* Default: `mdbx` + +Since Lighthouse v2.6.0 it is possible to use one of several database backends with the slasher: + +- MDBX (default) +- LMDB + +The advantage of MDBX is that it performs compaction, resulting in less disk usage over time. The +disadvantage is that upstream MDBX has removed support for Windows and macOS, so Lighthouse is stuck +on an older version. If bugs are found in our pinned version of MDBX it may be deprecated in future. + +LMDB does not have compaction but is more stable upstream than MDBX. It is not currently recommended +to use the LMDB backend on Windows. + +More backends may be added in future. + +### Switching Backends + +If you change database backends and want to reclaim the space used by the old backend you can +delete the following files from your `slasher_db` directory: + +* removing MDBX: delete `mdbx.dat` and `mdbx.lck` +* removing LMDB: delete `data.mdb` and `lock.mdb` + ### History Length * Flag: `--slasher-history-length EPOCHS` @@ -65,7 +93,7 @@ changed after initialization. * Argument: maximum size of the database in gigabytes * Default: 256 GB -The slasher uses MDBX as its backing store, which places a hard limit on the size of the database +Both database backends LMDB and MDBX place a hard limit on the size of the database file. You can use the `--slasher-max-db-size` flag to set this limit. It can be adjusted after initialization if the limit is reached. @@ -85,10 +113,6 @@ where `V` is the validator count and `N` is the history length. You should set the maximum size higher than the estimate to allow room for growth in the validator count. -> NOTE: In Lighthouse v2.1.0 the slasher database was switched from LMDB to MDBX. Unlike LMDB, MDBX -> does garbage collection of free pages and is capable of shrinking the database file and preventing -> it from growing indefinitely. 
- ### Update Period * Flag: `--slasher-update-period SECONDS` diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 7792ad074e..805b4eca26 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -7,6 +7,7 @@ autotests = false rust-version = "1.62" [features] +default = ["slasher-mdbx"] # Writes debugging .ssz files to /tmp during block processing. write_ssz_files = ["beacon_node/write_ssz_files"] # Compiles the BLS crypto code so that the binary is portable across machines. @@ -19,6 +20,10 @@ milagro = ["bls/milagro"] spec-minimal = [] # Support Gnosis spec and Gnosis Beacon Chain. gnosis = [] +# Support slasher MDBX backend. +slasher-mdbx = ["slasher/mdbx"] +# Support slasher LMDB backend. +slasher-lmdb = ["slasher/lmdb"] [dependencies] beacon_node = { "path" = "../beacon_node" } @@ -48,6 +53,7 @@ malloc_utils = { path = "../common/malloc_utils" } directory = { path = "../common/directory" } unused_port = { path = "../common/unused_port" } database_manager = { path = "../database_manager" } +slasher = { path = "../slasher" } [dev-dependencies] tempfile = "3.1.0" diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 9d952e5cc5..7fd4ad91cf 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1288,6 +1288,32 @@ fn slasher_broadcast_flag() { assert!(slasher_config.broadcast); }); } + +#[test] +fn slasher_backend_default() { + CommandLineTest::new() + .flag("slasher", None) + .run_with_zero_port() + .with_config(|config| { + let slasher_config = config.slasher.as_ref().unwrap(); + assert_eq!(slasher_config.backend, slasher::DatabaseBackend::Mdbx); + }); +} + +#[test] +fn slasher_backend_override_to_default() { + // Hard to test this flag because all but one backend is disabled by default and the backend + // called "disabled" results in a panic. 
+ CommandLineTest::new() + .flag("slasher", None) + .flag("slasher-backend", Some("mdbx")) + .run_with_zero_port() + .with_config(|config| { + let slasher_config = config.slasher.as_ref().unwrap(); + assert_eq!(slasher_config.backend, slasher::DatabaseBackend::Mdbx); + }); +} + #[test] pub fn malloc_tuning_flag() { CommandLineTest::new() diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 368350f11b..0f24fe9f04 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -4,6 +4,11 @@ version = "0.1.0" authors = ["Michael Sproul <michael@sigmaprime.io>"] edition = "2021" +[features] +default = ["mdbx"] +mdbx = ["dep:mdbx"] +lmdb = ["lmdb-rkv", "lmdb-rkv-sys"] + [dependencies] bincode = "1.3.1" byteorder = "1.3.4" @@ -13,8 +18,6 @@ flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } lazy_static = "1.4.0" lighthouse_metrics = { path = "../common/lighthouse_metrics" } filesystem = { path = "../common/filesystem" } -# MDBX is pinned at the last version with Windows and macOS support. This is only viable short-term. -mdbx = { package = "libmdbx", version = "=0.1.4" } lru = "0.7.1" parking_lot = "0.12.0" rand = "0.8.5" @@ -26,6 +29,12 @@ sloggers = { version = "2.1.1", features = ["json"] } tree_hash = "0.4.1" tree_hash_derive = "0.4.0" types = { path = "../consensus/types" } +strum = { version = "0.24.1", features = ["derive"] } + +# MDBX is pinned at the last version with Windows and macOS support. 
+mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", tag = "v0.1.4", optional = true } +lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } +lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } [dev-dependencies] maplit = "1.0.2" diff --git a/slasher/src/array.rs b/slasher/src/array.rs index d9f1fab819..d9cb8a4ec6 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -1,9 +1,11 @@ use crate::metrics::{self, SLASHER_COMPRESSION_RATIO, SLASHER_NUM_CHUNKS_UPDATED}; -use crate::RwTransaction; -use crate::{AttesterSlashingStatus, Config, Error, IndexedAttesterRecord, SlasherDB}; +use crate::{ + AttesterSlashingStatus, Config, Database, Error, IndexedAttesterRecord, RwTransaction, + SlasherDB, +}; use flate2::bufread::{ZlibDecoder, ZlibEncoder}; use serde_derive::{Deserialize, Serialize}; -use std::borrow::{Borrow, Cow}; +use std::borrow::Borrow; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::convert::TryFrom; use std::io::Read; @@ -147,10 +149,7 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn fn next_start_epoch(start_epoch: Epoch, config: &Config) -> Epoch; - fn select_db<'txn, E: EthSpec>( - db: &SlasherDB<E>, - txn: &'txn RwTransaction<'txn>, - ) -> Result<mdbx::Database<'txn>, Error>; + fn select_db<E: EthSpec>(db: &SlasherDB<E>) -> &Database; fn load<E: EthSpec>( db: &SlasherDB<E>, @@ -160,11 +159,10 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn config: &Config, ) -> Result<Option<Self>, Error> { let disk_key = config.disk_key(validator_chunk_index, chunk_index); - let chunk_bytes: Cow<[u8]> = - match txn.get(&Self::select_db(db, txn)?, &disk_key.to_be_bytes())? 
{ - Some(chunk_bytes) => chunk_bytes, - None => return Ok(None), - }; + let chunk_bytes = match txn.get(Self::select_db(db), &disk_key.to_be_bytes())? { + Some(chunk_bytes) => chunk_bytes, + None => return Ok(None), + }; let chunk = bincode::deserialize_from(ZlibDecoder::new(chunk_bytes.borrow()))?; @@ -189,10 +187,9 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn metrics::set_float_gauge(&SLASHER_COMPRESSION_RATIO, compression_ratio); txn.put( - &Self::select_db(db, txn)?, + Self::select_db(db), &disk_key.to_be_bytes(), &compressed_value, - SlasherDB::<E>::write_flags(), )?; Ok(()) } @@ -296,11 +293,8 @@ impl TargetArrayChunk for MinTargetChunk { start_epoch / chunk_size * chunk_size - 1 } - fn select_db<'txn, E: EthSpec>( - db: &SlasherDB<E>, - txn: &'txn RwTransaction<'txn>, - ) -> Result<mdbx::Database<'txn>, Error> { - db.min_targets_db(txn) + fn select_db<E: EthSpec>(db: &SlasherDB<E>) -> &Database { + &db.databases.min_targets_db } } @@ -398,11 +392,8 @@ impl TargetArrayChunk for MaxTargetChunk { (start_epoch / chunk_size + 1) * chunk_size } - fn select_db<'txn, E: EthSpec>( - db: &SlasherDB<E>, - txn: &'txn RwTransaction<'txn>, - ) -> Result<mdbx::Database<'txn>, Error> { - db.max_targets_db(txn) + fn select_db<E: EthSpec>(db: &SlasherDB<E>) -> &Database { + &db.databases.max_targets_db } } diff --git a/slasher/src/config.rs b/slasher/src/config.rs index 81aa4b597d..e2a58a406a 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -1,6 +1,7 @@ use crate::Error; use serde_derive::{Deserialize, Serialize}; use std::path::PathBuf; +use strum::{Display, EnumString, EnumVariantNames}; use types::{Epoch, EthSpec, IndexedAttestation}; pub const DEFAULT_CHUNK_SIZE: usize = 16; @@ -12,8 +13,15 @@ pub const DEFAULT_MAX_DB_SIZE: usize = 256 * 1024; // 256 GiB pub const DEFAULT_ATTESTATION_ROOT_CACHE_SIZE: usize = 100_000; pub const DEFAULT_BROADCAST: bool = false; +#[cfg(feature = "mdbx")] +pub const DEFAULT_BACKEND: 
DatabaseBackend = DatabaseBackend::Mdbx; +#[cfg(all(feature = "lmdb", not(feature = "mdbx")))] +pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Lmdb; +#[cfg(not(any(feature = "mdbx", feature = "lmdb")))] +pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Disabled; + pub const MAX_HISTORY_LENGTH: usize = 1 << 16; -pub const MDBX_GROWTH_STEP: isize = 256 * (1 << 20); // 256 MiB +pub const MEGABYTE: usize = 1 << 20; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { @@ -32,6 +40,8 @@ pub struct Config { pub attestation_root_cache_size: usize, /// Whether to broadcast slashings found to the network. pub broadcast: bool, + /// Database backend to use. + pub backend: DatabaseBackend, } /// Immutable configuration parameters which are stored on disk and checked for consistency. @@ -42,6 +52,18 @@ pub struct DiskConfig { pub history_length: usize, } +#[derive( + Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Display, EnumString, EnumVariantNames, +)] +#[strum(serialize_all = "lowercase")] +pub enum DatabaseBackend { + #[cfg(feature = "mdbx")] + Mdbx, + #[cfg(feature = "lmdb")] + Lmdb, + Disabled, +} + impl Config { pub fn new(database_path: PathBuf) -> Self { Self { @@ -54,6 +76,7 @@ impl Config { max_db_size_mbs: DEFAULT_MAX_DB_SIZE, attestation_root_cache_size: DEFAULT_ATTESTATION_ROOT_CACHE_SIZE, broadcast: DEFAULT_BROADCAST, + backend: DEFAULT_BACKEND, } } diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 653eccfa72..c8046c80dc 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -1,19 +1,20 @@ -use crate::config::MDBX_GROWTH_STEP; +pub mod interface; +mod lmdb_impl; +mod mdbx_impl; + use crate::{ - metrics, utils::TxnMapFull, AttesterRecord, AttesterSlashingStatus, CompactAttesterRecord, - Config, Environment, Error, ProposerSlashingStatus, RwTransaction, + metrics, AttesterRecord, AttesterSlashingStatus, CompactAttesterRecord, Config, Error, + ProposerSlashingStatus, }; use 
byteorder::{BigEndian, ByteOrder}; +use interface::{Environment, OpenDatabases, RwTransaction}; use lru::LruCache; -use mdbx::{Database, DatabaseFlags, Geometry, WriteFlags}; use parking_lot::Mutex; use serde::de::DeserializeOwned; use slog::{info, Logger}; use ssz::{Decode, Encode}; use std::borrow::{Borrow, Cow}; use std::marker::PhantomData; -use std::ops::Range; -use std::path::Path; use std::sync::Arc; use tree_hash::TreeHash; use types::{ @@ -50,10 +51,6 @@ const PROPOSERS_DB: &str = "proposers"; /// The number of DBs for MDBX to use (equal to the number of DBs defined above). const MAX_NUM_DBS: usize = 9; -/// Filename for the legacy (LMDB) database file, so that it may be deleted. -const LEGACY_DB_FILENAME: &str = "data.mdb"; -const LEGACY_DB_LOCK_FILENAME: &str = "lock.mdb"; - /// Constant key under which the schema version is stored in the `metadata_db`. const METADATA_VERSION_KEY: &[u8] = &[0]; /// Constant key under which the slasher configuration is stored in the `metadata_db`. @@ -64,11 +61,11 @@ const PROPOSER_KEY_SIZE: usize = 16; const CURRENT_EPOCH_KEY_SIZE: usize = 8; const INDEXED_ATTESTATION_ID_SIZE: usize = 6; const INDEXED_ATTESTATION_ID_KEY_SIZE: usize = 40; -const MEGABYTE: usize = 1 << 20; #[derive(Debug)] pub struct SlasherDB<E: EthSpec> { - pub(crate) env: Environment, + pub(crate) env: &'static Environment, + pub(crate) databases: OpenDatabases<'static>, /// LRU cache mapping indexed attestation IDs to their attestation data roots. attestation_root_cache: Mutex<LruCache<IndexedAttestationId, Hash256>>, pub(crate) config: Arc<Config>, @@ -249,42 +246,26 @@ fn ssz_decode<T: Decode>(bytes: Cow<[u8]>) -> Result<T, Error> { impl<E: EthSpec> SlasherDB<E> { pub fn open(config: Arc<Config>, log: Logger) -> Result<Self, Error> { - // Delete any legacy LMDB database. 
- Self::delete_legacy_file(&config.database_path, LEGACY_DB_FILENAME, &log)?; - Self::delete_legacy_file(&config.database_path, LEGACY_DB_LOCK_FILENAME, &log)?; + info!(log, "Opening slasher database"; "backend" => %config.backend); std::fs::create_dir_all(&config.database_path)?; - let env = Environment::new() - .set_max_dbs(MAX_NUM_DBS) - .set_geometry(Self::geometry(&config)) - .open_with_permissions(&config.database_path, 0o600)?; - - let txn = env.begin_rw_txn()?; - txn.create_db(Some(INDEXED_ATTESTATION_DB), Self::db_flags())?; - txn.create_db(Some(INDEXED_ATTESTATION_ID_DB), Self::db_flags())?; - txn.create_db(Some(ATTESTERS_DB), Self::db_flags())?; - txn.create_db(Some(ATTESTERS_MAX_TARGETS_DB), Self::db_flags())?; - txn.create_db(Some(MIN_TARGETS_DB), Self::db_flags())?; - txn.create_db(Some(MAX_TARGETS_DB), Self::db_flags())?; - txn.create_db(Some(CURRENT_EPOCHS_DB), Self::db_flags())?; - txn.create_db(Some(PROPOSERS_DB), Self::db_flags())?; - txn.create_db(Some(METADATA_DB), Self::db_flags())?; - txn.commit()?; + let env = Box::leak(Box::new(Environment::new(&config)?)); + let databases = env.create_databases()?; #[cfg(windows)] { - use filesystem::restrict_file_permissions; - let data = config.database_path.join("mdbx.dat"); - let lock = config.database_path.join("mdbx.lck"); - restrict_file_permissions(data).map_err(Error::DatabasePermissionsError)?; - restrict_file_permissions(lock).map_err(Error::DatabasePermissionsError)?; + for database_file in env.filenames(&config) { + filesystem::restrict_file_permissions(database_file) + .map_err(Error::DatabasePermissionsError)?; + } } let attestation_root_cache = Mutex::new(LruCache::new(config.attestation_root_cache_size)); let mut db = Self { env, + databases, attestation_root_cache, config, _phantom: PhantomData, @@ -307,102 +288,21 @@ impl<E: EthSpec> SlasherDB<E> { Ok(db) } - fn delete_legacy_file(slasher_dir: &Path, filename: &str, log: &Logger) -> Result<(), Error> { - let path = 
slasher_dir.join(filename); - - if path.is_file() { - info!( - log, - "Deleting legacy slasher DB"; - "file" => ?path.display(), - ); - std::fs::remove_file(&path)?; - } - Ok(()) - } - - fn open_db<'a>(&self, txn: &'a RwTransaction<'a>, name: &str) -> Result<Database<'a>, Error> { - Ok(txn.open_db(Some(name))?) - } - - pub fn indexed_attestation_db<'a>( - &self, - txn: &'a RwTransaction<'a>, - ) -> Result<Database<'a>, Error> { - self.open_db(txn, INDEXED_ATTESTATION_DB) - } - - pub fn indexed_attestation_id_db<'a>( - &self, - txn: &'a RwTransaction<'a>, - ) -> Result<Database<'a>, Error> { - self.open_db(txn, INDEXED_ATTESTATION_ID_DB) - } - - pub fn attesters_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result<Database<'a>, Error> { - self.open_db(txn, ATTESTERS_DB) - } - - pub fn attesters_max_targets_db<'a>( - &self, - txn: &'a RwTransaction<'a>, - ) -> Result<Database<'a>, Error> { - self.open_db(txn, ATTESTERS_MAX_TARGETS_DB) - } - - pub fn min_targets_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result<Database<'a>, Error> { - self.open_db(txn, MIN_TARGETS_DB) - } - - pub fn max_targets_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result<Database<'a>, Error> { - self.open_db(txn, MAX_TARGETS_DB) - } - - pub fn current_epochs_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result<Database<'a>, Error> { - self.open_db(txn, CURRENT_EPOCHS_DB) - } - - pub fn proposers_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result<Database<'a>, Error> { - self.open_db(txn, PROPOSERS_DB) - } - - pub fn metadata_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result<Database<'a>, Error> { - self.open_db(txn, METADATA_DB) - } - - pub fn db_flags() -> DatabaseFlags { - DatabaseFlags::default() - } - - pub fn write_flags() -> WriteFlags { - WriteFlags::default() - } - - pub fn begin_rw_txn(&self) -> Result<RwTransaction<'_>, Error> { - Ok(self.env.begin_rw_txn()?) 
- } - - pub fn geometry(config: &Config) -> Geometry<Range<usize>> { - Geometry { - size: Some(0..config.max_db_size_mbs * MEGABYTE), - growth_step: Some(MDBX_GROWTH_STEP), - shrink_threshold: None, - page_size: None, - } + pub fn begin_rw_txn(&self) -> Result<RwTransaction, Error> { + self.env.begin_rw_txn() } pub fn load_schema_version(&self, txn: &mut RwTransaction<'_>) -> Result<Option<u64>, Error> { - txn.get(&self.metadata_db(txn)?, METADATA_VERSION_KEY)? + txn.get(&self.databases.metadata_db, METADATA_VERSION_KEY)? .map(bincode_deserialize) .transpose() } pub fn store_schema_version(&self, txn: &mut RwTransaction<'_>) -> Result<(), Error> { txn.put( - &self.metadata_db(txn)?, + &self.databases.metadata_db, &METADATA_VERSION_KEY, &bincode::serialize(&CURRENT_SCHEMA_VERSION)?, - Self::write_flags(), )?; Ok(()) } @@ -415,17 +315,16 @@ impl<E: EthSpec> SlasherDB<E> { &self, txn: &mut RwTransaction<'_>, ) -> Result<Option<T>, Error> { - txn.get(&self.metadata_db(txn)?, METADATA_CONFIG_KEY)? + txn.get(&self.databases.metadata_db, METADATA_CONFIG_KEY)? .map(bincode_deserialize) .transpose() } pub fn store_config(&self, config: &Config, txn: &mut RwTransaction<'_>) -> Result<(), Error> { txn.put( - &self.metadata_db(txn)?, + &self.databases.metadata_db, &METADATA_CONFIG_KEY, &bincode::serialize(config)?, - Self::write_flags(), )?; Ok(()) } @@ -436,7 +335,7 @@ impl<E: EthSpec> SlasherDB<E> { txn: &mut RwTransaction<'_>, ) -> Result<Option<Epoch>, Error> { txn.get( - &self.attesters_max_targets_db(txn)?, + &self.databases.attesters_max_targets_db, CurrentEpochKey::new(validator_index).as_ref(), )? 
.map(ssz_decode) @@ -466,19 +365,17 @@ impl<E: EthSpec> SlasherDB<E> { ); for target_epoch in (start_epoch..max_target.as_u64()).map(Epoch::new) { txn.put( - &self.attesters_db(txn)?, + &self.databases.attesters_db, &AttesterKey::new(validator_index, target_epoch, &self.config), &CompactAttesterRecord::null().as_bytes(), - Self::write_flags(), )?; } } txn.put( - &self.attesters_max_targets_db(txn)?, + &self.databases.attesters_max_targets_db, &CurrentEpochKey::new(validator_index), &max_target.as_ssz_bytes(), - Self::write_flags(), )?; Ok(()) } @@ -489,7 +386,7 @@ impl<E: EthSpec> SlasherDB<E> { txn: &mut RwTransaction<'_>, ) -> Result<Option<Epoch>, Error> { txn.get( - &self.current_epochs_db(txn)?, + &self.databases.current_epochs_db, CurrentEpochKey::new(validator_index).as_ref(), )? .map(ssz_decode) @@ -503,10 +400,9 @@ impl<E: EthSpec> SlasherDB<E> { txn: &mut RwTransaction<'_>, ) -> Result<(), Error> { txn.put( - &self.current_epochs_db(txn)?, + &self.databases.current_epochs_db, &CurrentEpochKey::new(validator_index), ¤t_epoch.as_ssz_bytes(), - Self::write_flags(), )?; Ok(()) } @@ -516,7 +412,7 @@ impl<E: EthSpec> SlasherDB<E> { txn: &mut RwTransaction<'_>, key: &IndexedAttestationIdKey, ) -> Result<Option<u64>, Error> { - txn.get(&self.indexed_attestation_id_db(txn)?, key.as_ref())? + txn.get(&self.databases.indexed_attestation_id_db, key.as_ref())? .map(IndexedAttestationId::parse) .transpose() } @@ -527,12 +423,7 @@ impl<E: EthSpec> SlasherDB<E> { key: &IndexedAttestationIdKey, value: IndexedAttestationId, ) -> Result<(), Error> { - txn.put( - &self.indexed_attestation_id_db(txn)?, - key, - &value, - Self::write_flags(), - )?; + txn.put(&self.databases.indexed_attestation_id_db, key, &value)?; Ok(()) } @@ -556,18 +447,19 @@ impl<E: EthSpec> SlasherDB<E> { } // Store the new indexed attestation at the end of the current table. 
- let mut cursor = txn.cursor(&self.indexed_attestation_db(txn)?)?; + let db = &self.databases.indexed_attestation_db; + let mut cursor = txn.cursor(db)?; - let indexed_att_id = match cursor.last::<_, ()>()? { + let indexed_att_id = match cursor.last_key()? { // First ID is 1 so that 0 can be used to represent `null` in `CompactAttesterRecord`. None => 1, - Some((key_bytes, _)) => IndexedAttestationId::parse(key_bytes)? + 1, + Some(key_bytes) => IndexedAttestationId::parse(key_bytes)? + 1, }; let attestation_key = IndexedAttestationId::new(indexed_att_id); let data = indexed_attestation.as_ssz_bytes(); - cursor.put(attestation_key.as_ref(), &data, Self::write_flags())?; + cursor.put(attestation_key.as_ref(), &data)?; drop(cursor); // Update the (epoch, hash) to ID mapping. @@ -583,7 +475,7 @@ impl<E: EthSpec> SlasherDB<E> { ) -> Result<IndexedAttestation<E>, Error> { let bytes = txn .get( - &self.indexed_attestation_db(txn)?, + &self.databases.indexed_attestation_db, indexed_attestation_id.as_ref(), )? .ok_or(Error::MissingIndexedAttestation { @@ -685,10 +577,9 @@ impl<E: EthSpec> SlasherDB<E> { self.update_attester_max_target(validator_index, prev_max_target, target_epoch, txn)?; txn.put( - &self.attesters_db(txn)?, + &self.databases.attesters_db, &AttesterKey::new(validator_index, target_epoch, &self.config), &indexed_attestation_id, - Self::write_flags(), )?; Ok(AttesterSlashingStatus::NotSlashable) @@ -725,7 +616,7 @@ impl<E: EthSpec> SlasherDB<E> { let attester_key = AttesterKey::new(validator_index, target, &self.config); Ok(txn - .get(&self.attesters_db(txn)?, attester_key.as_ref())? + .get(&self.databases.attesters_db, attester_key.as_ref())? .map(CompactAttesterRecord::parse) .transpose()? 
.filter(|record| !record.is_null())) @@ -738,7 +629,7 @@ impl<E: EthSpec> SlasherDB<E> { slot: Slot, ) -> Result<Option<SignedBeaconBlockHeader>, Error> { let proposer_key = ProposerKey::new(proposer_index, slot); - txn.get(&self.proposers_db(txn)?, proposer_key.as_ref())? + txn.get(&self.databases.proposers_db, proposer_key.as_ref())? .map(ssz_decode) .transpose() } @@ -764,10 +655,9 @@ impl<E: EthSpec> SlasherDB<E> { } } else { txn.put( - &self.proposers_db(txn)?, + &self.databases.proposers_db, &ProposerKey::new(proposer_index, slot), &block_header.as_ssz_bytes(), - Self::write_flags(), )?; Ok(ProposerSlashingStatus::NotSlashable) } @@ -776,14 +666,12 @@ impl<E: EthSpec> SlasherDB<E> { /// Attempt to prune the database, deleting old blocks and attestations. pub fn prune(&self, current_epoch: Epoch) -> Result<(), Error> { let mut txn = self.begin_rw_txn()?; - self.try_prune(current_epoch, &mut txn).allow_map_full()?; + self.try_prune(current_epoch, &mut txn)?; txn.commit()?; Ok(()) } /// Try to prune the database. - /// - /// This is a separate method from `prune` so that `allow_map_full` may be used. pub fn try_prune( &self, current_epoch: Epoch, @@ -804,22 +692,22 @@ impl<E: EthSpec> SlasherDB<E> { .saturating_sub(self.config.history_length) .start_slot(E::slots_per_epoch()); - let mut cursor = txn.cursor(&self.proposers_db(txn)?)?; + let mut cursor = txn.cursor(&self.databases.proposers_db)?; // Position cursor at first key, bailing out if the database is empty. - if cursor.first::<(), ()>()?.is_none() { + if cursor.first_key()?.is_none() { return Ok(()); } loop { - let (key_bytes, ()) = cursor.get_current()?.ok_or(Error::MissingProposerKey)?; + let (key_bytes, _) = cursor.get_current()?.ok_or(Error::MissingProposerKey)?; let (slot, _) = ProposerKey::parse(key_bytes)?; if slot < min_slot { - cursor.del(Self::write_flags())?; + cursor.delete_current()?; // End the loop if there is no next entry. 
- if cursor.next::<(), ()>()?.is_none() { + if cursor.next_key()?.is_none() { break; } } else { @@ -842,10 +730,10 @@ impl<E: EthSpec> SlasherDB<E> { // Collect indexed attestation IDs to delete. let mut indexed_attestation_ids = vec![]; - let mut cursor = txn.cursor(&self.indexed_attestation_id_db(txn)?)?; + let mut cursor = txn.cursor(&self.databases.indexed_attestation_id_db)?; // Position cursor at first key, bailing out if the database is empty. - if cursor.first::<(), ()>()?.is_none() { + if cursor.first_key()?.is_none() { return Ok(()); } @@ -861,9 +749,9 @@ impl<E: EthSpec> SlasherDB<E> { IndexedAttestationId::parse(value)?, )); - cursor.del(Self::write_flags())?; + cursor.delete_current()?; - if cursor.next::<(), ()>()?.is_none() { + if cursor.next_key()?.is_none() { break; } } else { @@ -874,9 +762,9 @@ impl<E: EthSpec> SlasherDB<E> { // Delete the indexed attestations. // Optimisation potential: use a cursor here. - let indexed_attestation_db = self.indexed_attestation_db(txn)?; + let indexed_attestation_db = &self.databases.indexed_attestation_db; for indexed_attestation_id in &indexed_attestation_ids { - txn.del(&indexed_attestation_db, indexed_attestation_id, None)?; + txn.del(indexed_attestation_db, indexed_attestation_id)?; } self.delete_attestation_data_roots(indexed_attestation_ids); diff --git a/slasher/src/database/interface.rs b/slasher/src/database/interface.rs new file mode 100644 index 0000000000..5bb920383c --- /dev/null +++ b/slasher/src/database/interface.rs @@ -0,0 +1,230 @@ +use crate::{Config, DatabaseBackend, Error}; +use std::borrow::Cow; +use std::marker::PhantomData; +use std::path::PathBuf; + +#[cfg(feature = "lmdb")] +use crate::database::lmdb_impl; +#[cfg(feature = "mdbx")] +use crate::database::mdbx_impl; + +#[derive(Debug)] +pub enum Environment { + #[cfg(feature = "mdbx")] + Mdbx(mdbx_impl::Environment), + #[cfg(feature = "lmdb")] + Lmdb(lmdb_impl::Environment), + Disabled, +} + +#[derive(Debug)] +pub enum RwTransaction<'env> 
{ + #[cfg(feature = "mdbx")] + Mdbx(mdbx_impl::RwTransaction<'env>), + #[cfg(feature = "lmdb")] + Lmdb(lmdb_impl::RwTransaction<'env>), + Disabled(PhantomData<&'env ()>), +} + +#[derive(Debug)] +pub enum Database<'env> { + #[cfg(feature = "mdbx")] + Mdbx(mdbx_impl::Database<'env>), + #[cfg(feature = "lmdb")] + Lmdb(lmdb_impl::Database<'env>), + Disabled(PhantomData<&'env ()>), +} + +#[derive(Debug)] +pub struct OpenDatabases<'env> { + pub indexed_attestation_db: Database<'env>, + pub indexed_attestation_id_db: Database<'env>, + pub attesters_db: Database<'env>, + pub attesters_max_targets_db: Database<'env>, + pub min_targets_db: Database<'env>, + pub max_targets_db: Database<'env>, + pub current_epochs_db: Database<'env>, + pub proposers_db: Database<'env>, + pub metadata_db: Database<'env>, +} + +#[derive(Debug)] +pub enum Cursor<'env> { + #[cfg(feature = "mdbx")] + Mdbx(mdbx_impl::Cursor<'env>), + #[cfg(feature = "lmdb")] + Lmdb(lmdb_impl::Cursor<'env>), + Disabled(PhantomData<&'env ()>), +} + +pub type Key<'a> = Cow<'a, [u8]>; +pub type Value<'a> = Cow<'a, [u8]>; + +impl Environment { + pub fn new(config: &Config) -> Result<Environment, Error> { + match config.backend { + #[cfg(feature = "mdbx")] + DatabaseBackend::Mdbx => mdbx_impl::Environment::new(config).map(Environment::Mdbx), + #[cfg(feature = "lmdb")] + DatabaseBackend::Lmdb => lmdb_impl::Environment::new(config).map(Environment::Lmdb), + DatabaseBackend::Disabled => Err(Error::SlasherDatabaseBackendDisabled), + } + } + + pub fn create_databases(&self) -> Result<OpenDatabases, Error> { + match self { + #[cfg(feature = "mdbx")] + Self::Mdbx(env) => env.create_databases(), + #[cfg(feature = "lmdb")] + Self::Lmdb(env) => env.create_databases(), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn begin_rw_txn(&self) -> Result<RwTransaction, Error> { + match self { + #[cfg(feature = "mdbx")] + Self::Mdbx(env) => env.begin_rw_txn().map(RwTransaction::Mdbx), + #[cfg(feature = "lmdb")] + 
Self::Lmdb(env) => env.begin_rw_txn().map(RwTransaction::Lmdb), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + /// List of all files used by the database. + pub fn filenames(&self, config: &Config) -> Vec<PathBuf> { + match self { + #[cfg(feature = "mdbx")] + Self::Mdbx(env) => env.filenames(config), + #[cfg(feature = "lmdb")] + Self::Lmdb(env) => env.filenames(config), + _ => vec![], + } + } +} + +impl<'env> RwTransaction<'env> { + pub fn get<K: AsRef<[u8]> + ?Sized>( + &'env self, + db: &Database<'env>, + key: &K, + ) -> Result<Option<Cow<'env, [u8]>>, Error> { + match (self, db) { + #[cfg(feature = "mdbx")] + (Self::Mdbx(txn), Database::Mdbx(db)) => txn.get(db, key), + #[cfg(feature = "lmdb")] + (Self::Lmdb(txn), Database::Lmdb(db)) => txn.get(db, key), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn put<K: AsRef<[u8]>, V: AsRef<[u8]>>( + &mut self, + db: &Database, + key: K, + value: V, + ) -> Result<(), Error> { + match (self, db) { + #[cfg(feature = "mdbx")] + (Self::Mdbx(txn), Database::Mdbx(db)) => txn.put(db, key, value), + #[cfg(feature = "lmdb")] + (Self::Lmdb(txn), Database::Lmdb(db)) => txn.put(db, key, value), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn del<K: AsRef<[u8]>>(&mut self, db: &Database, key: K) -> Result<(), Error> { + match (self, db) { + #[cfg(feature = "mdbx")] + (Self::Mdbx(txn), Database::Mdbx(db)) => txn.del(db, key), + #[cfg(feature = "lmdb")] + (Self::Lmdb(txn), Database::Lmdb(db)) => txn.del(db, key), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn cursor<'a>(&'a mut self, db: &Database) -> Result<Cursor<'a>, Error> { + match (self, db) { + #[cfg(feature = "mdbx")] + (Self::Mdbx(txn), Database::Mdbx(db)) => txn.cursor(db).map(Cursor::Mdbx), + #[cfg(feature = "lmdb")] + (Self::Lmdb(txn), Database::Lmdb(db)) => txn.cursor(db).map(Cursor::Lmdb), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn commit(self) -> Result<(), Error> { + match self { + 
#[cfg(feature = "mdbx")] + Self::Mdbx(txn) => txn.commit(), + #[cfg(feature = "lmdb")] + Self::Lmdb(txn) => txn.commit(), + _ => Err(Error::MismatchedDatabaseVariant), + } + } +} + +impl<'env> Cursor<'env> { + /// Return the first key in the current database while advancing the cursor's position. + pub fn first_key(&mut self) -> Result<Option<Key>, Error> { + match self { + #[cfg(feature = "mdbx")] + Cursor::Mdbx(cursor) => cursor.first_key(), + #[cfg(feature = "lmdb")] + Cursor::Lmdb(cursor) => cursor.first_key(), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + /// Return the last key in the current database while advancing the cursor's position. + pub fn last_key(&mut self) -> Result<Option<Key>, Error> { + match self { + #[cfg(feature = "mdbx")] + Cursor::Mdbx(cursor) => cursor.last_key(), + #[cfg(feature = "lmdb")] + Cursor::Lmdb(cursor) => cursor.last_key(), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn next_key(&mut self) -> Result<Option<Key>, Error> { + match self { + #[cfg(feature = "mdbx")] + Cursor::Mdbx(cursor) => cursor.next_key(), + #[cfg(feature = "lmdb")] + Cursor::Lmdb(cursor) => cursor.next_key(), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + /// Get the key value pair at the current position. 
+ pub fn get_current(&mut self) -> Result<Option<(Key, Value)>, Error> { + match self { + #[cfg(feature = "mdbx")] + Cursor::Mdbx(cursor) => cursor.get_current(), + #[cfg(feature = "lmdb")] + Cursor::Lmdb(cursor) => cursor.get_current(), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn delete_current(&mut self) -> Result<(), Error> { + match self { + #[cfg(feature = "mdbx")] + Cursor::Mdbx(cursor) => cursor.delete_current(), + #[cfg(feature = "lmdb")] + Cursor::Lmdb(cursor) => cursor.delete_current(), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn put<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self, key: K, value: V) -> Result<(), Error> { + match self { + #[cfg(feature = "mdbx")] + Self::Mdbx(cursor) => cursor.put(key, value), + #[cfg(feature = "lmdb")] + Self::Lmdb(cursor) => cursor.put(key, value), + _ => Err(Error::MismatchedDatabaseVariant), + } + } +} diff --git a/slasher/src/database/lmdb_impl.rs b/slasher/src/database/lmdb_impl.rs new file mode 100644 index 0000000000..98839fcc46 --- /dev/null +++ b/slasher/src/database/lmdb_impl.rs @@ -0,0 +1,203 @@ +#![cfg(feature = "lmdb")] + +use crate::{ + config::MEGABYTE, + database::{ + interface::{Key, OpenDatabases, Value}, + *, + }, + Config, Error, +}; +use lmdb::{Cursor as _, DatabaseFlags, Transaction, WriteFlags}; +use lmdb_sys::{MDB_FIRST, MDB_GET_CURRENT, MDB_LAST, MDB_NEXT}; +use std::borrow::Cow; +use std::marker::PhantomData; +use std::path::PathBuf; + +#[derive(Debug)] +pub struct Environment { + env: lmdb::Environment, +} + +#[derive(Debug)] +pub struct RwTransaction<'env> { + txn: lmdb::RwTransaction<'env>, +} + +#[derive(Debug)] +pub struct Database<'env> { + db: lmdb::Database, + _phantom: PhantomData<&'env ()>, +} + +#[derive(Debug)] +pub struct Cursor<'env> { + cursor: lmdb::RwCursor<'env>, +} + +impl Environment { + pub fn new(config: &Config) -> Result<Environment, Error> { + let env = lmdb::Environment::new() + .set_max_dbs(MAX_NUM_DBS as u32) + 
.set_map_size(config.max_db_size_mbs * MEGABYTE) + .open_with_permissions(&config.database_path, 0o600)?; + Ok(Environment { env }) + } + + pub fn create_databases(&self) -> Result<OpenDatabases, Error> { + let indexed_attestation_db = self + .env + .create_db(Some(INDEXED_ATTESTATION_DB), Self::db_flags())?; + let indexed_attestation_id_db = self + .env + .create_db(Some(INDEXED_ATTESTATION_ID_DB), Self::db_flags())?; + let attesters_db = self.env.create_db(Some(ATTESTERS_DB), Self::db_flags())?; + let attesters_max_targets_db = self + .env + .create_db(Some(ATTESTERS_MAX_TARGETS_DB), Self::db_flags())?; + let min_targets_db = self.env.create_db(Some(MIN_TARGETS_DB), Self::db_flags())?; + let max_targets_db = self.env.create_db(Some(MAX_TARGETS_DB), Self::db_flags())?; + let current_epochs_db = self + .env + .create_db(Some(CURRENT_EPOCHS_DB), Self::db_flags())?; + let proposers_db = self.env.create_db(Some(PROPOSERS_DB), Self::db_flags())?; + let metadata_db = self.env.create_db(Some(METADATA_DB), Self::db_flags())?; + + let wrap = |db| { + crate::Database::Lmdb(Database { + db, + _phantom: PhantomData, + }) + }; + + Ok(OpenDatabases { + indexed_attestation_db: wrap(indexed_attestation_db), + indexed_attestation_id_db: wrap(indexed_attestation_id_db), + attesters_db: wrap(attesters_db), + attesters_max_targets_db: wrap(attesters_max_targets_db), + min_targets_db: wrap(min_targets_db), + max_targets_db: wrap(max_targets_db), + current_epochs_db: wrap(current_epochs_db), + proposers_db: wrap(proposers_db), + metadata_db: wrap(metadata_db), + }) + } + + pub fn begin_rw_txn(&self) -> Result<RwTransaction, Error> { + let txn = self.env.begin_rw_txn()?; + Ok(RwTransaction { txn }) + } + + pub fn filenames(&self, config: &Config) -> Vec<PathBuf> { + vec![ + config.database_path.join("data.mdb"), + config.database_path.join("lock.mdb"), + ] + } + + fn db_flags() -> DatabaseFlags { + DatabaseFlags::default() + } +} + +impl<'env> RwTransaction<'env> { + pub fn get<K: 
AsRef<[u8]> + ?Sized>( + &'env self, + db: &Database<'env>, + key: &K, + ) -> Result<Option<Cow<'env, [u8]>>, Error> { + Ok(self.txn.get(db.db, key).optional()?.map(Cow::Borrowed)) + } + + pub fn put<K: AsRef<[u8]>, V: AsRef<[u8]>>( + &mut self, + db: &Database, + key: K, + value: V, + ) -> Result<(), Error> { + self.txn.put(db.db, &key, &value, Self::write_flags())?; + Ok(()) + } + + pub fn del<K: AsRef<[u8]>>(&mut self, db: &Database, key: K) -> Result<(), Error> { + self.txn.del(db.db, &key, None)?; + Ok(()) + } + + pub fn cursor<'a>(&'a mut self, db: &Database) -> Result<Cursor<'a>, Error> { + let cursor = self.txn.open_rw_cursor(db.db)?; + Ok(Cursor { cursor }) + } + + pub fn commit(self) -> Result<(), Error> { + self.txn.commit()?; + Ok(()) + } + + fn write_flags() -> WriteFlags { + WriteFlags::default() + } +} + +impl<'env> Cursor<'env> { + pub fn first_key(&mut self) -> Result<Option<Key>, Error> { + let opt_key = self + .cursor + .get(None, None, MDB_FIRST) + .optional()? + .and_then(|(key, _)| Some(Cow::Borrowed(key?))); + Ok(opt_key) + } + + pub fn last_key(&mut self) -> Result<Option<Key<'env>>, Error> { + let opt_key = self + .cursor + .get(None, None, MDB_LAST) + .optional()? + .and_then(|(key, _)| Some(Cow::Borrowed(key?))); + Ok(opt_key) + } + + pub fn next_key(&mut self) -> Result<Option<Key<'env>>, Error> { + let opt_key = self + .cursor + .get(None, None, MDB_NEXT) + .optional()? + .and_then(|(key, _)| Some(Cow::Borrowed(key?))); + Ok(opt_key) + } + + pub fn get_current(&mut self) -> Result<Option<(Key<'env>, Value<'env>)>, Error> { + if let Some((Some(key), value)) = self.cursor.get(None, None, MDB_GET_CURRENT).optional()? 
{ + Ok(Some((Cow::Borrowed(key), Cow::Borrowed(value)))) + } else { + Ok(None) + } + } + + pub fn delete_current(&mut self) -> Result<(), Error> { + self.cursor.del(RwTransaction::write_flags())?; + Ok(()) + } + + pub fn put<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self, key: K, value: V) -> Result<(), Error> { + self.cursor + .put(&key, &value, RwTransaction::write_flags())?; + Ok(()) + } +} + +/// Mix-in trait for loading values from LMDB that may or may not exist. +pub trait TxnOptional<T, E> { + fn optional(self) -> Result<Option<T>, E>; +} + +impl<T> TxnOptional<T, Error> for Result<T, lmdb::Error> { + fn optional(self) -> Result<Option<T>, Error> { + match self { + Ok(x) => Ok(Some(x)), + Err(lmdb::Error::NotFound) => Ok(None), + Err(e) => Err(e.into()), + } + } +} diff --git a/slasher/src/database/mdbx_impl.rs b/slasher/src/database/mdbx_impl.rs new file mode 100644 index 0000000000..d25f17e7ac --- /dev/null +++ b/slasher/src/database/mdbx_impl.rs @@ -0,0 +1,186 @@ +#![cfg(feature = "mdbx")] + +use crate::{ + config::MEGABYTE, + database::{ + interface::{Key, OpenDatabases, Value}, + *, + }, + Config, Error, +}; +use mdbx::{DatabaseFlags, Geometry, WriteFlags}; +use std::borrow::Cow; +use std::ops::Range; +use std::path::PathBuf; + +pub const MDBX_GROWTH_STEP: isize = 256 * (1 << 20); // 256 MiB + +#[derive(Debug)] +pub struct Environment { + env: mdbx::Environment<mdbx::NoWriteMap>, +} + +#[derive(Debug)] +pub struct RwTransaction<'env> { + txn: mdbx::Transaction<'env, mdbx::RW, mdbx::NoWriteMap>, +} + +#[derive(Debug)] +pub struct Database<'env> { + db: mdbx::Database<'env>, +} + +#[derive(Debug)] +pub struct Cursor<'env> { + cursor: mdbx::Cursor<'env, mdbx::RW>, +} + +impl Environment { + pub fn new(config: &Config) -> Result<Environment, Error> { + let env = mdbx::Environment::new() + .set_max_dbs(MAX_NUM_DBS) + .set_geometry(Self::geometry(config)) + .open_with_permissions(&config.database_path, 0o600)?; + Ok(Environment { env }) + } + + pub fn 
create_databases(&self) -> Result<OpenDatabases, Error> { + let txn = self.begin_rw_txn()?; + txn.create_db(INDEXED_ATTESTATION_DB)?; + txn.create_db(INDEXED_ATTESTATION_ID_DB)?; + txn.create_db(ATTESTERS_DB)?; + txn.create_db(ATTESTERS_MAX_TARGETS_DB)?; + txn.create_db(MIN_TARGETS_DB)?; + txn.create_db(MAX_TARGETS_DB)?; + txn.create_db(CURRENT_EPOCHS_DB)?; + txn.create_db(PROPOSERS_DB)?; + txn.create_db(METADATA_DB)?; + + // This is all rather nasty + let (_, mut databases) = txn.txn.commit_and_rebind_open_dbs()?; + let mut next_db = || { + crate::Database::Mdbx(Database { + db: databases.remove(0), + }) + }; + + Ok(OpenDatabases { + indexed_attestation_db: next_db(), + indexed_attestation_id_db: next_db(), + attesters_db: next_db(), + attesters_max_targets_db: next_db(), + min_targets_db: next_db(), + max_targets_db: next_db(), + current_epochs_db: next_db(), + proposers_db: next_db(), + metadata_db: next_db(), + }) + } + + pub fn begin_rw_txn(&self) -> Result<RwTransaction, Error> { + let txn = self.env.begin_rw_txn()?; + Ok(RwTransaction { txn }) + } + + pub fn filenames(&self, config: &Config) -> Vec<PathBuf> { + vec![ + config.database_path.join("mdbx.dat"), + config.database_path.join("mdbx.lck"), + ] + } + + fn geometry(config: &Config) -> Geometry<Range<usize>> { + Geometry { + size: Some(0..config.max_db_size_mbs * MEGABYTE), + growth_step: Some(MDBX_GROWTH_STEP), + shrink_threshold: None, + page_size: None, + } + } +} + +impl<'env> RwTransaction<'env> { + pub fn create_db(&self, name: &'static str) -> Result<(), Error> { + let db = self.txn.create_db(Some(name), Self::db_flags())?; + self.txn.prime_for_permaopen(db); + Ok(()) + } + + pub fn open_db(&self, name: &'static str) -> Result<Database, Error> { + let db = self.txn.open_db(Some(name))?; + Ok(Database { db }) + } + + pub fn get<K: AsRef<[u8]> + ?Sized>( + &'env self, + db: &Database<'env>, + key: &K, + ) -> Result<Option<Cow<'env, [u8]>>, Error> { + Ok(self.txn.get(&db.db, key.as_ref())?) 
+ } + + pub fn put<K: AsRef<[u8]>, V: AsRef<[u8]>>( + &self, + db: &Database, + key: K, + value: V, + ) -> Result<(), Error> { + self.txn.put(&db.db, key, value, Self::write_flags())?; + Ok(()) + } + + pub fn del<K: AsRef<[u8]>>(&self, db: &Database, key: K) -> Result<(), Error> { + self.txn.del(&db.db, key, None)?; + Ok(()) + } + + pub fn cursor<'a>(&'a self, db: &Database) -> Result<Cursor<'a>, Error> { + let cursor = self.txn.cursor(&db.db)?; + Ok(Cursor { cursor }) + } + + pub fn commit(self) -> Result<(), Error> { + self.txn.commit()?; + Ok(()) + } + + fn db_flags() -> DatabaseFlags { + DatabaseFlags::default() + } + + fn write_flags() -> WriteFlags { + WriteFlags::default() + } +} + +impl<'env> Cursor<'env> { + pub fn first_key(&mut self) -> Result<Option<Cow<'env, [u8]>>, Error> { + let opt_key = self.cursor.first()?.map(|(key_bytes, ())| key_bytes); + Ok(opt_key) + } + + pub fn last_key(&mut self) -> Result<Option<Cow<'env, [u8]>>, Error> { + let opt_key = self.cursor.last()?.map(|(key_bytes, ())| key_bytes); + Ok(opt_key) + } + + pub fn next_key(&mut self) -> Result<Option<Cow<'env, [u8]>>, Error> { + let opt_key = self.cursor.next()?.map(|(key_bytes, ())| key_bytes); + Ok(opt_key) + } + + pub fn get_current(&mut self) -> Result<Option<(Key<'env>, Value<'env>)>, Error> { + Ok(self.cursor.get_current()?) 
+ } + + pub fn delete_current(&mut self) -> Result<(), Error> { + self.cursor.del(RwTransaction::write_flags())?; + Ok(()) + } + + pub fn put<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self, key: K, value: V) -> Result<(), Error> { + self.cursor + .put(key.as_ref(), value.as_ref(), RwTransaction::write_flags())?; + Ok(()) + } +} diff --git a/slasher/src/error.rs b/slasher/src/error.rs index 7e689022e4..b939c281e9 100644 --- a/slasher/src/error.rs +++ b/slasher/src/error.rs @@ -4,7 +4,12 @@ use types::Epoch; #[derive(Debug)] pub enum Error { - DatabaseError(mdbx::Error), + #[cfg(feature = "mdbx")] + DatabaseMdbxError(mdbx::Error), + #[cfg(feature = "lmdb")] + DatabaseLmdbError(lmdb::Error), + SlasherDatabaseBackendDisabled, + MismatchedDatabaseVariant, DatabaseIOError(io::Error), DatabasePermissionsError(filesystem::Error), SszDecodeError(ssz::DecodeError), @@ -63,11 +68,22 @@ pub enum Error { InconsistentAttestationDataRoot, } +#[cfg(feature = "mdbx")] impl From<mdbx::Error> for Error { fn from(e: mdbx::Error) -> Self { match e { mdbx::Error::Other(os_error) => Error::from(io::Error::from_raw_os_error(os_error)), - _ => Error::DatabaseError(e), + _ => Error::DatabaseMdbxError(e), + } + } +} + +#[cfg(feature = "lmdb")] +impl From<lmdb::Error> for Error { + fn from(e: lmdb::Error) -> Self { + match e { + lmdb::Error::Other(os_error) => Error::from(io::Error::from_raw_os_error(os_error)), + _ => Error::DatabaseLmdbError(e), } } } diff --git a/slasher/src/lib.rs b/slasher/src/lib.rs index 184e3080e5..132ce8b235 100644 --- a/slasher/src/lib.rs +++ b/slasher/src/lib.rs @@ -1,4 +1,8 @@ #![deny(missing_debug_implementations)] +#![cfg_attr( + not(any(feature = "mdbx", feature = "lmdb")), + allow(unused, clippy::drop_non_drop) +)] mod array; mod attestation_queue; @@ -12,22 +16,20 @@ pub mod metrics; mod migrate; mod slasher; pub mod test_utils; -mod utils; pub use crate::slasher::Slasher; pub use attestation_queue::{AttestationBatch, AttestationQueue, SimpleBatch}; pub use 
attester_record::{AttesterRecord, CompactAttesterRecord, IndexedAttesterRecord}; pub use block_queue::BlockQueue; -pub use config::Config; -pub use database::{IndexedAttestationId, SlasherDB}; +pub use config::{Config, DatabaseBackend}; +pub use database::{ + interface::{Database, Environment, RwTransaction}, + IndexedAttestationId, SlasherDB, +}; pub use error::Error; use types::{AttesterSlashing, EthSpec, IndexedAttestation, ProposerSlashing}; -/// LMDB-to-MDBX compatibility shims. -pub type Environment = mdbx::Environment<mdbx::NoWriteMap>; -pub type RwTransaction<'env> = mdbx::Transaction<'env, mdbx::RW, mdbx::NoWriteMap>; - #[derive(Debug, PartialEq)] pub enum AttesterSlashingStatus<E: EthSpec> { NotSlashable, diff --git a/slasher/src/utils.rs b/slasher/src/utils.rs deleted file mode 100644 index ccd31e74e2..0000000000 --- a/slasher/src/utils.rs +++ /dev/null @@ -1,16 +0,0 @@ -use crate::Error; - -/// Transform a transaction that would fail with a `MapFull` error into an optional result. 
-pub trait TxnMapFull<T, E> { - fn allow_map_full(self) -> Result<Option<T>, E>; -} - -impl<T> TxnMapFull<T, Error> for Result<T, Error> { - fn allow_map_full(self) -> Result<Option<T>, Error> { - match self { - Ok(x) => Ok(Some(x)), - Err(Error::DatabaseError(mdbx::Error::MapFull)) => Ok(None), - Err(e) => Err(e), - } - } -} diff --git a/slasher/tests/attester_slashings.rs b/slasher/tests/attester_slashings.rs index a2abbc55b1..5cf3fe6c2a 100644 --- a/slasher/tests/attester_slashings.rs +++ b/slasher/tests/attester_slashings.rs @@ -1,3 +1,5 @@ +#![cfg(any(feature = "mdbx", feature = "lmdb"))] + use logging::test_logger; use maplit::hashset; use rayon::prelude::*; diff --git a/slasher/tests/proposer_slashings.rs b/slasher/tests/proposer_slashings.rs index e8b052e664..3b7b8ed583 100644 --- a/slasher/tests/proposer_slashings.rs +++ b/slasher/tests/proposer_slashings.rs @@ -1,3 +1,5 @@ +#![cfg(any(feature = "mdbx", feature = "lmdb"))] + use logging::test_logger; use slasher::{ test_utils::{block as test_block, E}, diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index 8126602f37..968a4dbb68 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -1,3 +1,5 @@ +#![cfg(any(feature = "mdbx", feature = "lmdb"))] + use logging::test_logger; use rand::prelude::*; use rand::{rngs::StdRng, thread_rng, Rng, SeedableRng}; diff --git a/slasher/tests/wrap_around.rs b/slasher/tests/wrap_around.rs index b256840ee5..d2c876d363 100644 --- a/slasher/tests/wrap_around.rs +++ b/slasher/tests/wrap_around.rs @@ -1,3 +1,5 @@ +#![cfg(any(feature = "mdbx", feature = "lmdb"))] + use logging::test_logger; use slasher::{test_utils::indexed_att, Config, Slasher}; use tempfile::tempdir; From dd93aa870155a5cf628637d9afa128ff080f77c2 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Mon, 15 Aug 2022 01:30:58 +0000 Subject: [PATCH 136/184] Standard gas limit api (#3450) ## Issue Addressed Resolves https://github.com/sigp/lighthouse/issues/3403 ## 
Proposed Changes Implements https://ethereum.github.io/keymanager-APIs/#/Gas%20Limit ## Additional Info N/A Co-authored-by: realbigsean <sean@sigmaprime.io> --- book/src/builders.md | 16 +- common/eth2/src/lighthouse_vc/http_client.rs | 41 +++- common/eth2/src/lighthouse_vc/std_types.rs | 7 + common/eth2/src/lighthouse_vc/types.rs | 6 + validator_client/src/http_api/mod.rs | 116 +++++++++++- .../src/http_api/tests/keystores.rs | 176 ++++++++++++++++++ .../src/initialized_validators.rs | 72 +++++++ validator_client/src/validator_store.rs | 2 +- 8 files changed, 430 insertions(+), 6 deletions(-) diff --git a/book/src/builders.md b/book/src/builders.md index 1a034e0820..110f2450b0 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -59,8 +59,16 @@ so a discrepancy in fee recipient might not indicate that there is something afo only create blocks with a `fee_recipient` field matching the one suggested, you can use the [strict fee recipient](suggested-fee-recipient.md#strict-fee-recipient) flag. -### Enable/Disable builder proposals and set Gas Limit -Use the [lighthouse API](api-vc-endpoints.md) to configure these fields per-validator. +### Set Gas Limit via HTTP + +To update gas limit per-validator you can use the [standard key manager API][gas-limit-api]. + +Alternatively, you can use the [lighthouse API](api-vc-endpoints.md). See below for an example. + +### Enable/Disable builder proposals via HTTP + +Use the [lighthouse API](api-vc-endpoints.md) to enable/disable use of the builder API on a per-validator basis. +You can also update the configured gas limit with these requests. #### `PATCH /lighthouse/validators/:voting_pubkey` @@ -99,6 +107,9 @@ null Refer to [suggested fee recipient](suggested-fee-recipient.md) documentation. ### Validator definitions example + +You can also directly configure these fields in the `validator_definitions.yml` file. 
+ ``` --- - enabled: true @@ -142,3 +153,4 @@ By default, Lighthouse is strict with these conditions, but we encourage users t [mev-rs]: https://github.com/ralexstokes/mev-rs [mev-boost]: https://github.com/flashbots/mev-boost +[gas-limit-api]: https://ethereum.github.io/keymanager-APIs/#/Gas%20Limit diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 5f83e81aa0..88b5b68401 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -519,6 +519,18 @@ impl ValidatorClientHttpClient { Ok(url) } + fn make_gas_limit_url(&self, pubkey: &PublicKeyBytes) -> Result<Url, Error> { + let mut url = self.server.full.clone(); + url.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("validator") + .push(&pubkey.to_string()) + .push("gas_limit"); + Ok(url) + } + /// `GET lighthouse/auth` pub async fn get_auth(&self) -> Result<AuthResponse, Error> { let mut url = self.server.full.clone(); @@ -598,11 +610,38 @@ impl ValidatorClientHttpClient { self.post_with_raw_response(url, req).await } - /// `POST /eth/v1/validator/{pubkey}/feerecipient` + /// `DELETE /eth/v1/validator/{pubkey}/feerecipient` pub async fn delete_fee_recipient(&self, pubkey: &PublicKeyBytes) -> Result<Response, Error> { let url = self.make_fee_recipient_url(pubkey)?; self.delete_with_raw_response(url, &()).await } + + /// `GET /eth/v1/validator/{pubkey}/gas_limit` + pub async fn get_gas_limit( + &self, + pubkey: &PublicKeyBytes, + ) -> Result<GetGasLimitResponse, Error> { + let url = self.make_gas_limit_url(pubkey)?; + self.get(url) + .await + .map(|generic: GenericResponse<GetGasLimitResponse>| generic.data) + } + + /// `POST /eth/v1/validator/{pubkey}/gas_limit` + pub async fn post_gas_limit( + &self, + pubkey: &PublicKeyBytes, + req: &UpdateGasLimitRequest, + ) -> Result<Response, Error> { + let url = self.make_gas_limit_url(pubkey)?; + 
self.post_with_raw_response(url, req).await + } + + /// `DELETE /eth/v1/validator/{pubkey}/gas_limit` + pub async fn delete_gas_limit(&self, pubkey: &PublicKeyBytes) -> Result<Response, Error> { + let url = self.make_gas_limit_url(pubkey)?; + self.delete_with_raw_response(url, &()).await + } } /// Returns `Ok(response)` if the response is a `200 OK` response or a diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index 62987c1368..887bcb99ea 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -10,6 +10,13 @@ pub struct GetFeeRecipientResponse { pub ethaddress: Address, } +#[derive(Debug, Deserialize, Serialize, PartialEq)] +pub struct GetGasLimitResponse { + pub pubkey: PublicKeyBytes, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub gas_limit: u64, +} + #[derive(Debug, Deserialize, Serialize, PartialEq)] pub struct AuthResponse { pub token_path: String, diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index d829c97cc7..92439337f6 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -138,3 +138,9 @@ pub struct Web3SignerValidatorRequest { pub struct UpdateFeeRecipientRequest { pub ethaddress: Address, } + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct UpdateGasLimitRequest { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub gas_limit: u64, +} diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 1e48e86c05..e9c7bf69d4 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -12,7 +12,7 @@ use account_utils::{ pub use api_secret::ApiSecret; use create_validator::{create_validators_mnemonic, create_validators_web3signer}; use eth2::lighthouse_vc::{ - std_types::{AuthResponse, GetFeeRecipientResponse}, + std_types::{AuthResponse, GetFeeRecipientResponse, 
GetGasLimitResponse}, types::{self as api_types, GenericResponse, PublicKey, PublicKeyBytes}, }; use lighthouse_version::version_with_platform; @@ -626,8 +626,8 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( let post_fee_recipient = eth_v1 .and(warp::path("validator")) .and(warp::path::param::<PublicKey>()) - .and(warp::body::json()) .and(warp::path("feerecipient")) + .and(warp::body::json()) .and(warp::path::end()) .and(validator_store_filter.clone()) .and(signer.clone()) @@ -700,6 +700,115 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( ) .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::NO_CONTENT)); + // GET /eth/v1/validator/{pubkey}/gas_limit + let get_gas_limit = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::<PublicKey>()) + .and(warp::path("gas_limit")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |validator_pubkey: PublicKey, validator_store: Arc<ValidatorStore<T, E>>, signer| { + blocking_signed_json_task(signer, move || { + if validator_store + .initialized_validators() + .read() + .is_enabled(&validator_pubkey) + .is_none() + { + return Err(warp_utils::reject::custom_not_found(format!( + "no validator found with pubkey {:?}", + validator_pubkey + ))); + } + Ok(GenericResponse::from(GetGasLimitResponse { + pubkey: PublicKeyBytes::from(validator_pubkey.clone()), + gas_limit: validator_store + .get_gas_limit(&PublicKeyBytes::from(&validator_pubkey)), + })) + }) + }, + ); + + // POST /eth/v1/validator/{pubkey}/gas_limit + let post_gas_limit = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::<PublicKey>()) + .and(warp::path("gas_limit")) + .and(warp::body::json()) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |validator_pubkey: PublicKey, + request: api_types::UpdateGasLimitRequest, + validator_store: Arc<ValidatorStore<T, E>>, + signer| { + 
blocking_signed_json_task(signer, move || { + if validator_store + .initialized_validators() + .read() + .is_enabled(&validator_pubkey) + .is_none() + { + return Err(warp_utils::reject::custom_not_found(format!( + "no validator found with pubkey {:?}", + validator_pubkey + ))); + } + validator_store + .initialized_validators() + .write() + .set_validator_gas_limit(&validator_pubkey, request.gas_limit) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error persisting gas limit: {:?}", + e + )) + }) + }) + }, + ) + .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::ACCEPTED)); + + // DELETE /eth/v1/validator/{pubkey}/gas_limit + let delete_gas_limit = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::<PublicKey>()) + .and(warp::path("gas_limit")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |validator_pubkey: PublicKey, validator_store: Arc<ValidatorStore<T, E>>, signer| { + blocking_signed_json_task(signer, move || { + if validator_store + .initialized_validators() + .read() + .is_enabled(&validator_pubkey) + .is_none() + { + return Err(warp_utils::reject::custom_not_found(format!( + "no validator found with pubkey {:?}", + validator_pubkey + ))); + } + validator_store + .initialized_validators() + .write() + .delete_validator_gas_limit(&validator_pubkey) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error persisting gas limit removal: {:?}", + e + )) + }) + }) + }, + ) + .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::NO_CONTENT)); + // GET /eth/v1/keystores let get_std_keystores = std_keystores .and(signer.clone()) @@ -786,6 +895,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( .or(get_lighthouse_validators) .or(get_lighthouse_validators_pubkey) .or(get_fee_recipient) + .or(get_gas_limit) .or(get_std_keystores) .or(get_std_remotekeys), ) @@ -795,12 +905,14 @@ pub fn serve<T: 'static + SlotClock 
+ Clone, E: EthSpec>( .or(post_validators_mnemonic) .or(post_validators_web3signer) .or(post_fee_recipient) + .or(post_gas_limit) .or(post_std_keystores) .or(post_std_remotekeys), )) .or(warp::patch().and(patch_validators)) .or(warp::delete().and( delete_fee_recipient + .or(delete_gas_limit) .or(delete_std_keystores) .or(delete_std_remotekeys), )), diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index c3b5f0bb90..5cc755db53 100644 --- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -1,3 +1,4 @@ +use super::super::super::validator_store::DEFAULT_GAS_LIMIT; use super::*; use account_utils::random_password_string; use bls::PublicKeyBytes; @@ -769,6 +770,181 @@ fn check_get_set_fee_recipient() { }) } +#[test] +fn check_get_set_gas_limit() { + run_test(|tester: ApiTester| async move { + let _ = &tester; + let password = random_password_string(); + let keystores = (0..3) + .map(|_| new_keystore(password.clone())) + .collect::<Vec<_>>(); + let all_pubkeys = keystores.iter().map(keystore_pubkey).collect::<Vec<_>>(); + + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }) + .await + .unwrap(); + + // All keystores should be imported. + check_keystore_import_response(&import_res, all_imported(keystores.len())); + + // Check that GET lists all the imported keystores. 
+ let get_res = tester.client.get_keystores().await.unwrap(); + check_keystore_get_response(&get_res, &keystores); + + // Before setting anything, every gas limit should be set to DEFAULT_GAS_LIMIT + for pubkey in &all_pubkeys { + let get_res = tester + .client + .get_gas_limit(pubkey) + .await + .expect("should get gas limit"); + assert_eq!( + get_res, + GetGasLimitResponse { + pubkey: pubkey.clone(), + gas_limit: DEFAULT_GAS_LIMIT, + } + ); + } + + let gas_limit_public_key_1 = 40_000_000; + let gas_limit_public_key_2 = 42; + let gas_limit_override = 100; + + // set the gas limit for pubkey[1] using the API + tester + .client + .post_gas_limit( + &all_pubkeys[1], + &UpdateGasLimitRequest { + gas_limit: gas_limit_public_key_1, + }, + ) + .await + .expect("should update gas limit"); + // now everything but pubkey[1] should be DEFAULT_GAS_LIMIT + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_gas_limit(pubkey) + .await + .expect("should get gas limit"); + let expected = if i == 1 { + gas_limit_public_key_1.clone() + } else { + DEFAULT_GAS_LIMIT + }; + assert_eq!( + get_res, + GetGasLimitResponse { + pubkey: pubkey.clone(), + gas_limit: expected, + } + ); + } + + // set the gas limit for pubkey[2] using the API + tester + .client + .post_gas_limit( + &all_pubkeys[2], + &UpdateGasLimitRequest { + gas_limit: gas_limit_public_key_2, + }, + ) + .await + .expect("should update gas limit"); + // now everything but pubkey[1] & pubkey[2] should be DEFAULT_GAS_LIMIT + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_gas_limit(pubkey) + .await + .expect("should get gas limit"); + let expected = if i == 1 { + gas_limit_public_key_1 + } else if i == 2 { + gas_limit_public_key_2 + } else { + DEFAULT_GAS_LIMIT + }; + assert_eq!( + get_res, + GetGasLimitResponse { + pubkey: pubkey.clone(), + gas_limit: expected, + } + ); + } + + // should be able to override previous gas_limit + tester + .client 
+ .post_gas_limit( + &all_pubkeys[1], + &UpdateGasLimitRequest { + gas_limit: gas_limit_override, + }, + ) + .await + .expect("should update gas limit"); + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_gas_limit(pubkey) + .await + .expect("should get gas limit"); + let expected = if i == 1 { + gas_limit_override + } else if i == 2 { + gas_limit_public_key_2 + } else { + DEFAULT_GAS_LIMIT + }; + assert_eq!( + get_res, + GetGasLimitResponse { + pubkey: pubkey.clone(), + gas_limit: expected, + } + ); + } + + // delete gas limit for pubkey[1] using the API + tester + .client + .delete_gas_limit(&all_pubkeys[1]) + .await + .expect("should delete gas limit"); + // now everything but pubkey[2] should be DEFAULT_GAS_LIMIT + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_gas_limit(pubkey) + .await + .expect("should get gas limit"); + let expected = if i == 2 { + gas_limit_public_key_2 + } else { + DEFAULT_GAS_LIMIT + }; + assert_eq!( + get_res, + GetGasLimitResponse { + pubkey: pubkey.clone(), + gas_limit: expected, + } + ); + } + }) +} + fn all_indices(count: usize) -> Vec<usize> { (0..count).collect() } diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 66a621eb77..8d9fbe281f 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -795,6 +795,78 @@ impl InitializedValidators { Ok(()) } + /// Sets the `InitializedValidator` and `ValidatorDefinition` `gas_limit` values. + /// + /// ## Notes + /// + /// Setting a validator `gas_limit` will cause `self.definitions` to be updated and saved to + /// disk. + /// + /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. 
+ pub fn set_validator_gas_limit( + &mut self, + voting_public_key: &PublicKey, + gas_limit: u64, + ) -> Result<(), Error> { + if let Some(def) = self + .definitions + .as_mut_slice() + .iter_mut() + .find(|def| def.voting_public_key == *voting_public_key) + { + def.gas_limit = Some(gas_limit); + } + + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + val.gas_limit = Some(gas_limit); + } + + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Ok(()) + } + + /// Removes the `InitializedValidator` and `ValidatorDefinition` `gas_limit` values. + /// + /// ## Notes + /// + /// Removing a validator `gas_limit` will cause `self.definitions` to be updated and saved to + /// disk. The gas_limit for the validator will then fall back to the process level default if + /// it is set. + /// + /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. + pub fn delete_validator_gas_limit( + &mut self, + voting_public_key: &PublicKey, + ) -> Result<(), Error> { + if let Some(def) = self + .definitions + .as_mut_slice() + .iter_mut() + .find(|def| def.voting_public_key == *voting_public_key) + { + def.gas_limit = None; + } + + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + val.gas_limit = None; + } + + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Ok(()) + } + /// Tries to decrypt the key cache. /// /// Returns the decrypted cache if decryption was successful, or an error if a required password diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index f883d0201f..292b49ac3a 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -57,7 +57,7 @@ const SLASHING_PROTECTION_HISTORY_EPOCHS: u64 = 512; /// Currently used as the default gas limit in execution clients. 
/// /// https://github.com/ethereum/builder-specs/issues/17 -const DEFAULT_GAS_LIMIT: u64 = 30_000_000; +pub const DEFAULT_GAS_LIMIT: u64 = 30_000_000; struct LocalValidator { validator_dir: ValidatorDir, From f03f9ba68009514dc4da48759828c047c4c0b3dd Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Mon, 15 Aug 2022 01:30:59 +0000 Subject: [PATCH 137/184] Increase merge-readiness lookhead (#3463) ## Issue Addressed NA ## Proposed Changes Start issuing merge-readiness logs 2 weeks before the Bellatrix fork epoch. Additionally, if the Bellatrix epoch is specified and the use has configured an EL, always log merge readiness logs, this should benefit pro-active users. ### Lookahead Reasoning - Bellatrix fork is: - epoch 144896 - slot 4636672 - Unix timestamp: `1606824023 + (4636672 * 12) = 1662464087` - GMT: Tue Sep 06 2022 11:34:47 GMT+0000 - Warning start time is: - Unix timestamp: `1662464087 - 604800 * 2 = 1661254487` - GMT: Tue Aug 23 2022 11:34:47 GMT+0000 The [current expectation](https://discord.com/channels/595666850260713488/745077610685661265/1007445305198911569) is that EL and CL clients will releases out by Aug 22nd at the latest, then an EF announcement will go out on the 23rd. If all goes well, LH will start alerting users about merge-readiness just after the announcement. ## Additional Info NA --- beacon_node/beacon_chain/src/merge_readiness.rs | 16 +++++++++++----- beacon_node/client/src/notifier.rs | 1 + 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/merge_readiness.rs b/beacon_node/beacon_chain/src/merge_readiness.rs index 4a7b38bdb4..4ef2102fd5 100644 --- a/beacon_node/beacon_chain/src/merge_readiness.rs +++ b/beacon_node/beacon_chain/src/merge_readiness.rs @@ -9,7 +9,7 @@ use types::*; /// The time before the Bellatrix fork when we will start issuing warnings about preparation. 
const SECONDS_IN_A_WEEK: u64 = 604800; -pub const MERGE_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK; +pub const MERGE_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; #[derive(Default, Debug, Serialize, Deserialize)] pub struct MergeConfig { @@ -130,16 +130,22 @@ impl fmt::Display for MergeReadiness { } impl<T: BeaconChainTypes> BeaconChain<T> { - /// Returns `true` if the Bellatrix fork has occurred or will occur within - /// `MERGE_READINESS_PREPARATION_SECONDS`. + /// Returns `true` if user has an EL configured, or if the Bellatrix fork has occurred or will + /// occur within `MERGE_READINESS_PREPARATION_SECONDS`. pub fn is_time_to_prepare_for_bellatrix(&self, current_slot: Slot) -> bool { if let Some(bellatrix_epoch) = self.spec.bellatrix_fork_epoch { let bellatrix_slot = bellatrix_epoch.start_slot(T::EthSpec::slots_per_epoch()); let merge_readiness_preparation_slots = MERGE_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot; - // Return `true` if Bellatrix has happened or is within the preparation time. - current_slot + merge_readiness_preparation_slots > bellatrix_slot + if self.execution_layer.is_some() { + // The user has already configured an execution layer, start checking for readiness + // right away. + true + } else { + // Return `true` if Bellatrix has happened or is within the preparation time. + current_slot + merge_readiness_preparation_slots > bellatrix_slot + } } else { // The Bellatrix fork epoch has not been defined yet, no need to prepare. 
false diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 9f82cd2012..ae8f024b71 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -387,6 +387,7 @@ async fn merge_readiness_logging<T: BeaconChainTypes>( log, "Not ready for merge"; "info" => %readiness, + "hint" => "try updating Lighthouse and/or the execution layer", ) } readiness @ MergeReadiness::NotSynced => warn!( From 25e3dc930025a1847b86fa03845dc3c40df4d8f6 Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Mon, 15 Aug 2022 01:31:00 +0000 Subject: [PATCH 138/184] Fix block verification and checkpoint sync caches (#3466) ## Issue Addressed Closes https://github.com/sigp/lighthouse/issues/2962 ## Proposed Changes Build all caches on the checkpoint state before storing it in the database. Additionally, fix a bug in `signature_verify_chain_segment` which prevented block verification from succeeding unless the previous epoch cache was already built. The previous epoch cache is required to verify the signatures of attestations included from previous epochs, even when all the blocks in the segment are from the same epoch. The comments around `signature_verify_chain_segment` have also been updated to reflect the fact that it should only be used on a chain of blocks from a single epoch. I believe this restriction had already been added at some point in the past and that the current comments were just outdated (and I think because the proposer shuffling can change in the next epoch based on the blocks applied in the current epoch that this limitation is essential). 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 9 ++++----- beacon_node/beacon_chain/src/block_verification.rs | 8 ++++++-- beacon_node/beacon_chain/src/builder.rs | 6 ++++++ 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 54c961e34d..f7d08c395d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2293,8 +2293,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> { // Determine the epoch of the first block in the remaining segment. let start_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); - // The `last_index` indicates the position of the last block that is in the current - // epoch of `start_epoch`. + // The `last_index` indicates the position of the first block in an epoch greater + // than the current epoch: partitioning the blocks into a run of blocks in the same + // epoch and everything else. These same-epoch blocks can all be signature-verified with + // the same `BeaconState`. let last_index = filtered_chain_segment .iter() .position(|(_root, block)| { @@ -2302,9 +2304,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> { }) .unwrap_or(filtered_chain_segment.len()); - // Split off the first section blocks that are all either within the current epoch of - // the first block. These blocks can all be signature-verified with the same - // `BeaconState`. 
let mut blocks = filtered_chain_segment.split_off(last_index); std::mem::swap(&mut blocks, &mut filtered_chain_segment); diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 73330e7b56..4d84fe35e0 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -504,8 +504,8 @@ fn process_block_slash_info<T: BeaconChainTypes>( /// /// ## Errors /// -/// The given `chain_segment` must span no more than two epochs, otherwise an error will be -/// returned. +/// The given `chain_segment` must contain only blocks from the same epoch, otherwise an error +/// will be returned. pub fn signature_verify_chain_segment<T: BeaconChainTypes>( mut chain_segment: Vec<(Hash256, Arc<SignedBeaconBlock<T::EthSpec>>)>, chain: &BeaconChain<T>, @@ -1702,6 +1702,9 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>( let block_epoch = block_slot.epoch(E::slots_per_epoch()); if state.current_epoch() == block_epoch { + // Build both the current and previous epoch caches, as the previous epoch caches are + // useful for verifying attestations in blocks from the current epoch. 
+ state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?; Ok(Cow::Borrowed(state)) @@ -1719,6 +1722,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>( partial_state_advance(&mut state, state_root_opt, target_slot, spec) .map_err(|e| BlockError::BeaconChainError(BeaconChainError::from(e)))?; + state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?; Ok(Cow::Owned(state)) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 252b7cef5a..cba9a56982 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -403,6 +403,12 @@ where )); } + // Prime all caches before storing the state in the database and computing the tree hash + // root. + weak_subj_state + .build_all_caches(&self.spec) + .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?; + let computed_state_root = weak_subj_state .update_tree_hash_cache() .map_err(|e| format!("Error computing checkpoint state root: {:?}", e))?; From e5fc9f26bcccde9c48396eb4ea7d10fcd9fed13e Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Mon, 15 Aug 2022 01:31:02 +0000 Subject: [PATCH 139/184] Log if no execution endpoint is configured (#3467) ## Issue Addressed Fixes an issue whereby syncing a post-merge network without an execution endpoint would silently stall. Sync swallows the errors from block verification so previously there was no indication in the logs for why the node couldn't sync. ## Proposed Changes Add an error log to the merge-readiness notifier for the case where the merge has already completed but no execution endpoint is configured. 
--- beacon_node/client/src/notifier.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index ae8f024b71..11f0f6e2a2 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -339,7 +339,21 @@ async fn merge_readiness_logging<T: BeaconChainTypes>( payload.parent_hash() != ExecutionBlockHash::zero() }); - if merge_completed || !beacon_chain.is_time_to_prepare_for_bellatrix(current_slot) { + let has_execution_layer = beacon_chain.execution_layer.is_some(); + + if merge_completed && has_execution_layer + || !beacon_chain.is_time_to_prepare_for_bellatrix(current_slot) + { + return; + } + + if merge_completed && !has_execution_layer { + error!( + log, + "Execution endpoint required"; + "info" => "you need an execution engine to validate blocks, see: \ + https://lighthouse-book.sigmaprime.io/merge-migration.html" + ); return; } From d9d128815622da4160565e580350087c83699d08 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Wed, 17 Aug 2022 02:36:38 +0000 Subject: [PATCH 140/184] =?UTF-8?q?Add=20mainnet=20merge=20values=20?= =?UTF-8?q?=F0=9F=90=BC=20(#3462)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Issue Addressed NA ## Proposed Changes Adds **tentative** values for the merge TTD and Bellatrix as per https://github.com/ethereum/consensus-specs/pull/2969 ## Additional Info - ~~Blocked on https://github.com/ethereum/consensus-specs/pull/2969~~ --- .../mainnet/config.yaml | 6 +-- consensus/types/src/beacon_block.rs | 6 +-- consensus/types/src/beacon_state/tests.rs | 44 +++++++++---------- consensus/types/src/chain_spec.rs | 18 ++++---- testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 8 ++++ 6 files changed, 45 insertions(+), 39 deletions(-) diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml 
b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index cc4e7dcab4..6e87a708f8 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -6,8 +6,8 @@ PRESET_BASE: 'mainnet' # Transition # --------------------------------------------------------------- -# TBD, 2**256-2**10 is a placeholder -TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912 +# Estimated on Sept 15, 2022 +TERMINAL_TOTAL_DIFFICULTY: 58750000000000000000000 # By default, don't use these params TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 @@ -35,7 +35,7 @@ ALTAIR_FORK_VERSION: 0x01000000 ALTAIR_FORK_EPOCH: 74240 # Merge BELLATRIX_FORK_VERSION: 0x02000000 -BELLATRIX_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC # Sharding SHARDING_FORK_VERSION: 0x03000000 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 2d7e68a5c4..da8566dcb2 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -651,19 +651,17 @@ mod tests { #[test] fn decode_base_and_altair() { type E = MainnetEthSpec; + let spec = E::default_spec(); let rng = &mut XorShiftRng::from_seed([42; 16]); - let fork_epoch = Epoch::from_ssz_bytes(&[7, 6, 5, 4, 3, 2, 1, 0]).unwrap(); + let fork_epoch = spec.altair_fork_epoch.unwrap(); let base_epoch = fork_epoch.saturating_sub(1_u64); let base_slot = base_epoch.end_slot(E::slots_per_epoch()); let altair_epoch = fork_epoch; let altair_slot = altair_epoch.start_slot(E::slots_per_epoch()); - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(fork_epoch); - // BeaconBlockBase { let good_base_block = BeaconBlock::Base(BeaconBlockBase { diff --git 
a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index d65d0a9e6c..5898bfe214 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -11,7 +11,7 @@ use beacon_chain::types::{ MinimalEthSpec, RelativeEpoch, Slot, }; use safe_arith::SafeArith; -use ssz::{Decode, Encode}; +use ssz::Encode; use state_processing::per_slot_processing; use std::ops::Mul; use swap_or_not_shuffle::compute_shuffled_index; @@ -438,62 +438,60 @@ mod get_outstanding_deposit_len { #[test] fn decode_base_and_altair() { type E = MainnetEthSpec; + let spec = E::default_spec(); let rng = &mut XorShiftRng::from_seed([42; 16]); - let fork_epoch = Epoch::from_ssz_bytes(&[7, 6, 5, 4, 3, 2, 1, 0]).unwrap(); + let fork_epoch = spec.altair_fork_epoch.unwrap(); let base_epoch = fork_epoch.saturating_sub(1_u64); let base_slot = base_epoch.end_slot(E::slots_per_epoch()); let altair_epoch = fork_epoch; let altair_slot = altair_epoch.start_slot(E::slots_per_epoch()); - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(altair_epoch); - // BeaconStateBase { - let good_base_block: BeaconState<MainnetEthSpec> = BeaconState::Base(BeaconStateBase { + let good_base_state: BeaconState<MainnetEthSpec> = BeaconState::Base(BeaconStateBase { slot: base_slot, ..<_>::random_for_test(rng) }); - // It's invalid to have a base block with a slot higher than the fork slot. - let bad_base_block = { - let mut bad = good_base_block.clone(); + // It's invalid to have a base state with a slot higher than the fork slot. 
+ let bad_base_state = { + let mut bad = good_base_state.clone(); *bad.slot_mut() = altair_slot; bad }; assert_eq!( - BeaconState::from_ssz_bytes(&good_base_block.as_ssz_bytes(), &spec) - .expect("good base block can be decoded"), - good_base_block + BeaconState::from_ssz_bytes(&good_base_state.as_ssz_bytes(), &spec) + .expect("good base state can be decoded"), + good_base_state ); - <BeaconState<MainnetEthSpec>>::from_ssz_bytes(&bad_base_block.as_ssz_bytes(), &spec) - .expect_err("bad base block cannot be decoded"); + <BeaconState<MainnetEthSpec>>::from_ssz_bytes(&bad_base_state.as_ssz_bytes(), &spec) + .expect_err("bad base state cannot be decoded"); } // BeaconStateAltair { - let good_altair_block: BeaconState<MainnetEthSpec> = + let good_altair_state: BeaconState<MainnetEthSpec> = BeaconState::Altair(BeaconStateAltair { slot: altair_slot, ..<_>::random_for_test(rng) }); - // It's invalid to have an Altair block with a slot lower than the fork slot. - let bad_altair_block = { - let mut bad = good_altair_block.clone(); + // It's invalid to have an Altair state with a slot lower than the fork slot. 
+ let bad_altair_state = { + let mut bad = good_altair_state.clone(); *bad.slot_mut() = base_slot; bad }; assert_eq!( - BeaconState::from_ssz_bytes(&good_altair_block.as_ssz_bytes(), &spec) - .expect("good altair block can be decoded"), - good_altair_block + BeaconState::from_ssz_bytes(&good_altair_state.as_ssz_bytes(), &spec) + .expect("good altair state can be decoded"), + good_altair_state ); - <BeaconState<MainnetEthSpec>>::from_ssz_bytes(&bad_altair_block.as_ssz_bytes(), &spec) - .expect_err("bad altair block cannot be decoded"); + <BeaconState<MainnetEthSpec>>::from_ssz_bytes(&bad_altair_state.as_ssz_bytes(), &spec) + .expect_err("bad altair state cannot be decoded"); } } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 8d56ce2da9..b2ba24ac3e 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -561,14 +561,9 @@ impl ChainSpec { .expect("pow does not overflow"), proportional_slashing_multiplier_bellatrix: 3, bellatrix_fork_version: [0x02, 0x00, 0x00, 0x00], - bellatrix_fork_epoch: None, - terminal_total_difficulty: Uint256::MAX - .checked_sub(Uint256::from(2u64.pow(10))) - .expect("subtraction does not overflow") - // Add 1 since the spec declares `2**256 - 2**10` and we use - // `Uint256::MAX` which is `2*256- 1`. 
- .checked_add(Uint256::one()) - .expect("addition does not overflow"), + bellatrix_fork_epoch: Some(Epoch::new(144896)), + terminal_total_difficulty: Uint256::from_dec_str("58750000000000000000000") + .expect("terminal_total_difficulty is a valid integer"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), safe_slots_to_import_optimistically: 128u64, @@ -621,6 +616,13 @@ impl ChainSpec { // Merge bellatrix_fork_version: [0x02, 0x00, 0x00, 0x01], bellatrix_fork_epoch: None, + terminal_total_difficulty: Uint256::MAX + .checked_sub(Uint256::from(2u64.pow(10))) + .expect("subtraction does not overflow") + // Add 1 since the spec declares `2**256 - 2**10` and we use + // `Uint256::MAX` which is `2*256- 1`. + .checked_add(Uint256::one()) + .expect("addition does not overflow"), // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index b237bfb761..9127093310 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.2.0-rc.1 +TESTS_TAG := v1.2.0-rc.2 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 87953a6141..88567c688e 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -25,12 +25,20 @@ excluded_paths = [ # Intentionally omitted, as per https://github.com/sigp/lighthouse/issues/1835 "tests/.*/.*/ssz_static/Eth1Block/", "tests/.*/.*/ssz_static/PowBlock/", + # light_client + "tests/.*/.*/light_client", # LightClientStore "tests/.*/.*/ssz_static/LightClientStore", # LightClientUpdate "tests/.*/.*/ssz_static/LightClientUpdate", # LightClientSnapshot "tests/.*/.*/ssz_static/LightClientSnapshot", + # LightClientBootstrap + "tests/.*/.*/ssz_static/LightClientBootstrap", + # 
LightClientOptimistic + "tests/.*/.*/ssz_static/LightClientOptimistic", + # LightClientFinalityUpdate + "tests/.*/.*/ssz_static/LightClientFinalityUpdate", # Merkle-proof tests for light clients "tests/.*/.*/merkle/single_proof", # Capella tests are disabled for now. From 8255c8682e57545c43ff26d33b59e12bd45606f7 Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Wed, 17 Aug 2022 02:36:39 +0000 Subject: [PATCH 141/184] Align engine API timeouts with spec (#3470) ## Proposed Changes Match the timeouts from the `execution-apis` spec. Our existing values were already quite close so I don't imagine this change to be very disruptive. The spec sets the timeout for `engine_getPayloadV1` to only 1 second, but we were already using a longer value of 2 seconds. I've kept the 2 second timeout as I don't think there's any need to fail faster when producing a payload. There's no timeout specified for `eth_syncing` so I've matched it to the shortest timeout from the spec (1 second). I think the previous value of 250ms was likely too low and could have been contributing to spurious timeouts, particularly for remote ELs. 
## Additional Info The timeouts are defined on each endpoint in this document: https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md --- beacon_node/execution_layer/src/engine_api/http.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index a8eb42971e..0f848a7716 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -26,21 +26,20 @@ pub const ETH_GET_BLOCK_BY_HASH: &str = "eth_getBlockByHash"; pub const ETH_GET_BLOCK_BY_HASH_TIMEOUT: Duration = Duration::from_secs(1); pub const ETH_SYNCING: &str = "eth_syncing"; -pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_millis(250); +pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_secs(1); pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1"; -pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(6); +pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; -pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(6); +pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = "engine_exchangeTransitionConfigurationV1"; -pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = - Duration::from_millis(500); +pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_secs(1); /// This error is returned during a `chainId` call by Geth. 
pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; From 7664776fc410cbd42ccb68b606d38a54cdaa1941 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Wed, 17 Aug 2022 02:36:40 +0000 Subject: [PATCH 142/184] Add test for exits spanning epochs (#3476) ## Issue Addressed NA ## Proposed Changes Adds a test that was written whilst doing some testing. This PR does not make changes to production code, it just adds a test for already existing functionality. ## Additional Info NA --- .../src/per_block_processing/tests.rs | 71 ++++++++++++++++++- 1 file changed, 69 insertions(+), 2 deletions(-) diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 2a84d1d2d2..e244e02c2d 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -7,8 +7,8 @@ use crate::per_block_processing::errors::{ ProposerSlashingInvalid, }; use crate::{ - per_block_processing::process_operations, BlockSignatureStrategy, VerifyBlockRoot, - VerifySignatures, + per_block_processing::{process_operations, verify_exit::verify_exit}, + BlockSignatureStrategy, VerifyBlockRoot, VerifySignatures, }; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use lazy_static::lazy_static; @@ -910,3 +910,70 @@ async fn invalid_proposer_slashing_proposal_epoch_mismatch() { }) ); } + +#[tokio::test] +async fn fork_spanning_exit() { + let mut spec = MainnetEthSpec::default_spec(); + let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); + + spec.altair_fork_epoch = Some(Epoch::new(2)); + spec.bellatrix_fork_epoch = Some(Epoch::new(4)); + spec.shard_committee_period = 0; + + let harness = BeaconChainHarness::builder(MainnetEthSpec::default()) + .spec(spec.clone()) + .deterministic_keypairs(VALIDATOR_COUNT) + .mock_execution_layer() + .fresh_ephemeral_store() + .build(); 
+ + harness.extend_to_slot(slots_per_epoch.into()).await; + + /* + * Produce an exit *before* Altair. + */ + + let signed_exit = harness.make_voluntary_exit(0, Epoch::new(1)); + assert!(signed_exit.message.epoch < spec.altair_fork_epoch.unwrap()); + + /* + * Ensure the exit verifies before Altair. + */ + + let head = harness.chain.canonical_head.cached_head(); + let head_state = &head.snapshot.beacon_state; + assert!(head_state.current_epoch() < spec.altair_fork_epoch.unwrap()); + verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) + .expect("phase0 exit verifies against phase0 state"); + + /* + * Ensure the exit verifies after Altair. + */ + + harness + .extend_to_slot(spec.altair_fork_epoch.unwrap().start_slot(slots_per_epoch)) + .await; + let head = harness.chain.canonical_head.cached_head(); + let head_state = &head.snapshot.beacon_state; + assert!(head_state.current_epoch() >= spec.altair_fork_epoch.unwrap()); + assert!(head_state.current_epoch() < spec.bellatrix_fork_epoch.unwrap()); + verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) + .expect("phase0 exit verifies against altair state"); + + /* + * Ensure the exit no longer verifies after Bellatrix. 
+ */ + + harness + .extend_to_slot( + spec.bellatrix_fork_epoch + .unwrap() + .start_slot(slots_per_epoch), + ) + .await; + let head = harness.chain.canonical_head.cached_head(); + let head_state = &head.snapshot.beacon_state; + assert!(head_state.current_epoch() >= spec.bellatrix_fork_epoch.unwrap()); + verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) + .expect_err("phase0 exit does not verify against bellatrix state"); +} From c2604c47d6d286728f36563bb503a7a0d4a7bf57 Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Wed, 17 Aug 2022 02:36:41 +0000 Subject: [PATCH 143/184] Optimistic sync: remove justified block check (#3477) ## Issue Addressed Implements spec change https://github.com/ethereum/consensus-specs/pull/2881 ## Proposed Changes Remove the justified block check from `is_optimistic_candidate_block`. --- consensus/fork_choice/src/fork_choice.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index c8d119a99b..3341fc5c22 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1332,15 +1332,6 @@ where return Ok(true); } - // If the justified block has execution enabled, then optimistically import any block. - if self - .get_justified_block()? - .execution_status - .is_execution_enabled() - { - return Ok(true); - } - // If the parent block has execution enabled, always import the block. // // See: From 726d1b0d9b6ace4cddcbd7efe22fc4ca5ae5a72d Mon Sep 17 00:00:00 2001 From: Mac L <mjladson@pm.me> Date: Fri, 19 Aug 2022 04:27:20 +0000 Subject: [PATCH 144/184] Unblock CI by updating git submodules directly in execution integration tests (#3479) ## Issue Addressed Recent changes to the Nethermind codebase removed the `rocksdb` git submodule in favour of a `nuget` package. 
This appears to have broken our ability to build the latest release of Nethermind inside our integration tests. ## Proposed Changes ~Temporarily pin the version used for the Nethermind integration tests to `master`. This ensures we use the packaged version of `rocksdb`. This is only necessary until a new release of Nethermind is available.~ Use `git submodule update --init --recursive` to ensure the required submodules are pulled before building. Co-authored-by: Diva M <divma@protonmail.com> --- .../src/build_utils.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/testing/execution_engine_integration/src/build_utils.rs b/testing/execution_engine_integration/src/build_utils.rs index 966a3bfb43..15e7fdc0f1 100644 --- a/testing/execution_engine_integration/src/build_utils.rs +++ b/testing/execution_engine_integration/src/build_utils.rs @@ -20,7 +20,6 @@ pub fn clone_repo(repo_dir: &Path, repo_url: &str) -> Result<(), String> { Command::new("git") .arg("clone") .arg(repo_url) - .arg("--recursive") .current_dir(repo_dir) .output() .map_err(|_| format!("failed to clone repo at {repo_url}"))?, @@ -41,6 +40,21 @@ pub fn checkout(repo_dir: &Path, revision_or_branch: &str) -> Result<(), String> ) })?, |_| {}, + )?; + output_to_result( + Command::new("git") + .arg("submodule") + .arg("update") + .arg("--init") + .arg("--recursive") + .current_dir(repo_dir) + .output() + .map_err(|_| { + format!( + "failed to update submodules on branch or revision at {repo_dir:?}/{revision_or_branch}", + ) + })?, + |_| {}, ) } From a0605c4ee6a5e70fbc1fd26d18605262d4a353d9 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Fri, 19 Aug 2022 04:27:21 +0000 Subject: [PATCH 145/184] Bump EF tests to `v1.2.0 rc.3` (#3483) ## Issue Addressed NA ## Proposed Changes Bumps test vectors and ignores another weird MacOS file. 
## Additional Info NA --- testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 9127093310..dc89cb5d5f 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.2.0-rc.2 +TESTS_TAG := v1.2.0-rc.3 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 88567c688e..a10ccf1e6f 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -44,7 +44,9 @@ excluded_paths = [ # Capella tests are disabled for now. "tests/.*/capella", # One of the EF researchers likes to pack the tarballs on a Mac - ".*\.DS_Store.*" + ".*\.DS_Store.*", + # More Mac weirdness. + "tests/mainnet/bellatrix/operations/deposit/pyspec_tests/deposit_with_previous_fork_version__valid_ineffective/._meta.yaml" ] def normalize_path(path): From 043fa2153ed0e33c96b2b48606cfd65a92919cc8 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Fri, 19 Aug 2022 04:27:22 +0000 Subject: [PATCH 146/184] Revise EE peer penalties (#3485) ## Issue Addressed NA ## Proposed Changes Don't penalize peers for errors that might be caused by an honest optimistic node. 
## Additional Info NA --- .../beacon_chain/src/block_verification.rs | 22 +++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 4d84fe35e0..95d5f818f0 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -349,13 +349,27 @@ impl ExecutionPayloadError { // always forced to consider here whether or not to penalize a peer when // we add a new error condition. match self { + // The peer has nothing to do with this error, do not penalize them. ExecutionPayloadError::NoExecutionConnection => false, + // The peer has nothing to do with this error, do not penalize them. ExecutionPayloadError::RequestFailed(_) => false, - ExecutionPayloadError::RejectedByExecutionEngine { .. } => true, + // An honest optimistic node may propagate blocks which are rejected by an EE, do not + // penalize them. + ExecutionPayloadError::RejectedByExecutionEngine { .. } => false, + // This is a trivial gossip validation condition, there is no reason for an honest peer + // to propagate a block with an invalid payload time stamp. ExecutionPayloadError::InvalidPayloadTimestamp { .. } => true, - ExecutionPayloadError::InvalidTerminalPoWBlock { .. } => true, - ExecutionPayloadError::InvalidActivationEpoch { .. } => true, - ExecutionPayloadError::InvalidTerminalBlockHash { .. } => true, + // An honest optimistic node may propagate blocks with an invalid terminal PoW block, we + // should not penalized them. + ExecutionPayloadError::InvalidTerminalPoWBlock { .. } => false, + // This condition is checked *after* gossip propagation, therefore penalizing gossip + // peers for this block would be unfair. There may be an argument to penalize RPC + // blocks, since even an optimistic node shouldn't verify this block. We will remove the + // penalties for all block imports to keep things simple. 
+ ExecutionPayloadError::InvalidActivationEpoch { .. } => false, + // As per `Self::InvalidActivationEpoch`. + ExecutionPayloadError::InvalidTerminalBlockHash { .. } => false, + // Do not penalize the peer since it's not their fault that *we're* optimistic. ExecutionPayloadError::UnverifiedNonOptimisticCandidate => false, } } From df358b864dc7e261c7bf26da1c2be3c92cf18e60 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Fri, 19 Aug 2022 04:27:23 +0000 Subject: [PATCH 147/184] Add metrics for EE `PayloadStatus` returns (#3486) ## Issue Addressed NA ## Proposed Changes Adds some metrics so we can track payload status responses from the EE. I think this will be useful for troubleshooting and alerting. I also bumped the `BeaconChain::per_slot_task` to `debug` since it doesn't seem too noisy and would have helped us with some things we were debugging in the past. ## Additional Info NA --- Cargo.lock | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 7 ++++++- beacon_node/execution_layer/Cargo.toml | 2 +- beacon_node/execution_layer/src/engine_api.rs | 4 +++- beacon_node/execution_layer/src/lib.rs | 14 ++++++++++++++ beacon_node/execution_layer/src/metrics.rs | 5 +++++ 6 files changed, 30 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a406df149f..223e8de300 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2083,6 +2083,7 @@ dependencies = [ "slot_clock", "ssz-rs", "state_processing", + "strum", "task_executor", "tempfile", "tokio", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f7d08c395d..2cb33d7dad 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4274,8 +4274,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// it contains a call to `fork_choice` which may eventually call /// `tokio::runtime::block_on` in certain cases. 
pub async fn per_slot_task(self: &Arc<Self>) { - trace!(self.log, "Running beacon chain per slot tasks"); if let Some(slot) = self.slot_clock.now() { + debug!( + self.log, + "Running beacon chain per slot tasks"; + "slot" => ?slot + ); + // Always run the light-weight pruning tasks (these structures should be empty during // sync anyway). self.naive_aggregation_pool.write().prune(slot); diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 83f9454f8a..3b401d4591 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -43,4 +43,4 @@ fork_choice = { path = "../../consensus/fork_choice" } mev-build-rs = {git = "https://github.com/ralexstokes/mev-rs", tag = "v0.2.0"} ethereum-consensus = {git = "https://github.com/ralexstokes/ethereum-consensus"} ssz-rs = {git = "https://github.com/ralexstokes/ssz-rs"} - +strum = "0.24.0" diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index c370985ec0..ba0a37736b 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -4,6 +4,7 @@ use http::deposit_methods::RpcError; pub use json_structures::TransitionConfigurationV1; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; +use strum::IntoStaticStr; pub use types::{ Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, Hash256, Uint256, VariableList, @@ -71,7 +72,8 @@ impl From<builder_client::Error> for Error { } } -#[derive(Clone, Copy, Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq, IntoStaticStr)] +#[strum(serialize_all = "snake_case")] pub enum PayloadStatusV1Status { Valid, Invalid, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index e7bbc6cd5e..778b2247be 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -893,6 +893,13 @@ 
impl<T: EthSpec> ExecutionLayer<T> { .request(|engine| engine.api.new_payload_v1(execution_payload.clone())) .await; + if let Ok(status) = &result { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_PAYLOAD_STATUS, + &["new_payload", status.status.into()], + ); + } + process_payload_status(execution_payload.block_hash, result, self.log()) .map_err(Box::new) .map_err(Error::EngineError) @@ -1032,6 +1039,13 @@ impl<T: EthSpec> ExecutionLayer<T> { }) .await; + if let Ok(status) = &result { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_PAYLOAD_STATUS, + &["forkchoice_updated", status.payload_status.status.into()], + ); + } + process_payload_status( head_block_hash, result.map(|response| response.payload_status), diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index e28a81fd87..9b00193a4a 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -36,4 +36,9 @@ lazy_static::lazy_static! { "execution_layer_get_payload_by_block_hash_time", "Time to reconstruct a payload from the EE using eth_getBlockByHash" ); + pub static ref EXECUTION_LAYER_PAYLOAD_STATUS: Result<IntCounterVec> = try_create_int_counter_vec( + "execution_layer_payload_status", + "Indicates the payload status returned for a particular method", + &["method", "status"] + ); } From 931153885c56f612e5796ebab7569e108b59f517 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Fri, 19 Aug 2022 04:27:24 +0000 Subject: [PATCH 148/184] Run per-slot fork choice at a further distance from the head (#3487) ## Issue Addressed NA ## Proposed Changes Run fork choice when the head is 256 slots from the wall-clock slot, rather than 4. The reason we don't *always* run FC is so that it doesn't slow us down during sync. As the comments state, setting the value to 256 means that we'd only have one interrupting fork-choice call if we were syncing at 20 slots/sec. 
## Additional Info NA --- beacon_node/beacon_chain/src/beacon_chain.rs | 6 +++++- beacon_node/beacon_chain/src/state_advance_timer.rs | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 2cb33d7dad..4d37926dd9 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -131,7 +131,11 @@ const PREPARE_PROPOSER_HISTORIC_EPOCHS: u64 = 4; /// run the per-slot tasks (primarily fork choice). /// /// This prevents unnecessary work during sync. -const MAX_PER_SLOT_FORK_CHOICE_DISTANCE: u64 = 4; +/// +/// The value is set to 256 since this would be just over one slot (12.8s) when syncing at +/// 20 slots/second. Having a single fork-choice run interrupt syncing would have very little +/// impact whilst having 8 epochs without a block is a comfortable grace period. +const MAX_PER_SLOT_FORK_CHOICE_DISTANCE: u64 = 256; /// Reported to the user when the justified block has an invalid execution payload. pub const INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON: &str = diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 48c0f2f8a2..4359b6f1e8 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -38,7 +38,11 @@ use types::{AttestationShufflingId, EthSpec, Hash256, RelativeEpoch, Slot}; const MAX_ADVANCE_DISTANCE: u64 = 4; /// Similarly for fork choice: avoid the fork choice lookahead during sync. -const MAX_FORK_CHOICE_DISTANCE: u64 = 4; +/// +/// The value is set to 256 since this would be just over one slot (12.8s) when syncing at +/// 20 slots/second. Having a single fork-choice run interrupt syncing would have very little +/// impact whilst having 8 epochs without a block is a comfortable grace period. 
+const MAX_FORK_CHOICE_DISTANCE: u64 = 256; #[derive(Debug)] enum Error { From 18c61a5e8be3e54226a86a69b96f8f4f7fd790e4 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Mon, 22 Aug 2022 03:43:08 +0000 Subject: [PATCH 149/184] v3.0.0 (#3464) ## Issue Addressed NA ## Proposed Changes Bump versions to v3.0.0 ## Additional Info - ~~Blocked on #3439~~ - ~~Blocked on #3459~~ - ~~Blocked on #3463~~ - ~~Blocked on #3462~~ - ~~Requires further testing~~ Co-authored-by: Michael Sproul <michael@sigmaprime.io> --- .github/workflows/test-suite.yml | 4 +++- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 7 files changed, 13 insertions(+), 11 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 3be8097ddf..1a7d78f61f 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -337,8 +337,10 @@ jobs: - uses: actions/checkout@v1 - name: Install Rust (${{ env.PINNED_NIGHTLY }}) run: rustup toolchain install $PINNED_NIGHTLY + # NOTE: cargo-udeps version is pinned until this issue is resolved: + # https://github.com/est31/cargo-udeps/issues/135 - name: Install cargo-udeps - run: cargo install cargo-udeps --locked + run: cargo install cargo-udeps --locked --force --version 0.1.30 - name: Create Cargo config dir run: mkdir -p .cargo - name: Install custom Cargo config diff --git a/Cargo.lock b/Cargo.lock index 223e8de300..9e17e2e2a4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -436,7 +436,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.5.1" +version = "3.0.0" dependencies = [ "beacon_chain", "clap", @@ -594,7 +594,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.5.1" +version = "3.0.0" dependencies = [ "beacon_node", "clap", @@ -3104,7 +3104,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" 
[[package]] name = "lcli" -version = "2.5.1" +version = "3.0.0" dependencies = [ "account_utils", "beacon_chain", @@ -3604,7 +3604,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.5.1" +version = "3.0.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 417acf3d9e..e580a7e968 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.5.1" +version = "3.0.0" authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"] edition = "2021" diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index b53b8a5fd6..b6aa9b4f34 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "2.5.1" +version = "3.0.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index c499bf0498..4963f98cd5 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.5.1-", - fallback = "Lighthouse/v2.5.1" + prefix = "Lighthouse/v3.0.0-", + fallback = "Lighthouse/v3.0.0" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 5d94a50461..8c76b8f39b 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "2.5.1" +version = "3.0.0" authors = ["Paul Hauner <paul@paulhauner.com>"] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 805b4eca26..9250ef82a8 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "2.5.1" +version = "3.0.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" autotests = false From aab4a8d2f2c13238293700712a39bca98dec2964 Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Tue, 23 Aug 2022 03:50:58 +0000 Subject: [PATCH 150/184] Update docs for mainnet merge release (#3494) ## Proposed Changes Update the merge migration docs to encourage updating mainnet configs _now_! The docs are also updated to recommend _against_ `--suggested-fee-recipient` on the beacon node (https://github.com/sigp/lighthouse/issues/3432). Additionally the `--help` for the CLI is updated to match with a few small semantic changes: - `--execution-jwt` is no longer allowed without `--execution-endpoint`. We've ended up without a default for `--execution-endpoint`, so I think that's fine. - The flags related to the JWT are only allowed if `--execution-jwt` is provided. 
--- beacon_node/src/cli.rs | 24 ++++++----- book/src/database-migrations.md | 1 + book/src/merge-migration.md | 64 +++++++++++++++++++++++------ book/src/suggested-fee-recipient.md | 7 ++++ lighthouse/src/main.rs | 6 +-- validator_client/src/cli.rs | 5 ++- 6 files changed, 79 insertions(+), 28 deletions(-) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 7a91530252..bbb904717b 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -235,6 +235,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("http-spec-fork") .long("http-spec-fork") + .value_name("FORK") .help("Serve the spec for a specific hard fork on /eth/v1/config/spec. It should \ not be necessary to set this flag.") .takes_value(true) @@ -327,9 +328,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("staking") .long("staking") - .help("Standard option for a staking beacon node. Equivalent to \ - `lighthouse bn --http --eth1 `. This will enable the http server on localhost:5052 \ - and try connecting to an eth1 node on localhost:8545") + .help("Standard option for a staking beacon node. This will enable the HTTP server \ + on localhost:5052 and import deposit logs from the execution node. This is \ + equivalent to `--http` on merge-ready networks, or `--http --eth1` pre-merge") .takes_value(false) ) @@ -419,16 +420,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Deprecated. The feature activates automatically when --execution-endpoint \ is supplied.") .takes_value(false) + .hidden(true) ) .arg( Arg::with_name("execution-endpoint") .long("execution-endpoint") .value_name("EXECUTION-ENDPOINT") .alias("execution-endpoints") - .help("Server endpoint for an execution layer jwt authenticated HTTP \ + .help("Server endpoint for an execution layer JWT-authenticated HTTP \ JSON-RPC connection. Uses the same endpoint to populate the \ - deposit cache. 
Also enables the --merge flag.\ - If not provided, uses the default value of http://127.0.0.1:8551") + deposit cache.") .takes_value(true) .requires("execution-jwt") ) @@ -439,6 +440,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .alias("jwt-secrets") .help("File path which contains the hex-encoded JWT secret for the \ execution endpoint provided in the --execution-endpoint flag.") + .requires("execution-endpoint") .takes_value(true) ) .arg( @@ -449,6 +451,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Used by the beacon node to communicate a unique identifier to execution nodes \ during JWT authentication. It corresponds to the 'id' field in the JWT claims object.\ Set to empty by default") + .requires("execution-jwt") .takes_value(true) ) .arg( @@ -459,16 +462,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Used by the beacon node to communicate a client version to execution nodes \ during JWT authentication. It corresponds to the 'clv' field in the JWT claims object.\ Set to empty by default") + .requires("execution-jwt") .takes_value(true) ) .arg( Arg::with_name("suggested-fee-recipient") .long("suggested-fee-recipient") .value_name("SUGGESTED-FEE-RECIPIENT") - .help("Once the merge has happened, this address will receive transaction fees \ - collected from any blocks produced by this node. Defaults to a junk \ - address whilst the merge is in development stages. THE DEFAULT VALUE \ - WILL BE REMOVED BEFORE THE MERGE ENTERS PRODUCTION") + .help("Emergency fallback fee recipient for use in case the validator client does \ + not have one configured. 
You should set this flag on the validator \ + client instead of (or in addition to) setting it here.") .requires("execution-endpoint") .takes_value(true) ) @@ -632,6 +635,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("slasher-backend") .long("slasher-backend") + .value_name("DATABASE") .help("Set the database backend to be used by the slasher.") .takes_value(true) .possible_values(slasher::DatabaseBackend::VARIANTS) diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index 6bbe1345d3..de7ced1331 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -22,6 +22,7 @@ validator client or the slasher**. | v2.3.0 | May 2022 | v9 | yes (pre Bellatrix) | | v2.4.0 | Jul 2022 | v9 | yes (pre Bellatrix) | | v2.5.0 | Aug 2022 | v11 | yes | +| v3.0.0 | Aug 2022 | v11 | yes | > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release > (e.g. v2.3.0). diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index e2d54ea0aa..9ac22a7612 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -2,7 +2,8 @@ This document provides detail for users who want to run a merge-ready Lighthouse node. -> If you are running a testnet node, this configuration is necessary _now_. +> The merge is occuring on mainnet in September. You _must_ have a merge-ready setup by September 6 +> 2022. ## Necessary Configuration @@ -17,20 +18,20 @@ the merge: receive transactions tips from blocks proposed by your validators. This is covered on the [Suggested fee recipient](./suggested-fee-recipient.md) page. -Additionally, you _must_ update Lighthouse to a merge-compatible release in the weeks before -the merge. Merge releases are available now for all testnets. +Additionally, you _must_ update Lighthouse to v3.0.0 (or later), and must update your execution +engine to a merge-ready version. ## When? 
You must configure your node to be merge-ready before the Bellatrix fork occurs on the network on which your node is operating. -* **Mainnet**: the Bellatrix fork epoch has not yet been announced. It's possible to set up a - merge-ready node now, but some execution engines will require additional configuration. Please see - the section on [Execution engine configuration](#execution-engine-configuration) below. +* **Mainnet**: the Bellatrix fork is scheduled for epoch 144896, September 6 2022 11:34 UTC. + You must ensure your node configuration is updated before then in order to continue following + the chain. We recommend updating your configuration now. -* **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**: you must have a merge-ready configuration - right now. +* **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**: the Bellatrix fork has already occurred. + You must have a merge-ready configuration right now. ## Connecting to an execution engine @@ -46,6 +47,11 @@ If you set up an execution engine with `--execution-endpoint` then you *must* pr using `--execution-jwt`. This is a mandatory form of authentication that ensures that Lighthouse has authority to control the execution engine. +The execution engine connection must be **exclusive**, i.e. you must have one execution node +per beacon node. The reason for this is that the beacon node _controls_ the execution node. Please +see the [FAQ](#faq) for further information about why many:1 and 1:many configurations are not +supported. + ### Execution engine configuration Each execution engine has its own flags for configuring the engine API and JWT. Please consult @@ -59,9 +65,7 @@ Once you have configured your execution engine to open up the engine API (usuall should add the URL to your `lighthouse bn` flags with `--execution-endpoint <URL>`, as well as the path to the JWT secret with `--execution-jwt <FILE>`. 
-> NOTE: Geth v1.10.21 or earlier requires a manual TTD override to communicate with Lighthouse over -> the engine API on mainnet. We recommend waiting for a compatible Geth release before configuring -> Lighthouse-Geth on mainnet. +There are merge-ready releases of all compatible execution engines available now. ### Example @@ -138,6 +142,27 @@ be used for all such queries. Therefore we can say that where `--execution-endpo ## FAQ +### How do I know if my node is set up correctly? + +Lighthouse will log a message indicating that it is ready for the merge: + +``` +INFO Ready for the merge, current_difficulty: 10789363, terminal_total_difficulty: 10790000 +``` + +Once the merge has occurred you should see that Lighthouse remains in sync and marks blocks +as `verified` indicating that they have been processed successfully by the execution engine: + +``` +INFO Synced, slot: 3690668, block: 0x1244…cb92, epoch: 115333, finalized_epoch: 115331, finalized_root: 0x0764…2a3d, exec_hash: 0x929c…1ff6 (verified), peers: 78 +``` + +### Can I still use the `--staking` flag? + +Yes. The `--staking` flag is just an alias for `--http --eth1`. The `--eth1` flag is now superfluous +so `--staking` is equivalent to `--http`. You need either `--staking` or `--http` for the validator +client to be able to connect to the beacon node. + ### Can I use `http://localhost:8545` for the execution endpoint? Most execution nodes use port `8545` for the Ethereum JSON-RPC API. Unless custom configuration is @@ -145,9 +170,22 @@ used, an execution node _will not_ provide the necessary engine API on port `854 not attempt to use `http://localhost:8545` as your engine URL and should instead use `http://localhost:8551`. -### What about multiple execution endpoints? +### Can I share an execution node between multiple beacon nodes (many:1)? 
-Since an execution engine can only have one connected BN, the value of having multiple execution +It is **not** possible to connect more than one beacon node to the same execution engine. There must be a 1:1 relationship between beacon nodes and execution nodes. + +The beacon node controls the execution node via the engine API, telling it which block is the +current head of the chain. If multiple beacon nodes were to connect to a single execution node they +could set conflicting head blocks, leading to frequent re-orgs on the execution node. + +We imagine that in future there will be HTTP proxies available which allow users to nominate a +single controlling beacon node, while allowing consistent updates from other beacon nodes. + +### What about multiple execution endpoints (1:many)? + +It is **not** possible to connect one beacon node to more than one execution engine. There must be a 1:1 relationship between beacon nodes and execution nodes. + +Since an execution engine can only have one controlling BN, the value of having multiple execution engines connected to the same BN is very low. An execution engine cannot be shared between BNs to reduce costs. diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md index d862cf1a6c..bcd5878027 100644 --- a/book/src/suggested-fee-recipient.md +++ b/book/src/suggested-fee-recipient.md @@ -34,6 +34,10 @@ Assuming trustworthy nodes, the priority for the three methods is: 1. `--suggested-fee-recipient` provided to the VC. 1. `--suggested-fee-recipient` provided to the BN. +> **NOTE**: It is **not** recommended to _only_ set the fee recipient on the beacon node, as this results +> in sub-optimal block proposals. See [this issue](https://github.com/sigp/lighthouse/issues/3432) +> for details. + ### 1. 
Setting the fee recipient in the `validator_definitions.yml` Users can set the fee recipient in `validator_definitions.yml` with the `suggested_fee_recipient` @@ -66,6 +70,9 @@ validators where a `suggested_fee_recipient` is not loaded from another method. The `--suggested-fee-recipient` can be provided to the BN to act as a default value when the validator client does not transmit a `suggested_fee_recipient` to the BN. +**This value should be considered an emergency fallback**. You should set the fee recipient in the +validator client in order for the execution node to be given adequate notice of block proposal. + ## Setting the fee recipient dynamically using the keymanager API When the [validator client API](api-vc.md) is enabled, the diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index be87083763..bd707f7a77 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -227,7 +227,7 @@ fn main() { Accepts a 256-bit decimal integer (not a hex value). \ This flag should only be used if the user has a clear understanding that \ the broad Ethereum community has elected to override the terminal difficulty. \ - Incorrect use of this flag will cause your node to experience a consensus + Incorrect use of this flag will cause your node to experience a consensus \ failure. Be extremely careful with this flag.") .takes_value(true) .global(true) @@ -239,7 +239,7 @@ fn main() { .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. \ This flag should only be used if the user has a clear understanding that \ the broad Ethereum community has elected to override the terminal PoW block. \ - Incorrect use of this flag will cause your node to experience a consensus + Incorrect use of this flag will cause your node to experience a consensus \ failure. 
Be extremely careful with this flag.") .requires("terminal-block-hash-epoch-override") .takes_value(true) @@ -252,7 +252,7 @@ fn main() { .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH \ parameter. This flag should only be used if the user has a clear understanding \ that the broad Ethereum community has elected to override the terminal PoW block. \ - Incorrect use of this flag will cause your node to experience a consensus + Incorrect use of this flag will cause your node to experience a consensus \ failure. Be extremely careful with this flag.") .requires("terminal-block-hash-override") .takes_value(true) diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index ceca31aa75..e034bd55ca 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -131,8 +131,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("suggested-fee-recipient") .long("suggested-fee-recipient") - .help("The fallback address provided to the BN if nothing suitable is found \ - in the validator definitions or fee recipient file.") + .help("Once the merge has happened, this address will receive transaction fees \ + from blocks proposed by this validator client. If a fee recipient is \ + configured in the validator definitions it takes priority over this value.") .value_name("FEE-RECIPIENT") .takes_value(true) ) From 8c69d57c2ce0d5f1a3cd44c215b2d52844043150 Mon Sep 17 00:00:00 2001 From: Divma <divma@protonmail.com> Date: Wed, 24 Aug 2022 23:34:56 +0000 Subject: [PATCH 151/184] Pause sync when EE is offline (#3428) ## Issue Addressed #3032 ## Proposed Changes Pause sync when ee is offline. Changes include three main parts: - Online/offline notification system - Pause sync - Resume sync #### Online/offline notification system - The engine state is now guarded behind a new struct `State` that ensures every change is correctly notified. Notifications are only sent if the state changes. 
The new `State` is behind a `RwLock` (as before) as the synchronization mechanism. - The actual notification channel is a [tokio::sync::watch](https://docs.rs/tokio/latest/tokio/sync/watch/index.html) which ensures only the last value is in the receiver channel. This way we don't need to worry about message order etc. - Sync waits for state changes concurrently with normal messages. #### Pause Sync Sync has four components, pausing is done differently in each: - **Block lookups**: Disabled while in this state. We drop current requests and don't search for new blocks. Block lookups are infrequent and I don't think it's worth the extra logic of keeping these and delaying processing. If we later see that this is required, we can add it. - **Parent lookups**: Disabled while in this state. We drop current requests and don't search for new parents. Parent lookups are even less frequent and I don't think it's worth the extra logic of keeping these and delaying processing. If we later see that this is required, we can add it. - **Range**: Chains don't send batches for processing to the beacon processor. This is easily done by guarding the channel to the beacon processor and giving it access only if the ee is responsive. I find this the simplest and most powerful approach since we don't need to deal with new sync states and chain segments that are added while the ee is offline will follow the same logic without needing to synchronize a shared state among those. Another advantage of passive pause vs active pause is that we can still keep track of active advertised chain segments so that on resume we don't need to re-evaluate all our peers. - **Backfill**: Not affected by ee states, we don't pause. #### Resume Sync - **Block lookups**: Enabled again. - **Parent lookups**: Enabled again. - **Range**: Active resume. Since the only real pause range does is not sending batches for processing, resume makes all chains that are holding read-for-processing batches send them. 
- **Backfill**: Not affected by ee states, no need to resume. ## Additional Info **QUESTION**: Originally I made this to notify and change on synced state, but @pawanjay176 on talks with @paulhauner concluded we only need to check online/offline states. The upcheck function mentions extra checks to have a very up to date sync status to aid the networking stack. However, the only need the networking stack would have is this one. I added a TODO to review if the extra check can be removed Next gen of #3094 Will work best with #3439 Co-authored-by: Pawan Dhananjay <pawandhananjay@gmail.com> --- Cargo.lock | 1 + beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/execution_layer/Cargo.toml | 1 + beacon_node/execution_layer/src/engines.rs | 135 +++++++-- beacon_node/execution_layer/src/lib.rs | 10 +- .../network/src/sync/backfill_sync/mod.rs | 43 +-- .../network/src/sync/block_lookups/mod.rs | 122 ++++---- .../src/sync/block_lookups/parent_lookup.rs | 23 +- .../network/src/sync/block_lookups/tests.rs | 9 +- beacon_node/network/src/sync/manager.rs | 275 +++++++++++------- .../network/src/sync/network_context.rs | 47 ++- .../network/src/sync/range_sync/chain.rs | 63 ++-- .../src/sync/range_sync/chain_collection.rs | 16 +- .../network/src/sync/range_sync/range.rs | 156 +++++++--- 14 files changed, 574 insertions(+), 328 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9e17e2e2a4..5a2c4312b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2087,6 +2087,7 @@ dependencies = [ "task_executor", "tempfile", "tokio", + "tokio-stream", "tree_hash", "tree_hash_derive", "types", diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 481b1ae736..fc24a34bbb 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -59,6 +59,7 @@ pub use block_verification::{BlockError, ExecutionPayloadError, GossipVerifiedBl pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use 
eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::ServerSentEventHandler; +pub use execution_layer::EngineState; pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters}; pub use metrics::scrape_for_metrics; pub use parking_lot; diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 3b401d4591..5c0e66ea44 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -43,4 +43,5 @@ fork_choice = { path = "../../consensus/fork_choice" } mev-build-rs = {git = "https://github.com/ralexstokes/mev-rs", tag = "v0.2.0"} ethereum-consensus = {git = "https://github.com/ralexstokes/ethereum-consensus"} ssz-rs = {git = "https://github.com/ralexstokes/ssz-rs"} +tokio-stream = { version = "0.1.9", features = [ "sync" ] } strum = "0.24.0" diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index eb188c61f8..339006c1ba 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -9,7 +9,8 @@ use slog::{debug, error, info, Logger}; use std::future::Future; use std::sync::Arc; use task_executor::TaskExecutor; -use tokio::sync::{Mutex, RwLock}; +use tokio::sync::{watch, Mutex, RwLock}; +use tokio_stream::wrappers::WatchStream; use types::{Address, ExecutionBlockHash, Hash256}; /// The number of payload IDs that will be stored for each `Engine`. @@ -18,14 +19,74 @@ use types::{Address, ExecutionBlockHash, Hash256}; const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512; /// Stores the remembered state of a engine. -#[derive(Copy, Clone, PartialEq, Debug)] -enum EngineState { +#[derive(Copy, Clone, PartialEq, Debug, Eq, Default)] +enum EngineStateInternal { Synced, + #[default] Offline, Syncing, AuthFailed, } +/// A subset of the engine state to inform other services if the engine is online or offline. 
+#[derive(Debug, Clone, PartialEq, Eq, Copy)] +pub enum EngineState { + Online, + Offline, +} + +impl From<EngineStateInternal> for EngineState { + fn from(state: EngineStateInternal) -> Self { + match state { + EngineStateInternal::Synced | EngineStateInternal::Syncing => EngineState::Online, + EngineStateInternal::Offline | EngineStateInternal::AuthFailed => EngineState::Offline, + } + } +} + +/// Wrapper structure that ensures changes to the engine state are correctly reported to watchers. +struct State { + /// The actual engine state. + state: EngineStateInternal, + /// Notifier to watch the engine state. + notifier: watch::Sender<EngineState>, +} + +impl std::ops::Deref for State { + type Target = EngineStateInternal; + + fn deref(&self) -> &Self::Target { + &self.state + } +} + +impl Default for State { + fn default() -> Self { + let state = EngineStateInternal::default(); + let (notifier, _receiver) = watch::channel(state.into()); + State { state, notifier } + } +} + +impl State { + // Updates the state and notifies all watchers if the state has changed. + pub fn update(&mut self, new_state: EngineStateInternal) { + self.state = new_state; + self.notifier.send_if_modified(|last_state| { + let changed = *last_state != new_state.into(); // notify conditionally + *last_state = new_state.into(); // update the state unconditionally + changed + }); + } + + /// Gives access to a channel containing whether the last state is online. + /// + /// This can be called several times. 
+ pub fn watch(&self) -> WatchStream<EngineState> { + self.notifier.subscribe().into() + } +} + #[derive(Copy, Clone, PartialEq, Debug)] pub struct ForkChoiceState { pub head_block_hash: ExecutionBlockHash, @@ -53,10 +114,10 @@ pub enum EngineError { pub struct Engine { pub api: HttpJsonRpc, payload_id_cache: Mutex<LruCache<PayloadIdCacheKey, PayloadId>>, - state: RwLock<EngineState>, - pub latest_forkchoice_state: RwLock<Option<ForkChoiceState>>, - pub executor: TaskExecutor, - pub log: Logger, + state: RwLock<State>, + latest_forkchoice_state: RwLock<Option<ForkChoiceState>>, + executor: TaskExecutor, + log: Logger, } impl Engine { @@ -65,13 +126,20 @@ impl Engine { Self { api, payload_id_cache: Mutex::new(LruCache::new(PAYLOAD_ID_LRU_CACHE_SIZE)), - state: RwLock::new(EngineState::Offline), + state: Default::default(), latest_forkchoice_state: Default::default(), executor, log: log.clone(), } } + /// Gives access to a channel containing the last engine state. + /// + /// This can be called several times. + pub async fn watch_state(&self) -> WatchStream<EngineState> { + self.state.read().await.watch() + } + pub async fn get_payload_id( &self, head_block_hash: ExecutionBlockHash, @@ -165,17 +233,16 @@ impl Engine { /// Returns `true` if the engine has a "synced" status. pub async fn is_synced(&self) -> bool { - *self.state.read().await == EngineState::Synced + **self.state.read().await == EngineStateInternal::Synced } /// Run the `EngineApi::upcheck` function if the node's last known state is not synced. This /// might be used to recover the node if offline. 
pub async fn upcheck(&self) { - let state: EngineState = match self.api.upcheck().await { + let state: EngineStateInternal = match self.api.upcheck().await { Ok(()) => { let mut state = self.state.write().await; - - if *state != EngineState::Synced { + if **state != EngineStateInternal::Synced { info!( self.log, "Execution engine online"; @@ -189,14 +256,13 @@ impl Engine { "Execution engine online"; ); } - - *state = EngineState::Synced; - *state + state.update(EngineStateInternal::Synced); + **state } Err(EngineApiError::IsSyncing) => { let mut state = self.state.write().await; - *state = EngineState::Syncing; - *state + state.update(EngineStateInternal::Syncing); + **state } Err(EngineApiError::Auth(err)) => { error!( @@ -206,8 +272,8 @@ impl Engine { ); let mut state = self.state.write().await; - *state = EngineState::AuthFailed; - *state + state.update(EngineStateInternal::AuthFailed); + **state } Err(e) => { error!( @@ -217,8 +283,8 @@ impl Engine { ); let mut state = self.state.write().await; - *state = EngineState::Offline; - *state + state.update(EngineStateInternal::Offline); + **state } }; @@ -244,12 +310,10 @@ impl Engine { Ok(result) => { // Take a clone *without* holding the read-lock since the `upcheck` function will // take a write-lock. - let state: EngineState = *self.state.read().await; + let state: EngineStateInternal = **self.state.read().await; - // If this request just returned successfully but we don't think this node is - // synced, check to see if it just became synced. This helps to ensure that the - // networking stack can get fast feedback about a synced engine. - if state != EngineState::Synced { + // Keep an up to date engine state. + if state != EngineStateInternal::Synced { // Spawn the upcheck in another task to avoid slowing down this request. 
let inner_self = self.clone(); self.executor.spawn( @@ -293,3 +357,22 @@ impl PayloadIdCacheKey { } } } + +#[cfg(test)] +mod tests { + use super::*; + use tokio_stream::StreamExt; + + #[tokio::test] + async fn test_state_notifier() { + let mut state = State::default(); + let initial_state: EngineState = state.state.into(); + assert_eq!(initial_state, EngineState::Offline); + state.update(EngineStateInternal::Synced); + + // a watcher that arrives after the first update. + let mut watcher = state.watch(); + let new_state = watcher.next().await.expect("Last state is always present"); + assert_eq!(new_state, EngineState::Online); + } +} diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 778b2247be..3bdca82ad0 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -10,8 +10,8 @@ use builder_client::BuilderHttpClient; use engine_api::Error as ApiError; pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; -pub use engines::ForkChoiceState; use engines::{Engine, EngineError}; +pub use engines::{EngineState, ForkChoiceState}; use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; use payload_status::process_payload_status; @@ -31,6 +31,7 @@ use tokio::{ sync::{Mutex, MutexGuard, RwLock}, time::sleep, }; +use tokio_stream::wrappers::WatchStream; use types::{ BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, ForkName, ProposerPreparationData, PublicKeyBytes, SignedBeaconBlock, Slot, @@ -286,6 +287,13 @@ impl<T: EthSpec> ExecutionLayer<T> { self.inner.execution_blocks.lock().await } + /// Gives access to a channel containing if the last engine state is online or not. + /// + /// This can be called several times. + pub async fn get_responsiveness_watch(&self) -> WatchStream<EngineState> { + self.engine().watch_state().await + } + /// Note: this function returns a mutex guard, be careful to avoid deadlocks. 
async fn proposer_preparation_data( &self, diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 6767350ce3..d36bbbc79b 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -24,7 +24,6 @@ use std::collections::{ HashMap, HashSet, }; use std::sync::Arc; -use tokio::sync::mpsc; use types::{Epoch, EthSpec, SignedBeaconBlock}; /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of @@ -144,9 +143,6 @@ pub struct BackFillSync<T: BeaconChainTypes> { /// (i.e synced peers). network_globals: Arc<NetworkGlobals<T::EthSpec>>, - /// A multi-threaded, non-blocking processor for processing batches in the beacon chain. - beacon_processor_send: mpsc::Sender<BeaconWorkEvent<T>>, - /// A logger for backfill sync. log: slog::Logger, } @@ -155,7 +151,6 @@ impl<T: BeaconChainTypes> BackFillSync<T> { pub fn new( beacon_chain: Arc<BeaconChain<T>>, network_globals: Arc<NetworkGlobals<T::EthSpec>>, - beacon_processor_send: mpsc::Sender<BeaconWorkEvent<T>>, log: slog::Logger, ) -> Self { // Determine if backfill is enabled or not. @@ -193,7 +188,6 @@ impl<T: BeaconChainTypes> BackFillSync<T> { participating_peers: HashSet::new(), restart_failed_sync: false, beacon_chain, - beacon_processor_send, log, }; @@ -216,7 +210,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { #[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"] pub fn start( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, ) -> Result<SyncStart, BackFillError> { match self.state() { BackFillState::Syncing => {} // already syncing ignore. 
@@ -312,7 +306,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { pub fn peer_disconnected( &mut self, peer_id: &PeerId, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, ) -> Result<(), BackFillError> { if matches!( self.state(), @@ -355,7 +349,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { #[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"] pub fn inject_error( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, batch_id: BatchId, peer_id: &PeerId, request_id: Id, @@ -392,7 +386,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { #[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"] pub fn on_block_response( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, batch_id: BatchId, peer_id: &PeerId, request_id: Id, @@ -505,7 +499,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { /// The batch must exist and be ready for processing fn process_batch( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, batch_id: BatchId, ) -> Result<ProcessResult, BackFillError> { // Only process batches if this chain is Syncing, and only one at a time @@ -541,8 +535,8 @@ impl<T: BeaconChainTypes> BackFillSync<T> { let process_id = ChainSegmentProcessId::BackSyncBatchId(batch_id); self.current_processing_batch = Some(batch_id); - if let Err(e) = self - .beacon_processor_send + if let Err(e) = network + .processor_channel() .try_send(BeaconWorkEvent::chain_segment(process_id, blocks)) { crit!(self.log, "Failed to send backfill segment to processor."; "msg" => "process_batch", @@ -563,7 +557,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { #[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"] pub fn on_batch_process_result( &mut self, - 
network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, batch_id: BatchId, result: &BatchProcessResult, ) -> Result<ProcessResult, BackFillError> { @@ -704,7 +698,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { /// Processes the next ready batch. fn process_completed_batches( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, ) -> Result<ProcessResult, BackFillError> { // Only process batches if backfill is syncing and only process one batch at a time if self.state() != BackFillState::Syncing || self.current_processing_batch.is_some() { @@ -764,11 +758,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { /// /// If a previous batch has been validated and it had been re-processed, penalize the original /// peer. - fn advance_chain( - &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, - validating_epoch: Epoch, - ) { + fn advance_chain(&mut self, network: &mut SyncNetworkContext<T>, validating_epoch: Epoch) { // make sure this epoch produces an advancement if validating_epoch >= self.current_start { return; @@ -863,7 +853,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { /// intended and can result in downvoting a peer. fn handle_invalid_batch( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, batch_id: BatchId, ) -> Result<(), BackFillError> { // The current batch could not be processed, indicating either the current or previous @@ -914,7 +904,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { /// Sends and registers the request of a batch awaiting download. fn retry_batch_download( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, batch_id: BatchId, ) -> Result<(), BackFillError> { let batch = match self.batches.get_mut(&batch_id) { @@ -958,7 +948,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { /// Requests the batch assigned to the given id from a given peer. 
fn send_batch( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, batch_id: BatchId, peer: PeerId, ) -> Result<(), BackFillError> { @@ -1011,10 +1001,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { /// When resuming a chain, this function searches for batches that need to be re-downloaded and /// transitions their state to redownload the batch. - fn resume_batches( - &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, - ) -> Result<(), BackFillError> { + fn resume_batches(&mut self, network: &mut SyncNetworkContext<T>) -> Result<(), BackFillError> { let batch_ids_to_retry = self .batches .iter() @@ -1040,7 +1027,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> { /// pool and left over batches until the batch buffer is reached or all peers are exhausted. fn request_batches( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, ) -> Result<(), BackFillError> { if !matches!(self.state(), BackFillState::Syncing) { return Ok(()); diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 9f2a5fdce7..22d815121a 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -5,11 +5,10 @@ use beacon_chain::{BeaconChainTypes, BlockError}; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; -use slog::{crit, debug, error, trace, warn, Logger}; +use slog::{debug, error, trace, warn, Logger}; use smallvec::SmallVec; use std::sync::Arc; use store::{Hash256, SignedBeaconBlock}; -use tokio::sync::mpsc; use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent}; use crate::metrics; @@ -36,7 +35,7 @@ const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; pub(crate) struct BlockLookups<T: BeaconChainTypes> { /// A collection of parent block lookups. 
- parent_queue: SmallVec<[ParentLookup<T::EthSpec>; 3]>, + parent_queue: SmallVec<[ParentLookup<T>; 3]>, /// A cache of failed chain lookups to prevent duplicate searches. failed_chains: LRUTimeCache<Hash256>, @@ -47,22 +46,18 @@ pub(crate) struct BlockLookups<T: BeaconChainTypes> { /// The flag allows us to determine if the peer returned data or sent us nothing. single_block_lookups: FnvHashMap<Id, SingleBlockRequest<SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS>>, - /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. - beacon_processor_send: mpsc::Sender<WorkEvent<T>>, - /// The logger for the import manager. log: Logger, } impl<T: BeaconChainTypes> BlockLookups<T> { - pub fn new(beacon_processor_send: mpsc::Sender<WorkEvent<T>>, log: Logger) -> Self { + pub fn new(log: Logger) -> Self { Self { parent_queue: Default::default(), failed_chains: LRUTimeCache::new(Duration::from_secs( FAILED_CHAINS_CACHE_EXPIRY_SECONDS, )), single_block_lookups: Default::default(), - beacon_processor_send, log, } } @@ -71,12 +66,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> { /// Searches for a single block hash. If the blocks parent is unknown, a chain of blocks is /// constructed. 
- pub fn search_block( - &mut self, - hash: Hash256, - peer_id: PeerId, - cx: &mut SyncNetworkContext<T::EthSpec>, - ) { + pub fn search_block(&mut self, hash: Hash256, peer_id: PeerId, cx: &mut SyncNetworkContext<T>) { // Do not re-request a block that is already being requested if self .single_block_lookups @@ -113,7 +103,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> { &mut self, block: Arc<SignedBeaconBlock<T::EthSpec>>, peer_id: PeerId, - cx: &mut SyncNetworkContext<T::EthSpec>, + cx: &mut SyncNetworkContext<T>, ) { let block_root = block.canonical_root(); let parent_root = block.parent_root(); @@ -147,18 +137,16 @@ impl<T: BeaconChainTypes> BlockLookups<T> { peer_id: PeerId, block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>, seen_timestamp: Duration, - cx: &mut SyncNetworkContext<T::EthSpec>, + cx: &mut SyncNetworkContext<T>, ) { let mut request = match self.single_block_lookups.entry(id) { Entry::Occupied(req) => req, Entry::Vacant(_) => { if block.is_some() { - crit!( + debug!( self.log, "Block returned for single block lookup not present" ); - #[cfg(debug_assertions)] - panic!("block returned for single block lookup not present"); } return; } @@ -172,6 +160,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> { block, seen_timestamp, BlockProcessType::SingleBlock { id }, + cx, ) .is_err() { @@ -212,7 +201,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> { peer_id: PeerId, block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>, seen_timestamp: Duration, - cx: &mut SyncNetworkContext<T::EthSpec>, + cx: &mut SyncNetworkContext<T>, ) { let mut parent_lookup = if let Some(pos) = self .parent_queue @@ -236,6 +225,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> { block, seen_timestamp, BlockProcessType::ParentLookup { chain_hash }, + cx, ) .is_ok() { @@ -289,7 +279,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> { /* Error responses */ #[allow(clippy::needless_collect)] // false positive - pub fn peer_disconnected(&mut self, peer_id: &PeerId, cx: &mut 
SyncNetworkContext<T::EthSpec>) { + pub fn peer_disconnected(&mut self, peer_id: &PeerId, cx: &mut SyncNetworkContext<T>) { /* Check disconnection for single block lookups */ // better written after https://github.com/rust-lang/rust/issues/59618 let remove_retry_ids: Vec<Id> = self @@ -345,7 +335,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> { &mut self, id: Id, peer_id: PeerId, - cx: &mut SyncNetworkContext<T::EthSpec>, + cx: &mut SyncNetworkContext<T>, ) { if let Some(pos) = self .parent_queue @@ -365,7 +355,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> { ); } - pub fn single_block_lookup_failed(&mut self, id: Id, cx: &mut SyncNetworkContext<T::EthSpec>) { + pub fn single_block_lookup_failed(&mut self, id: Id, cx: &mut SyncNetworkContext<T>) { if let Some(mut request) = self.single_block_lookups.remove(&id) { request.register_failure_downloading(); trace!(self.log, "Single block lookup failed"; "block" => %request.hash); @@ -388,15 +378,12 @@ impl<T: BeaconChainTypes> BlockLookups<T> { &mut self, id: Id, result: BlockProcessResult<T::EthSpec>, - cx: &mut SyncNetworkContext<T::EthSpec>, + cx: &mut SyncNetworkContext<T>, ) { let mut req = match self.single_block_lookups.remove(&id) { Some(req) => req, None => { - #[cfg(debug_assertions)] - panic!("block processed for single block lookup not present"); - #[cfg(not(debug_assertions))] - return crit!( + return debug!( self.log, "Block processed for single block lookup not present" ); @@ -476,7 +463,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> { &mut self, chain_hash: Hash256, result: BlockProcessResult<T::EthSpec>, - cx: &mut SyncNetworkContext<T::EthSpec>, + cx: &mut SyncNetworkContext<T>, ) { let (mut parent_lookup, peer_id) = if let Some((pos, peer)) = self .parent_queue @@ -489,13 +476,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> { }) { (self.parent_queue.remove(pos), peer) } else { - #[cfg(debug_assertions)] - panic!( - "Process response for a parent lookup request that was not found. 
Chain_hash: {}", - chain_hash - ); - #[cfg(not(debug_assertions))] - return crit!(self.log, "Process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); + return debug!(self.log, "Process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); }; match &result { @@ -524,14 +505,22 @@ impl<T: BeaconChainTypes> BlockLookups<T> { } BlockProcessResult::Ok | BlockProcessResult::Err(BlockError::BlockIsAlreadyKnown { .. }) => { + // Check if the beacon processor is available + let beacon_processor_send = match cx.processor_channel_if_enabled() { + Some(channel) => channel, + None => { + return trace!( + self.log, + "Dropping parent chain segment that was ready for processing."; + parent_lookup + ); + } + }; let chain_hash = parent_lookup.chain_hash(); let blocks = parent_lookup.chain_blocks(); let process_id = ChainSegmentProcessId::ParentLookup(chain_hash); - match self - .beacon_processor_send - .try_send(WorkEvent::chain_segment(process_id, blocks)) - { + match beacon_processor_send.try_send(WorkEvent::chain_segment(process_id, blocks)) { Ok(_) => { self.parent_queue.push(parent_lookup); } @@ -595,7 +584,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> { &mut self, chain_hash: Hash256, result: BatchProcessResult, - cx: &mut SyncNetworkContext<T::EthSpec>, + cx: &mut SyncNetworkContext<T>, ) { let parent_lookup = if let Some(pos) = self .parent_queue @@ -604,12 +593,6 @@ impl<T: BeaconChainTypes> BlockLookups<T> { { self.parent_queue.remove(pos) } else { - #[cfg(debug_assertions)] - panic!( - "Chain process response for a parent lookup request that was not found. 
Chain_hash: {}", - chain_hash - ); - #[cfg(not(debug_assertions))] return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); }; @@ -645,25 +628,34 @@ impl<T: BeaconChainTypes> BlockLookups<T> { block: Arc<SignedBeaconBlock<T::EthSpec>>, duration: Duration, process_type: BlockProcessType, + cx: &mut SyncNetworkContext<T>, ) -> Result<(), ()> { - trace!(self.log, "Sending block for processing"; "block" => %block.canonical_root(), "process" => ?process_type); - let event = WorkEvent::rpc_beacon_block(block, duration, process_type); - if let Err(e) = self.beacon_processor_send.try_send(event) { - error!( - self.log, - "Failed to send sync block to processor"; - "error" => ?e - ); - return Err(()); + match cx.processor_channel_if_enabled() { + Some(beacon_processor_send) => { + trace!(self.log, "Sending block for processing"; "block" => %block.canonical_root(), "process" => ?process_type); + let event = WorkEvent::rpc_beacon_block(block, duration, process_type); + if let Err(e) = beacon_processor_send.try_send(event) { + error!( + self.log, + "Failed to send sync block to processor"; + "error" => ?e + ); + Err(()) + } else { + Ok(()) + } + } + None => { + trace!(self.log, "Dropping block ready for processing. Beacon processor not available"; "block" => %block.canonical_root()); + Err(()) + } } - - Ok(()) } fn request_parent( &mut self, - mut parent_lookup: ParentLookup<T::EthSpec>, - cx: &mut SyncNetworkContext<T::EthSpec>, + mut parent_lookup: ParentLookup<T>, + cx: &mut SyncNetworkContext<T>, ) { match parent_lookup.request_parent(cx) { Err(e) => { @@ -710,4 +702,14 @@ impl<T: BeaconChainTypes> BlockLookups<T> { self.parent_queue.len() as i64, ); } + + /// Drops all the single block requests and returns how many requests were dropped. 
+ pub fn drop_single_block_requests(&mut self) -> usize { + self.single_block_lookups.drain().len() + } + + /// Drops all the parent chain requests and returns how many requests were dropped. + pub fn drop_parent_chain_requests(&mut self) -> usize { + self.parent_queue.drain(..).len() + } } diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index bf5a1b259b..295d9cc94b 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,6 +1,7 @@ +use beacon_chain::BeaconChainTypes; use lighthouse_network::PeerId; use std::sync::Arc; -use store::{EthSpec, Hash256, SignedBeaconBlock}; +use store::{Hash256, SignedBeaconBlock}; use strum::IntoStaticStr; use crate::sync::{ @@ -18,11 +19,11 @@ pub(crate) const PARENT_FAIL_TOLERANCE: u8 = 5; pub(crate) const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; /// Maintains a sequential list of parents to lookup and the lookup's current state. -pub(crate) struct ParentLookup<T: EthSpec> { +pub(crate) struct ParentLookup<T: BeaconChainTypes> { /// The root of the block triggering this parent request. chain_hash: Hash256, /// The blocks that have currently been downloaded. - downloaded_blocks: Vec<Arc<SignedBeaconBlock<T>>>, + downloaded_blocks: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>, /// Request of the last parent. current_parent_request: SingleBlockRequest<PARENT_FAIL_TOLERANCE>, /// Id of the last parent request. 
@@ -50,14 +51,14 @@ pub enum RequestError { NoPeers, } -impl<T: EthSpec> ParentLookup<T> { - pub fn contains_block(&self, block: &SignedBeaconBlock<T>) -> bool { +impl<T: BeaconChainTypes> ParentLookup<T> { + pub fn contains_block(&self, block: &SignedBeaconBlock<T::EthSpec>) -> bool { self.downloaded_blocks .iter() .any(|d_block| d_block.as_ref() == block) } - pub fn new(block: Arc<SignedBeaconBlock<T>>, peer_id: PeerId) -> Self { + pub fn new(block: Arc<SignedBeaconBlock<T::EthSpec>>, peer_id: PeerId) -> Self { let current_parent_request = SingleBlockRequest::new(block.parent_root(), peer_id); Self { @@ -92,7 +93,7 @@ impl<T: EthSpec> ParentLookup<T> { self.current_parent_request.check_peer_disconnected(peer_id) } - pub fn add_block(&mut self, block: Arc<SignedBeaconBlock<T>>) { + pub fn add_block(&mut self, block: Arc<SignedBeaconBlock<T::EthSpec>>) { let next_parent = block.parent_root(); self.downloaded_blocks.push(block); self.current_parent_request.hash = next_parent; @@ -119,7 +120,7 @@ impl<T: EthSpec> ParentLookup<T> { self.current_parent_request_id = None; } - pub fn chain_blocks(&mut self) -> Vec<Arc<SignedBeaconBlock<T>>> { + pub fn chain_blocks(&mut self) -> Vec<Arc<SignedBeaconBlock<T::EthSpec>>> { std::mem::take(&mut self.downloaded_blocks) } @@ -127,9 +128,9 @@ impl<T: EthSpec> ParentLookup<T> { /// the processing result of the block. pub fn verify_block( &mut self, - block: Option<Arc<SignedBeaconBlock<T>>>, + block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>, failed_chains: &mut lru_cache::LRUTimeCache<Hash256>, - ) -> Result<Option<Arc<SignedBeaconBlock<T>>>, VerifyError> { + ) -> Result<Option<Arc<SignedBeaconBlock<T::EthSpec>>>, VerifyError> { let block = self.current_parent_request.verify_block(block)?; // check if the parent of this block isn't in the failed cache. 
If it is, this chain should @@ -189,7 +190,7 @@ impl From<super::single_block_lookup::LookupRequestError> for RequestError { } } -impl<T: EthSpec> slog::KV for ParentLookup<T> { +impl<T: BeaconChainTypes> slog::KV for ParentLookup<T> { fn serialize( &self, record: &slog::Record, diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 2f2720fd1e..ead15e23a5 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -12,6 +12,7 @@ use lighthouse_network::{NetworkGlobals, Request}; use slog::{Drain, Level}; use slot_clock::SystemTimeSlotClock; use store::MemoryStore; +use tokio::sync::mpsc; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use types::MinimalEthSpec as E; @@ -26,7 +27,7 @@ struct TestRig { const D: Duration = Duration::new(0, 0); impl TestRig { - fn test_setup(log_level: Option<Level>) -> (BlockLookups<T>, SyncNetworkContext<E>, Self) { + fn test_setup(log_level: Option<Level>) -> (BlockLookups<T>, SyncNetworkContext<T>, Self) { let log = { let decorator = slog_term::TermDecorator::new().build(); let drain = slog_term::FullFormat::new(decorator).build().fuse(); @@ -47,15 +48,13 @@ impl TestRig { network_rx, rng, }; - let bl = BlockLookups::new( - beacon_processor_tx, - log.new(slog::o!("component" => "block_lookups")), - ); + let bl = BlockLookups::new(log.new(slog::o!("component" => "block_lookups"))); let cx = { let globals = Arc::new(NetworkGlobals::new_test_globals(&log)); SyncNetworkContext::new( network_tx, globals, + beacon_processor_tx, log.new(slog::o!("component" => "network_context")), ) }; diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 64755300c3..6230347977 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -41,7 +41,8 @@ use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; 
use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; -use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError}; +use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState}; +use futures::StreamExt; use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; @@ -165,7 +166,7 @@ pub struct SyncManager<T: BeaconChainTypes> { input_channel: mpsc::UnboundedReceiver<SyncMessage<T::EthSpec>>, /// A network context to contact the network service. - network: SyncNetworkContext<T::EthSpec>, + network: SyncNetworkContext<T>, /// The object handling long-range batch load-balanced syncing. range_sync: RangeSync<T>, @@ -202,19 +203,15 @@ pub fn spawn<T: BeaconChainTypes>( chain: beacon_chain.clone(), network_globals: network_globals.clone(), input_channel: sync_recv, - network: SyncNetworkContext::new(network_send, network_globals.clone(), log.clone()), - range_sync: RangeSync::new( - beacon_chain.clone(), - beacon_processor_send.clone(), + network: SyncNetworkContext::new( + network_send, + network_globals.clone(), + beacon_processor_send, log.clone(), ), - backfill_sync: BackFillSync::new( - beacon_chain, - network_globals, - beacon_processor_send.clone(), - log.clone(), - ), - block_lookups: BlockLookups::new(beacon_processor_send, log.clone()), + range_sync: RangeSync::new(beacon_chain.clone(), log.clone()), + backfill_sync: BackFillSync::new(beacon_chain, network_globals, log.clone()), + block_lookups: BlockLookups::new(log.clone()), log: log.clone(), }; @@ -468,100 +465,178 @@ impl<T: BeaconChainTypes> SyncManager<T> { /// The main driving future for the sync manager. 
async fn main(&mut self) { + let check_ee = self.chain.execution_layer.is_some(); + let mut check_ee_stream = { + // some magic to have an instance implementing stream even if there is no execution layer + let ee_responsiveness_watch: futures::future::OptionFuture<_> = self + .chain + .execution_layer + .as_ref() + .map(|el| el.get_responsiveness_watch()) + .into(); + futures::stream::iter(ee_responsiveness_watch.await).flatten() + }; + // process any inbound messages loop { - if let Some(sync_message) = self.input_channel.recv().await { - match sync_message { - SyncMessage::AddPeer(peer_id, info) => { - self.add_peer(peer_id, info); - } - SyncMessage::RpcBlock { - request_id, - peer_id, - beacon_block, - seen_timestamp, - } => { - self.rpc_block_received(request_id, peer_id, beacon_block, seen_timestamp); - } - SyncMessage::UnknownBlock(peer_id, block) => { - // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore - if !self.network_globals.sync_state.read().is_synced() { - let head_slot = self.chain.canonical_head.cached_head().head_slot(); - let unknown_block_slot = block.slot(); + tokio::select! { + Some(sync_message) = self.input_channel.recv() => { + self.handle_message(sync_message); + }, + Some(engine_state) = check_ee_stream.next(), if check_ee => { + self.handle_new_execution_engine_state(engine_state); + } + } + } + } - // if the block is far in the future, ignore it. If its within the slot tolerance of - // our current head, regardless of the syncing state, fetch it. 
- if (head_slot >= unknown_block_slot - && head_slot.sub(unknown_block_slot).as_usize() - > SLOT_IMPORT_TOLERANCE) - || (head_slot < unknown_block_slot - && unknown_block_slot.sub(head_slot).as_usize() - > SLOT_IMPORT_TOLERANCE) - { - continue; - } - } - if self.network_globals.peers.read().is_connected(&peer_id) { - self.block_lookups - .search_parent(block, peer_id, &mut self.network); - } + fn handle_message(&mut self, sync_message: SyncMessage<T::EthSpec>) { + match sync_message { + SyncMessage::AddPeer(peer_id, info) => { + self.add_peer(peer_id, info); + } + SyncMessage::RpcBlock { + request_id, + peer_id, + beacon_block, + seen_timestamp, + } => { + self.rpc_block_received(request_id, peer_id, beacon_block, seen_timestamp); + } + SyncMessage::UnknownBlock(peer_id, block) => { + // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore + if !self.network_globals.sync_state.read().is_synced() { + let head_slot = self.chain.canonical_head.cached_head().head_slot(); + let unknown_block_slot = block.slot(); + + // if the block is far in the future, ignore it. If its within the slot tolerance of + // our current head, regardless of the syncing state, fetch it. + if (head_slot >= unknown_block_slot + && head_slot.sub(unknown_block_slot).as_usize() > SLOT_IMPORT_TOLERANCE) + || (head_slot < unknown_block_slot + && unknown_block_slot.sub(head_slot).as_usize() > SLOT_IMPORT_TOLERANCE) + { + return; } - SyncMessage::UnknownBlockHash(peer_id, block_hash) => { - // If we are not synced, ignore this block. 
- if self.network_globals.sync_state.read().is_synced() - && self.network_globals.peers.read().is_connected(&peer_id) - { - self.block_lookups - .search_block(block_hash, peer_id, &mut self.network); - } - } - SyncMessage::Disconnect(peer_id) => { - self.peer_disconnect(&peer_id); - } - SyncMessage::RpcError { - peer_id, - request_id, - } => self.inject_error(peer_id, request_id), - SyncMessage::BlockProcessed { - process_type, + } + if self.network_globals.peers.read().is_connected(&peer_id) + && self.network.is_execution_engine_online() + { + self.block_lookups + .search_parent(block, peer_id, &mut self.network); + } + } + SyncMessage::UnknownBlockHash(peer_id, block_hash) => { + // If we are not synced, ignore this block. + if self.network_globals.sync_state.read().is_synced() + && self.network_globals.peers.read().is_connected(&peer_id) + && self.network.is_execution_engine_online() + { + self.block_lookups + .search_block(block_hash, peer_id, &mut self.network); + } + } + SyncMessage::Disconnect(peer_id) => { + self.peer_disconnect(&peer_id); + } + SyncMessage::RpcError { + peer_id, + request_id, + } => self.inject_error(peer_id, request_id), + SyncMessage::BlockProcessed { + process_type, + result, + } => match process_type { + BlockProcessType::SingleBlock { id } => { + self.block_lookups + .single_block_processed(id, result, &mut self.network) + } + BlockProcessType::ParentLookup { chain_hash } => self + .block_lookups + .parent_block_processed(chain_hash, result, &mut self.network), + }, + SyncMessage::BatchProcessed { sync_type, result } => match sync_type { + ChainSegmentProcessId::RangeBatchId(chain_id, epoch, _) => { + self.range_sync.handle_block_process_result( + &mut self.network, + chain_id, + epoch, result, - } => match process_type { - BlockProcessType::SingleBlock { id } => self - .block_lookups - .single_block_processed(id, result, &mut self.network), - BlockProcessType::ParentLookup { chain_hash } => self - .block_lookups - 
.parent_block_processed(chain_hash, result, &mut self.network), - }, - SyncMessage::BatchProcessed { sync_type, result } => match sync_type { - ChainSegmentProcessId::RangeBatchId(chain_id, epoch, _) => { - self.range_sync.handle_block_process_result( - &mut self.network, - chain_id, - epoch, - result, - ); + ); + self.update_sync_state(); + } + ChainSegmentProcessId::BackSyncBatchId(epoch) => { + match self.backfill_sync.on_batch_process_result( + &mut self.network, + epoch, + &result, + ) { + Ok(ProcessResult::Successful) => {} + Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), + Err(error) => { + error!(self.log, "Backfill sync failed"; "error" => ?error); + // Update the global status self.update_sync_state(); } - ChainSegmentProcessId::BackSyncBatchId(epoch) => { - match self.backfill_sync.on_batch_process_result( - &mut self.network, - epoch, - &result, - ) { - Ok(ProcessResult::Successful) => {} - Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), - Err(error) => { - error!(self.log, "Backfill sync failed"; "error" => ?error); - // Update the global status - self.update_sync_state(); - } - } - } - ChainSegmentProcessId::ParentLookup(chain_hash) => self - .block_lookups - .parent_chain_processed(chain_hash, result, &mut self.network), - }, + } + } + ChainSegmentProcessId::ParentLookup(chain_hash) => self + .block_lookups + .parent_chain_processed(chain_hash, result, &mut self.network), + }, + } + } + + fn handle_new_execution_engine_state(&mut self, engine_state: EngineState) { + self.network.update_execution_engine_state(engine_state); + + match engine_state { + EngineState::Online => { + // Resume sync components. + + // - Block lookups: + // We start searching for blocks again. This is done by updating the stored ee online + // state. No further action required. + + // - Parent lookups: + // We start searching for parents again. This is done by updating the stored ee + // online state. No further action required. 
+ + // - Range: + // Actively resume. + self.range_sync.resume(&mut self.network); + + // - Backfill: + // Not affected by ee states, nothing to do. + } + + EngineState::Offline => { + // Pause sync components. + + // - Block lookups: + // Disabled while in this state. We drop current requests and don't search for new + // blocks. + let dropped_single_blocks_requests = + self.block_lookups.drop_single_block_requests(); + + // - Parent lookups: + // Disabled while in this state. We drop current requests and don't search for new + // blocks. + let dropped_parent_chain_requests = self.block_lookups.drop_parent_chain_requests(); + + // - Range: + // We still send found peers to range so that it can keep track of potential chains + // with respect to our current peers. Range will stop processing batches in the + // meantime. No further action from the manager is required for this. + + // - Backfill: Not affected by ee states, nothing to do. + + // Some logs. + if dropped_single_blocks_requests > 0 || dropped_parent_chain_requests > 0 { + debug!(self.log, "Execution engine not online, dropping active requests."; + "dropped_single_blocks_requests" => dropped_single_blocks_requests, + "dropped_parent_chain_requests" => dropped_parent_chain_requests, + ); } } } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index ffbd1a64da..45ade7034c 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -3,24 +3,25 @@ use super::manager::{Id, RequestId as SyncRequestId}; use super::range_sync::{BatchId, ChainId}; +use crate::beacon_processor::WorkEvent; use crate::service::{NetworkMessage, RequestId}; use crate::status::ToStatusMessage; +use beacon_chain::{BeaconChainTypes, EngineState}; use fnv::FnvHashMap; use lighthouse_network::rpc::{BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason}; use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, 
ReportSource, Request}; use slog::{debug, trace, warn}; use std::sync::Arc; use tokio::sync::mpsc; -use types::EthSpec; /// Wraps a Network channel to employ various RPC related network functionality for the Sync manager. This includes management of a global RPC request Id. -pub struct SyncNetworkContext<T: EthSpec> { +pub struct SyncNetworkContext<T: BeaconChainTypes> { /// The network channel to relay messages to the Network service. - network_send: mpsc::UnboundedSender<NetworkMessage<T>>, + network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>, /// Access to the network global vars. - network_globals: Arc<NetworkGlobals<T>>, + network_globals: Arc<NetworkGlobals<T::EthSpec>>, /// A sequential ID for all RPC requests. request_id: Id, @@ -28,24 +29,35 @@ pub struct SyncNetworkContext<T: EthSpec> { /// BlocksByRange requests made by the range syncing algorithm. range_requests: FnvHashMap<Id, (ChainId, BatchId)>, + /// BlocksByRange requests made by backfill syncing. backfill_requests: FnvHashMap<Id, BatchId>, + /// Whether the ee is online. If it's not, we don't allow access to the + /// `beacon_processor_send`. + execution_engine_state: EngineState, + + /// Channel to send work to the beacon processor. + beacon_processor_send: mpsc::Sender<WorkEvent<T>>, + /// Logger for the `SyncNetworkContext`. 
log: slog::Logger, } -impl<T: EthSpec> SyncNetworkContext<T> { +impl<T: BeaconChainTypes> SyncNetworkContext<T> { pub fn new( - network_send: mpsc::UnboundedSender<NetworkMessage<T>>, - network_globals: Arc<NetworkGlobals<T>>, + network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>, + network_globals: Arc<NetworkGlobals<T::EthSpec>>, + beacon_processor_send: mpsc::Sender<WorkEvent<T>>, log: slog::Logger, ) -> Self { Self { network_send, + execution_engine_state: EngineState::Online, // always assume `Online` at the start network_globals, request_id: 1, range_requests: FnvHashMap::default(), backfill_requests: FnvHashMap::default(), + beacon_processor_send, log, } } @@ -211,6 +223,16 @@ impl<T: EthSpec> SyncNetworkContext<T> { Ok(id) } + pub fn is_execution_engine_online(&self) -> bool { + self.execution_engine_state == EngineState::Online + } + + pub fn update_execution_engine_state(&mut self, engine_state: EngineState) { + debug!(self.log, "Sync's view on execution engine state updated"; + "past_state" => ?self.execution_engine_state, "new_state" => ?engine_state); + self.execution_engine_state = engine_state; + } + /// Terminates the connection with the peer and bans them. pub fn goodbye_peer(&mut self, peer_id: PeerId, reason: GoodbyeReason) { self.network_send @@ -249,13 +271,22 @@ impl<T: EthSpec> SyncNetworkContext<T> { } /// Sends an arbitrary network message. 
- fn send_network_msg(&mut self, msg: NetworkMessage<T>) -> Result<(), &'static str> { + fn send_network_msg(&mut self, msg: NetworkMessage<T::EthSpec>) -> Result<(), &'static str> { self.network_send.send(msg).map_err(|_| { debug!(self.log, "Could not send message to the network service"); "Network channel send Failed" }) } + pub fn processor_channel_if_enabled(&self) -> Option<&mpsc::Sender<WorkEvent<T>>> { + self.is_execution_engine_online() + .then_some(&self.beacon_processor_send) + } + + pub fn processor_channel(&self) -> &mpsc::Sender<WorkEvent<T>> { + &self.beacon_processor_send + } + fn next_id(&mut self) -> Id { let id = self.request_id; self.request_id += 1; diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index a54105f5cb..4226b600f5 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -11,7 +11,6 @@ use slog::{crit, debug, o, warn}; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::hash::{Hash, Hasher}; use std::sync::Arc; -use tokio::sync::mpsc::Sender; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of @@ -102,9 +101,6 @@ pub struct SyncingChain<T: BeaconChainTypes> { /// Batches validated by this chain. validated_batches: u64, - /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. - beacon_processor_send: Sender<BeaconWorkEvent<T>>, - is_finalized_segment: bool, /// The chain's log. 
@@ -132,7 +128,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> { target_head_slot: Slot, target_head_root: Hash256, peer_id: PeerId, - beacon_processor_send: Sender<BeaconWorkEvent<T>>, is_finalized_segment: bool, log: &slog::Logger, ) -> Self { @@ -155,7 +150,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> { state: ChainSyncingState::Stopped, current_processing_batch: None, validated_batches: 0, - beacon_processor_send, is_finalized_segment, log: log.new(o!("chain" => id)), } @@ -186,7 +180,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { pub fn remove_peer( &mut self, peer_id: &PeerId, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, ) -> ProcessingResult { if let Some(batch_ids) = self.peers.remove(peer_id) { // fail the batches @@ -227,7 +221,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { /// If the block correctly completes the batch it will be processed if possible. pub fn on_block_response( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, batch_id: BatchId, peer_id: &PeerId, request_id: Id, @@ -296,7 +290,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { /// The batch must exist and be ready for processing fn process_batch( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, batch_id: BatchId, ) -> ProcessingResult { // Only process batches if this chain is Syncing, and only one at a time @@ -304,6 +298,11 @@ impl<T: BeaconChainTypes> SyncingChain<T> { return Ok(KeepChain); } + let beacon_processor_send = match network.processor_channel_if_enabled() { + Some(channel) => channel, + None => return Ok(KeepChain), + }; + let batch = match self.batches.get_mut(&batch_id) { Some(batch) => batch, None => { @@ -327,9 +326,8 @@ impl<T: BeaconChainTypes> SyncingChain<T> { let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id, count_unrealized); self.current_processing_batch = Some(batch_id); - if let Err(e) = self - 
.beacon_processor_send - .try_send(BeaconWorkEvent::chain_segment(process_id, blocks)) + if let Err(e) = + beacon_processor_send.try_send(BeaconWorkEvent::chain_segment(process_id, blocks)) { crit!(self.log, "Failed to send chain segment to processor."; "msg" => "process_batch", "error" => %e, "batch" => self.processing_target); @@ -346,7 +344,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { /// Processes the next ready batch, prioritizing optimistic batches over the processing target. fn process_completed_batches( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, ) -> ProcessingResult { // Only process batches if this chain is Syncing and only process one batch at a time if self.state != ChainSyncingState::Syncing || self.current_processing_batch.is_some() { @@ -447,7 +445,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { /// of the batch processor. pub fn on_batch_process_result( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, batch_id: BatchId, result: &BatchProcessResult, ) -> ProcessingResult { @@ -580,7 +578,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { fn reject_optimistic_batch( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, redownload: bool, reason: &str, ) -> ProcessingResult { @@ -611,11 +609,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { /// /// If a previous batch has been validated and it had been re-processed, penalize the original /// peer. - fn advance_chain( - &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, - validating_epoch: Epoch, - ) { + fn advance_chain(&mut self, network: &mut SyncNetworkContext<T>, validating_epoch: Epoch) { // make sure this epoch produces an advancement if validating_epoch <= self.start_epoch { return; @@ -719,7 +713,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { /// intended and can result in downvoting a peer. 
fn handle_invalid_batch( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, batch_id: BatchId, ) -> ProcessingResult { // The current batch could not be processed, indicating either the current or previous @@ -778,7 +772,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { /// This could be new chain, or an old chain that is being resumed. pub fn start_syncing( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, local_finalized_epoch: Epoch, optimistic_start_epoch: Epoch, ) -> ProcessingResult { @@ -816,7 +810,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { /// If the chain is active, this starts requesting batches from this peer. pub fn add_peer( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, peer_id: PeerId, ) -> ProcessingResult { // add the peer without overwriting its active requests @@ -833,7 +827,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { /// If the batch exists it is re-requested. pub fn inject_error( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, batch_id: BatchId, peer_id: &PeerId, request_id: Id, @@ -865,7 +859,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { /// Sends and registers the request of a batch awaiting download. pub fn retry_batch_download( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, batch_id: BatchId, ) -> ProcessingResult { let batch = match self.batches.get_mut(&batch_id) { @@ -898,7 +892,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> { /// Requests the batch assigned to the given id from a given peer. 
pub fn send_batch( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, batch_id: BatchId, peer: PeerId, ) -> ProcessingResult { @@ -967,12 +961,21 @@ impl<T: BeaconChainTypes> SyncingChain<T> { } } + /// Kickstarts the chain by sending for processing batches that are ready and requesting more + /// batches if needed. + pub fn resume( + &mut self, + network: &mut SyncNetworkContext<T>, + ) -> Result<KeepChain, RemoveChain> { + // Request more batches if needed. + self.request_batches(network)?; + // If there is any batch ready for processing, send it. + self.process_completed_batches(network) + } + /// Attempts to request the next required batches from the peer pool if the chain is syncing. It will exhaust the peer /// pool and left over batches until the batch buffer is reached or all peers are exhausted. - fn request_batches( - &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, - ) -> ProcessingResult { + fn request_batches(&mut self, network: &mut SyncNetworkContext<T>) -> ProcessingResult { if !matches!(self.state, ChainSyncingState::Syncing) { return Ok(KeepChain); } diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index e76adff3af..37a3f13e73 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -6,7 +6,6 @@ use super::block_storage::BlockStorage; use super::chain::{ChainId, ProcessingResult, RemoveChain, SyncingChain}; use super::sync_type::RangeSyncType; -use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use crate::metrics; use crate::sync::network_context::SyncNetworkContext; use beacon_chain::BeaconChainTypes; @@ -18,7 +17,6 @@ use smallvec::SmallVec; use std::collections::hash_map::Entry; use std::collections::HashMap; use std::sync::Arc; -use tokio::sync::mpsc; use types::EthSpec; use types::{Epoch, Hash256, Slot}; @@ 
-193,10 +191,9 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> { /// do so. pub fn update( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, local: &SyncInfo, awaiting_head_peers: &mut HashMap<PeerId, SyncInfo>, - beacon_processor_send: &mpsc::Sender<BeaconWorkEvent<T>>, ) { // Remove any outdated finalized/head chains self.purge_outdated_chains(local, awaiting_head_peers); @@ -212,7 +209,6 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> { local.finalized_epoch, local_head_epoch, awaiting_head_peers, - beacon_processor_send, ); } } @@ -257,7 +253,7 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> { /// or not. fn update_finalized_chains( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, local_epoch: Epoch, local_head_epoch: Epoch, ) { @@ -326,11 +322,10 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> { /// Start syncing any head chains if required. 
fn update_head_chains( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, local_epoch: Epoch, local_head_epoch: Epoch, awaiting_head_peers: &mut HashMap<PeerId, SyncInfo>, - beacon_processor_send: &mpsc::Sender<BeaconWorkEvent<T>>, ) { // Include the awaiting head peers for (peer_id, peer_sync_info) in awaiting_head_peers.drain() { @@ -341,7 +336,6 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> { peer_sync_info.head_slot, peer_id, RangeSyncType::Head, - beacon_processor_send, network, ); } @@ -468,8 +462,7 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> { target_head_slot: Slot, peer: PeerId, sync_type: RangeSyncType, - beacon_processor_send: &mpsc::Sender<BeaconWorkEvent<T>>, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, ) { let id = SyncingChain::<T>::id(&target_head_root, &target_head_slot); let (collection, is_finalized) = if let RangeSyncType::Finalized = sync_type { @@ -500,7 +493,6 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> { target_head_slot, target_head_root, peer, - beacon_processor_send.clone(), is_finalized, &self.log, ); diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 4b29d31295..2531454387 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -43,7 +43,6 @@ use super::block_storage::BlockStorage; use super::chain::{BatchId, ChainId, RemoveChain, SyncingChain}; use super::chain_collection::ChainCollection; use super::sync_type::RangeSyncType; -use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use crate::status::ToStatusMessage; use crate::sync::manager::Id; use crate::sync::network_context::SyncNetworkContext; @@ -56,7 +55,6 @@ use lru_cache::LRUTimeCache; use slog::{crit, debug, trace, warn}; use std::collections::HashMap; use std::sync::Arc; -use 
tokio::sync::mpsc; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// For how long we store failed finalized chains to prevent retries. @@ -76,8 +74,6 @@ pub struct RangeSync<T: BeaconChainTypes, C = BeaconChain<T>> { chains: ChainCollection<T, C>, /// Chains that have failed and are stored to prevent being retried. failed_chains: LRUTimeCache<Hash256>, - /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. - beacon_processor_send: mpsc::Sender<BeaconWorkEvent<T>>, /// The syncing logger. log: slog::Logger, } @@ -87,11 +83,7 @@ where C: BlockStorage + ToStatusMessage, T: BeaconChainTypes, { - pub fn new( - beacon_chain: Arc<C>, - beacon_processor_send: mpsc::Sender<BeaconWorkEvent<T>>, - log: slog::Logger, - ) -> Self { + pub fn new(beacon_chain: Arc<C>, log: slog::Logger) -> Self { RangeSync { beacon_chain: beacon_chain.clone(), chains: ChainCollection::new(beacon_chain, log.clone()), @@ -99,7 +91,6 @@ where FAILED_CHAINS_EXPIRY_SECONDS, )), awaiting_head_peers: HashMap::new(), - beacon_processor_send, log, } } @@ -117,7 +108,7 @@ where /// prioritised by peer-pool size. 
pub fn add_peer( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, local_info: SyncInfo, peer_id: PeerId, remote_info: SyncInfo, @@ -159,16 +150,11 @@ where remote_finalized_slot, peer_id, RangeSyncType::Finalized, - &self.beacon_processor_send, network, ); - self.chains.update( - network, - &local_info, - &mut self.awaiting_head_peers, - &self.beacon_processor_send, - ); + self.chains + .update(network, &local_info, &mut self.awaiting_head_peers); } RangeSyncType::Head => { // This peer requires a head chain sync @@ -197,15 +183,10 @@ where remote_info.head_slot, peer_id, RangeSyncType::Head, - &self.beacon_processor_send, network, ); - self.chains.update( - network, - &local_info, - &mut self.awaiting_head_peers, - &self.beacon_processor_send, - ); + self.chains + .update(network, &local_info, &mut self.awaiting_head_peers); } } } @@ -216,7 +197,7 @@ where /// This request could complete a chain or simply add to its progress. pub fn blocks_by_range_response( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, peer_id: PeerId, chain_id: ChainId, batch_id: BatchId, @@ -246,7 +227,7 @@ where pub fn handle_block_process_result( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, chain_id: ChainId, batch_id: Epoch, result: BatchProcessResult, @@ -276,11 +257,7 @@ where /// A peer has disconnected. This removes the peer from any ongoing chains and mappings. A /// disconnected peer could remove a chain - pub fn peer_disconnect( - &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, - peer_id: &PeerId, - ) { + pub fn peer_disconnect(&mut self, network: &mut SyncNetworkContext<T>, peer_id: &PeerId) { // if the peer is in the awaiting head mapping, remove it self.awaiting_head_peers.remove(peer_id); @@ -292,7 +269,7 @@ where /// which pool the peer is in. The chain may also have a batch or batches awaiting /// for this peer. 
If so we mark the batch as failed. The batch may then hit it's maximum /// retries. In this case, we need to remove the chain. - fn remove_peer(&mut self, network: &mut SyncNetworkContext<T::EthSpec>, peer_id: &PeerId) { + fn remove_peer(&mut self, network: &mut SyncNetworkContext<T>, peer_id: &PeerId) { for (removed_chain, sync_type, remove_reason) in self .chains .call_all(|chain| chain.remove_peer(peer_id, network)) @@ -304,8 +281,6 @@ where network, "peer removed", ); - - // update the state of the collection } } @@ -315,7 +290,7 @@ where /// been too many failed attempts for the batch, remove the chain. pub fn inject_error( &mut self, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, peer_id: PeerId, batch_id: BatchId, chain_id: ChainId, @@ -347,7 +322,7 @@ where chain: SyncingChain<T>, sync_type: RangeSyncType, remove_reason: RemoveChain, - network: &mut SyncNetworkContext<T::EthSpec>, + network: &mut SyncNetworkContext<T>, op: &'static str, ) { if remove_reason.is_critical() { @@ -374,12 +349,23 @@ where }; // update the state of the collection - self.chains.update( - network, - &local, - &mut self.awaiting_head_peers, - &self.beacon_processor_send, - ); + self.chains + .update(network, &local, &mut self.awaiting_head_peers); + } + + /// Kickstarts sync. 
+ pub fn resume(&mut self, network: &mut SyncNetworkContext<T>) { + for (removed_chain, sync_type, remove_reason) in + self.chains.call_all(|chain| chain.resume(network)) + { + self.on_chain_removed( + removed_chain, + sync_type, + remove_reason, + network, + "chain resumed", + ); + } } } @@ -389,13 +375,16 @@ mod tests { use crate::NetworkMessage; use super::*; + use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use beacon_chain::builder::Witness; use beacon_chain::eth1_chain::CachingEth1Backend; use beacon_chain::parking_lot::RwLock; + use beacon_chain::EngineState; use lighthouse_network::rpc::BlocksByRangeRequest; use lighthouse_network::Request; use lighthouse_network::{rpc::StatusMessage, NetworkGlobals}; use slog::{o, Drain}; + use tokio::sync::mpsc; use slot_clock::SystemTimeSlotClock; use std::collections::HashSet; @@ -470,7 +459,7 @@ mod tests { /// To set up different scenarios where sync is told about known/unkown blocks. chain: Arc<FakeStorage>, /// Needed by range to handle communication with the network. - cx: SyncNetworkContext<E>, + cx: SyncNetworkContext<TestBeaconChainType>, /// To check what the network receives from Range. network_rx: mpsc::UnboundedReceiver<NetworkMessage<E>>, /// To modify what the network declares about various global variables, in particular about @@ -516,12 +505,13 @@ mod tests { } /// Reads an BlocksByRange request to a given peer from the network receiver channel. 
+ #[track_caller] fn grab_request(&mut self, expected_peer: &PeerId) -> (RequestId, BlocksByRangeRequest) { - if let Some(NetworkMessage::SendRequest { + if let Ok(NetworkMessage::SendRequest { peer_id, request: Request::BlocksByRange(request), request_id, - }) = self.network_rx.blocking_recv() + }) = self.network_rx.try_recv() { assert_eq!(&peer_id, expected_peer); (request_id, request) @@ -575,6 +565,29 @@ mod tests { let peer_id = PeerId::random(); (peer_id, local_info, remote_info) } + + #[track_caller] + fn expect_empty_processor(&mut self) { + match self.beacon_processor_rx.try_recv() { + Ok(work) => { + panic!("Expected empty processor. Instead got {}", work.work_type()); + } + Err(e) => match e { + mpsc::error::TryRecvError::Empty => {} + mpsc::error::TryRecvError::Disconnected => unreachable!("bad coded test?"), + }, + } + } + + #[track_caller] + fn expect_chain_segment(&mut self) { + match self.beacon_processor_rx.try_recv() { + Ok(work) => { + assert_eq!(work.work_type(), crate::beacon_processor::CHAIN_SEGMENT); + } + other => panic!("Expected chain segment process, found {:?}", other), + } + } } fn range(log_enabled: bool) -> (TestRig, RangeSync<TestBeaconChainType, FakeStorage>) { @@ -583,7 +596,6 @@ mod tests { let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(10); let range_sync = RangeSync::<TestBeaconChainType, FakeStorage>::new( chain.clone(), - beacon_processor_tx, log.new(o!("component" => "range")), ); let (network_tx, network_rx) = mpsc::unbounded_channel(); @@ -591,6 +603,7 @@ mod tests { let cx = SyncNetworkContext::new( network_tx, globals.clone(), + beacon_processor_tx, log.new(o!("component" => "network_context")), ); let test_rig = TestRig { @@ -661,4 +674,53 @@ mod tests { let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); range.add_peer(&mut rig.cx, local_info, finalized_peer, remote_info); } + + #[test] + fn pause_and_resume_on_ee_offline() { + let (mut rig, mut range) = range(true); + + // add some 
peers + let (peer1, local_info, head_info) = rig.head_peer(); + range.add_peer(&mut rig.cx, local_info, peer1, head_info); + let ((chain1, batch1), id1) = match rig.grab_request(&peer1).0 { + RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => { + (rig.cx.range_sync_response(id, true).unwrap(), id) + } + other => panic!("unexpected request {:?}", other), + }; + + // make the ee offline + rig.cx.update_execution_engine_state(EngineState::Offline); + + // send the response to the request + range.blocks_by_range_response(&mut rig.cx, peer1, chain1, batch1, id1, None); + + // the beacon processor shouldn't have received any work + rig.expect_empty_processor(); + + // while the ee is offline, more peers might arrive. Add a new finalized peer. + let (peer2, local_info, finalized_info) = rig.finalized_peer(); + range.add_peer(&mut rig.cx, local_info, peer2, finalized_info); + let ((chain2, batch2), id2) = match rig.grab_request(&peer2).0 { + RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => { + (rig.cx.range_sync_response(id, true).unwrap(), id) + } + other => panic!("unexpected request {:?}", other), + }; + + // send the response to the request + range.blocks_by_range_response(&mut rig.cx, peer2, chain2, batch2, id2, None); + + // the beacon processor shouldn't have received any work + rig.expect_empty_processor(); + + // make the beacon processor available again. + rig.cx.update_execution_engine_state(EngineState::Online); + + // now resume range, we should have two processing requests in the beacon processor. 
+ range.resume(&mut rig.cx); + + rig.expect_chain_segment(); + rig.expect_chain_segment(); + } } From cb132c622d776d804752f991a5a6937f4e75b7d4 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Wed, 24 Aug 2022 23:34:58 +0000 Subject: [PATCH 152/184] don't register exited or slashed validators with the builder api (#3473) ## Issue Addressed #3465 ## Proposed Changes Filter out any validator registrations for validators that are not `active` or `pending`. I'm adding this filtering the beacon node because all the information is readily available there. In other parts of the VC we are usually sending per-validator requests based on duties from the BN. And duties will only be provided for active validators so we don't have this type of filtering elsewhere in the VC. Co-authored-by: realbigsean <sean@sigmaprime.io> --- beacon_node/http_api/src/lib.rs | 45 +++++++++++--- beacon_node/http_api/tests/tests.rs | 95 +++++++++++++++++++++++++++++ 2 files changed, 132 insertions(+), 8 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index bcd8788465..59e6554aee 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -25,6 +25,7 @@ use beacon_chain::{ BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped, }; pub use block_id::BlockId; +use eth2::types::ValidatorStatus; use eth2::types::{self as api_types, EndpointVersion, ValidatorId}; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; @@ -2481,19 +2482,47 @@ pub fn serve<T: BeaconChainTypes>( "count" => register_val_data.len(), ); - let preparation_data = register_val_data - .iter() + let head_snapshot = chain.head_snapshot(); + let spec = &chain.spec; + + let (preparation_data, filtered_registration_data): ( + Vec<ProposerPreparationData>, + Vec<SignedValidatorRegistrationData>, + ) = register_val_data + .into_iter() 
.filter_map(|register_data| { chain .validator_index(®ister_data.message.pubkey) .ok() .flatten() - .map(|validator_index| ProposerPreparationData { - validator_index: validator_index as u64, - fee_recipient: register_data.message.fee_recipient, + .and_then(|validator_index| { + let validator = head_snapshot + .beacon_state + .get_validator(validator_index) + .ok()?; + let validator_status = ValidatorStatus::from_validator( + validator, + current_epoch, + spec.far_future_epoch, + ) + .superstatus(); + let is_active_or_pending = + matches!(validator_status, ValidatorStatus::Pending) + || matches!(validator_status, ValidatorStatus::Active); + + // Filter out validators who are not 'active' or 'pending'. + is_active_or_pending.then(|| { + ( + ProposerPreparationData { + validator_index: validator_index as u64, + fee_recipient: register_data.message.fee_recipient, + }, + register_data, + ) + }) }) }) - .collect::<Vec<_>>(); + .unzip(); // Update the prepare beacon proposer cache based on this request. 
execution_layer @@ -2522,11 +2551,11 @@ pub fn serve<T: BeaconChainTypes>( info!( log, "Forwarding register validator request to connected builder"; - "count" => register_val_data.len(), + "count" => filtered_registration_data.len(), ); builder - .post_builder_validators(®ister_val_data) + .post_builder_validators(&filtered_registration_data) .await .map(|resp| warp::reply::json(&resp)) .map_err(|e| { diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index bd25450a47..3144060f10 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -2459,6 +2459,93 @@ impl ApiTester { self } + pub async fn test_post_validator_register_validator_slashed(self) -> Self { + // slash a validator + self.client + .post_beacon_pool_attester_slashings(&self.attester_slashing) + .await + .unwrap(); + + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let mut registrations = vec![]; + let mut fee_recipients = vec![]; + + let genesis_epoch = self.chain.spec.genesis_slot.epoch(E::slots_per_epoch()); + let fork = Fork { + current_version: self.chain.spec.genesis_fork_version, + previous_version: self.chain.spec.genesis_fork_version, + epoch: genesis_epoch, + }; + + let expected_gas_limit = 11_111_111; + + for (val_index, keypair) in self.validator_keypairs().iter().enumerate() { + let pubkey = keypair.pk.compress(); + let fee_recipient = Address::from_low_u64_be(val_index as u64); + + let data = ValidatorRegistrationData { + fee_recipient, + gas_limit: expected_gas_limit, + timestamp: 0, + pubkey, + }; + + let domain = self.chain.spec.get_domain( + genesis_epoch, + Domain::ApplicationMask(ApplicationDomain::Builder), + &fork, + Hash256::zero(), + ); + let message = data.signing_root(domain); + let signature = keypair.sk.sign(message); + + let signed = SignedValidatorRegistrationData { + message: data, + signature, + }; + + 
fee_recipients.push(fee_recipient); + registrations.push(signed); + } + + self.client + .post_validator_register_validator(®istrations) + .await + .unwrap(); + + for (val_index, (_, fee_recipient)) in self + .chain + .head_snapshot() + .beacon_state + .validators() + .into_iter() + .zip(fee_recipients.into_iter()) + .enumerate() + { + let actual = self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_suggested_fee_recipient(val_index as u64) + .await; + if val_index == 0 || val_index == 1 { + assert_eq!(actual, Address::from_low_u64_be(val_index as u64)); + } else { + assert_eq!(actual, fee_recipient); + } + } + + self + } + // Helper function for tests that require a valid RANDAO signature. async fn get_test_randao(&self, slot: Slot, epoch: Epoch) -> (u64, SignatureBytes) { let fork = self.chain.canonical_head.cached_head().head_fork(); @@ -3964,6 +4051,14 @@ async fn post_validator_register_validator() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_validator_slashed() { + ApiTester::new() + .await + .test_post_validator_register_validator_slashed() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_validator_register_valid() { ApiTester::new_mev_tester() From ebd661783e24894e35c334c66fc2dc1e8f7b3bb9 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Wed, 24 Aug 2022 23:34:59 +0000 Subject: [PATCH 153/184] Enable `block_lookup_failed` EF test (#3489) ## Issue Addressed Resolves #3448 ## Proposed Changes Removes a known failure that wasn't actually a known failure. The tests declare this block invalid and we refuse to import it due to `ExecutionPayloadError(UnverifiedNonOptimisticCandidate)`. This is correct since there is only one "eth1" block included in this test and two are required to trigger the merge (pre- and post-TTD blocks). 
It is slot 1 (tick = 12s) when this block is imported so the import must be prevented by `SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY`. I'm not sure where I got the idea in #3448 that this test needed retrospective checking, that seems like a false assumption in hindsight. ## Additional Info - Blocked on #3464 --- testing/ef_tests/src/cases/fork_choice.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 9efb7ada12..650452d783 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -156,13 +156,6 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> { fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { let tester = Tester::new(self, testing_spec::<E>(fork_name))?; - // TODO(merge): re-enable this test before production. - // This test is skipped until we can do retrospective confirmations of the terminal - // block after an optimistic sync. - if self.description == "block_lookup_failed" { - return Err(Error::SkippedKnownFailure); - }; - for step in &self.steps { match step { Step::Tick { tick } => tester.set_tick(*tick), From c64e17bb81502afa6820ff5a2d3a479a45e67ab3 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Wed, 24 Aug 2022 23:35:00 +0000 Subject: [PATCH 154/184] Return `readonly: false` for local keystores (#3490) ## Issue Addressed NA ## Proposed Changes Indicate that local keystores are `readonly: Some(false)` rather than `None` via the `/eth/v1/keystores` method on the VC API. I'll mark this as backwards-incompat so we remember to mention it in the release notes. There aren't any type-level incompatibilities here, just a change in how Lighthouse responds to responses. 
## Additional Info - Blocked on #3464 --- validator_client/src/http_api/keystores.rs | 2 +- validator_client/src/http_api/tests/keystores.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/validator_client/src/http_api/keystores.rs b/validator_client/src/http_api/keystores.rs index 29af8d0205..b886f60435 100644 --- a/validator_client/src/http_api/keystores.rs +++ b/validator_client/src/http_api/keystores.rs @@ -40,7 +40,7 @@ pub fn list<T: SlotClock + 'static, E: EthSpec>( SigningMethod::LocalKeystore { ref voting_keystore, .. - } => (voting_keystore.path(), None), + } => (voting_keystore.path(), Some(false)), SigningMethod::Web3Signer { .. } => (None, Some(true)), }); diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index 5cc755db53..769d8a1d49 100644 --- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -399,7 +399,7 @@ fn get_web3_signer_keystores() { .map(|local_keystore| SingleKeystoreResponse { validating_pubkey: keystore_pubkey(local_keystore), derivation_path: local_keystore.path(), - readonly: None, + readonly: Some(false), }) .chain(remote_vals.iter().map(|remote_val| SingleKeystoreResponse { validating_pubkey: remote_val.voting_public_key.compress(), @@ -1775,7 +1775,7 @@ fn import_same_local_and_remote_keys() { .map(|local_keystore| SingleKeystoreResponse { validating_pubkey: keystore_pubkey(local_keystore), derivation_path: local_keystore.path(), - readonly: None, + readonly: Some(false), }) .collect::<Vec<_>>(); for response in expected_responses { From 1c9ec42dcbe3b143adc947ce1d87e6834495534d Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Fri, 26 Aug 2022 21:47:50 +0000 Subject: [PATCH 155/184] More merge doc updates (#3509) ## Proposed Changes Address a few shortcomings of the book noticed by users: - Remove description of redundant execution nodes - Use an Infura eth1 node 
rather than an eth2 node in the merge migration example - Add an example of the fee recipient address format (we support addresses without the 0x prefix, but 0x prefixed feels more canonical). - Clarify that Windows support is no longer beta - Add a link to the MSRV to the build-from-source instructions --- book/src/installation-binaries.md | 4 +--- book/src/installation-source.md | 11 +++++++---- book/src/installation.md | 6 ++++-- book/src/merge-migration.md | 7 ++++--- book/src/redundancy.md | 29 ++++++++--------------------- book/src/suggested-fee-recipient.md | 11 +++++++++++ 6 files changed, 35 insertions(+), 33 deletions(-) diff --git a/book/src/installation-binaries.md b/book/src/installation-binaries.md index 7a5aad32bf..2365ea7ed7 100644 --- a/book/src/installation-binaries.md +++ b/book/src/installation-binaries.md @@ -4,8 +4,6 @@ Each Lighthouse release contains several downloadable binaries in the "Assets" section of the release. You can find the [releases on Github](https://github.com/sigp/lighthouse/releases). -> Note: binaries are provided for Windows native, but Windows Lighthouse support is still in beta testing. 
- ## Platforms Binaries are supplied for four platforms: @@ -13,7 +11,7 @@ Binaries are supplied for four platforms: - `x86_64-unknown-linux-gnu`: AMD/Intel 64-bit processors (most desktops, laptops, servers) - `aarch64-unknown-linux-gnu`: 64-bit ARM processors (Raspberry Pi 4) - `x86_64-apple-darwin`: macOS with Intel chips -- `x86_64-windows`: Windows with 64-bit processors (Beta) +- `x86_64-windows`: Windows with 64-bit processors Additionally there is also a `-portable` suffix which indicates if the `portable` feature is used: diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 1f8477260f..661035ca51 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -52,10 +52,9 @@ choco install cmake --installargs 'ADD_CMAKE_TO_PATH=System' choco install llvm ``` -These dependencies are for compiling Lighthouse natively on Windows, which is currently in beta -testing. Lighthouse can also run successfully under the [Windows Subsystem for Linux (WSL)][WSL]. -If using Ubuntu under WSL, you should follow the instructions for Ubuntu listed in the [Dependencies -(Ubuntu)](#ubuntu) section. +These dependencies are for compiling Lighthouse natively on Windows. Lighthouse can also run +successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using Ubuntu under WSL, you +should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)](#ubuntu) section. [WSL]: https://docs.microsoft.com/en-us/windows/wsl/about @@ -138,6 +137,10 @@ See ["Configuring the `PATH` environment variable" Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply type `rustup update`. +If you can't install the latest version of Rust you can instead compile using the Minimum Supported +Rust Version (MSRV) which is listed under the `rust-version` key in Lighthouse's +[Cargo.toml](https://github.com/sigp/lighthouse/blob/stable/lighthouse/Cargo.toml). 
+ If compilation fails with `(signal: 9, SIGKILL: kill)`, this could mean your machine ran out of memory during compilation. If you are on a resource-constrained device you can look into [cross compilation](./cross-compiling.md), or use a [pre-built diff --git a/book/src/installation.md b/book/src/installation.md index e222c401a2..bc546e0987 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -1,6 +1,6 @@ # 📦 Installation -Lighthouse runs on Linux, macOS, and Windows (still in beta testing). +Lighthouse runs on Linux, macOS, and Windows. There are three core methods to obtain the Lighthouse application: @@ -8,9 +8,11 @@ There are three core methods to obtain the Lighthouse application: - [Docker images](./docker.md). - [Building from source](./installation-source.md). -The community maintains additional installation methods (currently only one). +Community-maintained additional installation methods: - [Homebrew package](./homebrew.md). +- Arch Linux AUR packages: [source](https://aur.archlinux.org/packages/lighthouse-ethereum), + [binary](https://aur.archlinux.org/packages/lighthouse-ethereum-bin). Additionally, there are two extra guides for specific uses: diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index 9ac22a7612..104a7ead6d 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -11,11 +11,12 @@ There are two configuration changes required for a Lighthouse node to operate co the merge: 1. You *must* run your own execution engine such as Geth or Nethermind alongside Lighthouse. - You *must* update your Lighthouse configuration to connect to the execution engine using new + You *must* update your `lighthouse bn` configuration to connect to the execution engine using new flags which are documented on this page in the [Connecting to an execution engine](#connecting-to-an-execution-engine) section. 2. 
If your Lighthouse node has validators attached you *must* nominate an Ethereum address to - receive transactions tips from blocks proposed by your validators. This is covered on the + receive transactions tips from blocks proposed by your validators. These changes should + be made to your `lighthouse vc` configuration, and are covered on the [Suggested fee recipient](./suggested-fee-recipient.md) page. Additionally, you _must_ update Lighthouse to v3.0.0 (or later), and must update your execution @@ -76,7 +77,7 @@ lighthouse \ --network mainnet \ beacon_node \ --http \ - --eth1-endpoints http://localhost:8545,https://TOKEN@eth2-beacon-mainnet.infura.io + --eth1-endpoints http://localhost:8545,https://mainnet.infura.io/v3/TOKEN ``` Converting the above to a post-merge configuration would render: diff --git a/book/src/redundancy.md b/book/src/redundancy.md index 3409effb36..d4156832bd 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -4,8 +4,8 @@ There are three places in Lighthouse where redundancy is notable: -1. ✅ GOOD: Using a redundant Beacon node in `lighthouse vc --beacon-nodes` -1. ✅ GOOD: Using a redundant execution node in `lighthouse bn --eth1-endpoints` +1. ✅ GOOD: Using a redundant beacon node in `lighthouse vc --beacon-nodes` +1. ❌ NOT SUPPORTED: Using a redundant execution node in `lighthouse bn --execution-endpoint` 1. ☠️ BAD: Running redundant `lighthouse vc` instances with overlapping keypairs. I mention (3) since it is unsafe and should not be confused with the other two @@ -94,23 +94,10 @@ resource consumption akin to running 64+ validators. ## Redundant execution nodes -Compared to redundancy in beacon nodes (see above), using redundant execution nodes -is very straight-forward: +Lighthouse previously supported redundant execution nodes for fetching data from the deposit +contract. On merged networks _this is no longer supported_. Each Lighthouse beacon node must be +configured in a 1:1 relationship with an execution node. 
For more information on the rationale +behind this decision please see the [Merge Migration](./merge-migration.md) documentation. -1. `lighthouse bn --eth1-endpoints http://localhost:8545` -1. `lighthouse bn --eth1-endpoints http://localhost:8545,http://192.168.0.1:8545` - -In the case of (1), any failure on `http://localhost:8545` will result in a -failure to update the execution client cache in the beacon node. Consistent failure over a -period of hours may result in a failure in block production. - -However, in the case of (2), the `http://192.168.0.1:8545` execution client endpoint will -be tried each time the first fails. Execution client endpoints will be tried from first to -last in the list, until a successful response is obtained. - -There is no need for special configuration on the execution client endpoint, all endpoints can (probably should) -be configured identically. - -> Note: When supplying multiple endpoints the `http://localhost:8545` address must be explicitly -> provided (if it is desired). It will only be used as default if no `--eth1-endpoints` flag is -> provided at all. +To achieve redundancy we recommend configuring [Redundant beacon nodes](#redundant-beacon-nodes) +where each has its own execution engine. diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md index bcd5878027..c966481a31 100644 --- a/book/src/suggested-fee-recipient.md +++ b/book/src/suggested-fee-recipient.md @@ -65,11 +65,22 @@ Below is an example of the validator_definitions.yml with `suggested_fee_recipie The `--suggested-fee-recipient` can be provided to the VC to act as a default value for all validators where a `suggested_fee_recipient` is not loaded from another method. +Provide a 0x-prefixed address, e.g. + +``` +lighthouse vc --suggested-fee-recipient 0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b ... +``` + + ### 3. 
Using the "--suggested-fee-recipient" flag on the beacon node The `--suggested-fee-recipient` can be provided to the BN to act as a default value when the validator client does not transmit a `suggested_fee_recipient` to the BN. +``` +lighthouse bn --suggested-fee-recipient 0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b ... +``` + **This value should be considered an emergency fallback**. You should set the fee recipient in the validator client in order for the execution node to be given adequate notice of block proposal. From 66eca1a88218462235cb76a116dc3c6a1853444f Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Mon, 29 Aug 2022 09:10:26 +0000 Subject: [PATCH 156/184] Refactor op pool for speed and correctness (#3312) ## Proposed Changes This PR has two aims: to speed up attestation packing in the op pool, and to fix bugs in the verification of attester slashings, proposer slashings and voluntary exits. The changes are bundled into a single database schema upgrade (v12). Attestation packing is sped up by removing several inefficiencies: - No more recalculation of `attesting_indices` during packing. - No (unnecessary) examination of the `ParticipationFlags`: a bitfield suffices. See `RewardCache`. - No re-checking of attestation validity during packing: the `AttestationMap` provides attestations which are "correct by construction" (I have checked this using Hydra). - No SSZ re-serialization for the clunky `AttestationId` type (it can be removed in a future release). So far the speed-up seems to be roughly 2-10x, from 500ms down to 50-100ms. Verification of attester slashings, proposer slashings and voluntary exits is fixed by: - Tracking the `ForkVersion`s that were used to verify each message inside the `SigVerifiedOp`. This allows us to quickly re-verify that they match the head state's opinion of what the `ForkVersion` should be at the epoch(s) relevant to the message. - Storing the `SigVerifiedOp` on disk rather than the raw operation. 
This allows us to continue to track the fork versions after a reboot. This is mostly contained in this commit 52bb1840ae5c4356a8fc3a51e5df23ed65ed2c7f. ## Additional Info The schema upgrade uses the justified state to re-verify attestations and compute `attesting_indices` for them. It will drop any attestations that fail to verify, by the logic that attestations are most valuable in the few slots after they're observed, and are probably stale and useless by the time a node restarts. Exits and proposer slashings are similarly re-verified to obtain `SigVerifiedOp`s. This PR contains a runtime killswitch `--paranoid-block-proposal` which opts out of all the optimisations in favour of closely verifying every included message. Although I'm quite sure that the optimisations are correct this flag could be useful in the event of an unforeseen emergency. Finally, you might notice that the `RewardCache` appears quite useless in its current form because it is only updated on the hot-path immediately before proposal. My hope is that in future we can shift calls to `RewardCache::update` into the background, e.g. while performing the state advance. It is also forward-looking to `tree-states` compatibility, where iterating and indexing `state.{previous,current}_epoch_participation` is expensive and needs to be minimised. 
--- Cargo.lock | 4 + .../src/attestation_verification.rs | 9 +- beacon_node/beacon_chain/src/beacon_chain.rs | 153 +++- beacon_node/beacon_chain/src/block_reward.rs | 33 +- .../beacon_chain/src/block_verification.rs | 10 +- beacon_node/beacon_chain/src/chain_config.rs | 3 + .../beacon_chain/src/observed_operations.rs | 34 +- beacon_node/beacon_chain/src/schema_change.rs | 11 + .../src/schema_change/migration_schema_v12.rs | 226 +++++ beacon_node/beacon_chain/src/test_utils.rs | 33 +- beacon_node/beacon_chain/tests/store_tests.rs | 4 +- beacon_node/http_api/src/block_rewards.rs | 11 +- beacon_node/http_api/src/lib.rs | 34 +- .../beacon_processor/worker/gossip_methods.rs | 15 +- beacon_node/operation_pool/Cargo.toml | 2 + beacon_node/operation_pool/src/attestation.rs | 74 +- .../operation_pool/src/attestation_id.rs | 37 +- .../operation_pool/src/attestation_storage.rs | 245 ++++++ .../operation_pool/src/attester_slashing.rs | 8 +- beacon_node/operation_pool/src/lib.rs | 811 +++++++++++------- beacon_node/operation_pool/src/max_cover.rs | 22 +- beacon_node/operation_pool/src/metrics.rs | 4 + beacon_node/operation_pool/src/persistence.rs | 121 ++- .../operation_pool/src/reward_cache.rs | 122 +++ beacon_node/src/cli.rs | 10 + beacon_node/src/config.rs | 2 + beacon_node/store/src/metadata.rs | 2 +- book/src/database-migrations.md | 1 + consensus/state_processing/Cargo.toml | 2 + .../src/common/get_attesting_indices.rs | 15 +- .../src/common/get_indexed_attestation.rs | 4 +- consensus/state_processing/src/common/mod.rs | 2 +- .../base/validator_statuses.rs | 4 +- .../state_processing/src/upgrade/altair.rs | 4 +- .../state_processing/src/verify_operation.rs | 137 ++- consensus/types/src/proposer_slashing.rs | 7 + lighthouse/tests/beacon_node.rs | 15 + 37 files changed, 1710 insertions(+), 521 deletions(-) create mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs create mode 100644 beacon_node/operation_pool/src/attestation_storage.rs create mode 
100644 beacon_node/operation_pool/src/reward_cache.rs diff --git a/Cargo.lock b/Cargo.lock index 5a2c4312b1..46cd2d96f0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4410,12 +4410,14 @@ name = "operation_pool" version = "0.2.0" dependencies = [ "beacon_chain", + "bitvec 1.0.1", "derivative", "eth2_ssz", "eth2_ssz_derive", "itertools", "lazy_static", "lighthouse_metrics", + "maplit", "parking_lot 0.12.1", "rayon", "serde", @@ -6271,9 +6273,11 @@ dependencies = [ "arbitrary", "beacon_chain", "bls", + "derivative", "env_logger 0.9.0", "eth2_hashing", "eth2_ssz", + "eth2_ssz_derive", "eth2_ssz_types", "int_to_bytes", "integer-sqrt", diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 63af6ab9e1..b60ce7efe5 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -318,10 +318,17 @@ impl<'a, T: BeaconChainTypes> Clone for IndexedUnaggregatedAttestation<'a, T> { /// A helper trait implemented on wrapper types that can be progressed to a state where they can be /// verified for application to fork choice. -pub trait VerifiedAttestation<T: BeaconChainTypes> { +pub trait VerifiedAttestation<T: BeaconChainTypes>: Sized { fn attestation(&self) -> &Attestation<T::EthSpec>; fn indexed_attestation(&self) -> &IndexedAttestation<T::EthSpec>; + + // Inefficient default implementation. This is overridden for gossip verified attestations. 
+ fn into_attestation_and_indices(self) -> (Attestation<T::EthSpec>, Vec<u64>) { + let attestation = self.attestation().clone(); + let attesting_indices = self.indexed_attestation().attesting_indices.clone().into(); + (attestation, attesting_indices) + } } impl<'a, T: BeaconChainTypes> VerifiedAttestation<T> for VerifiedAggregatedAttestation<'a, T> { diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4d37926dd9..fdcd3eed88 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -63,7 +63,7 @@ use fork_choice::{ use futures::channel::mpsc::Sender; use itertools::process_results; use itertools::Itertools; -use operation_pool::{OperationPool, PersistedOperationPool}; +use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool}; use parking_lot::{Mutex, RwLock}; use safe_arith::SafeArith; use slasher::Slasher; @@ -71,12 +71,15 @@ use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; use state_processing::{ - common::get_indexed_attestation, + common::{get_attesting_indices_from_state, get_indexed_attestation}, per_block_processing, - per_block_processing::errors::AttestationValidationError, + per_block_processing::{ + errors::AttestationValidationError, verify_attestation_for_block_inclusion, + VerifySignatures, + }, per_slot_processing, state_advance::{complete_state_advance, partial_state_advance}, - BlockSignatureStrategy, SigVerifiedOp, VerifyBlockRoot, + BlockSignatureStrategy, SigVerifiedOp, VerifyBlockRoot, VerifyOperation, }; use std::cmp::Ordering; use std::collections::HashMap; @@ -1904,25 +1907,22 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// Accepts a `VerifiedAttestation` and attempts to apply it to `self.op_pool`. /// /// The op pool is used by local block producers to pack blocks with operations. 
- pub fn add_to_block_inclusion_pool( + pub fn add_to_block_inclusion_pool<A>( &self, - verified_attestation: &impl VerifiedAttestation<T>, - ) -> Result<(), AttestationError> { + verified_attestation: A, + ) -> Result<(), AttestationError> + where + A: VerifiedAttestation<T>, + { let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_APPLY_TO_OP_POOL); // If there's no eth1 chain then it's impossible to produce blocks and therefore // useless to put things in the op pool. if self.eth1_chain.is_some() { - let fork = self.canonical_head.cached_head().head_fork(); - + let (attestation, attesting_indices) = + verified_attestation.into_attestation_and_indices(); self.op_pool - .insert_attestation( - // TODO: address this clone. - verified_attestation.attestation().clone(), - &fork, - self.genesis_validators_root, - &self.spec, - ) + .insert_attestation(attestation, attesting_indices) .map_err(Error::from)?; } @@ -1955,15 +1955,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> { pub fn filter_op_pool_attestation( &self, filter_cache: &mut HashMap<(Hash256, Epoch), bool>, - att: &Attestation<T::EthSpec>, + att: &AttestationRef<T::EthSpec>, state: &BeaconState<T::EthSpec>, ) -> bool { *filter_cache - .entry((att.data.beacon_block_root, att.data.target.epoch)) + .entry((att.data.beacon_block_root, att.checkpoint.target_epoch)) .or_insert_with(|| { self.shuffling_is_compatible( &att.data.beacon_block_root, - att.data.target.epoch, + att.checkpoint.target_epoch, state, ) }) @@ -2045,7 +2045,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { pub fn verify_voluntary_exit_for_gossip( &self, exit: SignedVoluntaryExit, - ) -> Result<ObservationOutcome<SignedVoluntaryExit>, Error> { + ) -> Result<ObservationOutcome<SignedVoluntaryExit, T::EthSpec>, Error> { // NOTE: this could be more efficient if it avoided cloning the head state let wall_clock_state = self.wall_clock_state()?; Ok(self @@ -2066,7 +2066,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { } /// Accept a 
pre-verified exit and queue it for inclusion in an appropriate block. - pub fn import_voluntary_exit(&self, exit: SigVerifiedOp<SignedVoluntaryExit>) { + pub fn import_voluntary_exit(&self, exit: SigVerifiedOp<SignedVoluntaryExit, T::EthSpec>) { if self.eth1_chain.is_some() { self.op_pool.insert_voluntary_exit(exit) } @@ -2076,7 +2076,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { pub fn verify_proposer_slashing_for_gossip( &self, proposer_slashing: ProposerSlashing, - ) -> Result<ObservationOutcome<ProposerSlashing>, Error> { + ) -> Result<ObservationOutcome<ProposerSlashing, T::EthSpec>, Error> { let wall_clock_state = self.wall_clock_state()?; Ok(self.observed_proposer_slashings.lock().verify_and_observe( proposer_slashing, @@ -2086,7 +2086,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> { } /// Accept some proposer slashing and queue it for inclusion in an appropriate block. - pub fn import_proposer_slashing(&self, proposer_slashing: SigVerifiedOp<ProposerSlashing>) { + pub fn import_proposer_slashing( + &self, + proposer_slashing: SigVerifiedOp<ProposerSlashing, T::EthSpec>, + ) { if self.eth1_chain.is_some() { self.op_pool.insert_proposer_slashing(proposer_slashing) } @@ -2096,7 +2099,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { pub fn verify_attester_slashing_for_gossip( &self, attester_slashing: AttesterSlashing<T::EthSpec>, - ) -> Result<ObservationOutcome<AttesterSlashing<T::EthSpec>>, Error> { + ) -> Result<ObservationOutcome<AttesterSlashing<T::EthSpec>, T::EthSpec>, Error> { let wall_clock_state = self.wall_clock_state()?; Ok(self.observed_attester_slashings.lock().verify_and_observe( attester_slashing, @@ -2111,7 +2114,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// 2. Add it to the op pool. pub fn import_attester_slashing( &self, - attester_slashing: SigVerifiedOp<AttesterSlashing<T::EthSpec>>, + attester_slashing: SigVerifiedOp<AttesterSlashing<T::EthSpec>, T::EthSpec>, ) { // Add to fork choice. 
self.canonical_head @@ -2120,10 +2123,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { // Add to the op pool (if we have the ability to propose blocks). if self.eth1_chain.is_some() { - self.op_pool.insert_attester_slashing( - attester_slashing, - self.canonical_head.cached_head().head_fork(), - ) + self.op_pool.insert_attester_slashing(attester_slashing) } } @@ -3351,7 +3351,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { } }; - let (proposer_slashings, attester_slashings, voluntary_exits) = + let (mut proposer_slashings, mut attester_slashings, mut voluntary_exits) = self.op_pool.get_slashings_and_exits(&state, &self.spec); let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; @@ -3362,12 +3362,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let unagg_import_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_UNAGGREGATED_TIMES); for attestation in self.naive_aggregation_pool.read().iter() { - if let Err(e) = self.op_pool.insert_attestation( - attestation.clone(), - &state.fork(), - state.genesis_validators_root(), - &self.spec, - ) { + let import = |attestation: &Attestation<T::EthSpec>| { + let attesting_indices = get_attesting_indices_from_state(&state, attestation)?; + self.op_pool + .insert_attestation(attestation.clone(), attesting_indices) + }; + if let Err(e) = import(attestation) { // Don't stop block production if there's an error, just create a log. 
error!( self.log, @@ -3388,15 +3388,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> { metrics::start_timer(&metrics::BLOCK_PRODUCTION_ATTESTATION_TIMES); let mut prev_filter_cache = HashMap::new(); - let prev_attestation_filter = |att: &&Attestation<T::EthSpec>| { - self.filter_op_pool_attestation(&mut prev_filter_cache, *att, &state) + let prev_attestation_filter = |att: &AttestationRef<T::EthSpec>| { + self.filter_op_pool_attestation(&mut prev_filter_cache, att, &state) }; let mut curr_filter_cache = HashMap::new(); - let curr_attestation_filter = |att: &&Attestation<T::EthSpec>| { - self.filter_op_pool_attestation(&mut curr_filter_cache, *att, &state) + let curr_attestation_filter = |att: &AttestationRef<T::EthSpec>| { + self.filter_op_pool_attestation(&mut curr_filter_cache, att, &state) }; - let attestations = self + let mut attestations = self .op_pool .get_attestations( &state, @@ -3407,6 +3407,77 @@ impl<T: BeaconChainTypes> BeaconChain<T> { .map_err(BlockProductionError::OpPoolError)?; drop(attestation_packing_timer); + // If paranoid mode is enabled re-check the signatures of every included message. + // This will be a lot slower but guards against bugs in block production and can be + // quickly rolled out without a release. 
+ if self.config.paranoid_block_proposal { + attestations.retain(|att| { + verify_attestation_for_block_inclusion( + &state, + att, + VerifySignatures::True, + &self.spec, + ) + .map_err(|e| { + warn!( + self.log, + "Attempted to include an invalid attestation"; + "err" => ?e, + "block_slot" => state.slot(), + "attestation" => ?att + ); + }) + .is_ok() + }); + + proposer_slashings.retain(|slashing| { + slashing + .clone() + .validate(&state, &self.spec) + .map_err(|e| { + warn!( + self.log, + "Attempted to include an invalid proposer slashing"; + "err" => ?e, + "block_slot" => state.slot(), + "slashing" => ?slashing + ); + }) + .is_ok() + }); + + attester_slashings.retain(|slashing| { + slashing + .clone() + .validate(&state, &self.spec) + .map_err(|e| { + warn!( + self.log, + "Attempted to include an invalid attester slashing"; + "err" => ?e, + "block_slot" => state.slot(), + "slashing" => ?slashing + ); + }) + .is_ok() + }); + + voluntary_exits.retain(|exit| { + exit.clone() + .validate(&state, &self.spec) + .map_err(|e| { + warn!( + self.log, + "Attempted to include an invalid voluntary exit"; + "err" => ?e, + "block_slot" => state.slot(), + "exit" => ?exit + ); + }) + .is_ok() + }); + } + + let slot = state.slot(); + let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? 
as u64; diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs index 4b8b809d3f..3bddd2a521 100644 --- a/beacon_node/beacon_chain/src/block_reward.rs +++ b/beacon_node/beacon_chain/src/block_reward.rs @@ -1,7 +1,10 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::lighthouse::{AttestationRewards, BlockReward, BlockRewardMeta}; -use operation_pool::{AttMaxCover, MaxCover}; -use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards; +use operation_pool::{AttMaxCover, MaxCover, RewardCache, SplitAttestation}; +use state_processing::{ + common::get_attesting_indices_from_state, + per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards, +}; use types::{BeaconBlockRef, BeaconState, EthSpec, ExecPayload, Hash256}; impl<T: BeaconChainTypes> BeaconChain<T> { @@ -10,20 +13,38 @@ impl<T: BeaconChainTypes> BeaconChain<T> { block: BeaconBlockRef<'_, T::EthSpec, Payload>, block_root: Hash256, state: &BeaconState<T::EthSpec>, + reward_cache: &mut RewardCache, include_attestations: bool, ) -> Result<BlockReward, BeaconChainError> { if block.slot() != state.slot() { return Err(BeaconChainError::BlockRewardSlotError); } + reward_cache.update(state)?; + let total_active_balance = state.get_total_active_balance()?; - let mut per_attestation_rewards = block + + let split_attestations = block .body() .attestations() .iter() .map(|att| { - AttMaxCover::new(att, state, total_active_balance, &self.spec) - .ok_or(BeaconChainError::BlockRewardAttestationError) + let attesting_indices = get_attesting_indices_from_state(state, att)?; + Ok(SplitAttestation::new(att.clone(), attesting_indices)) + }) + .collect::<Result<Vec<_>, BeaconChainError>>()?; + + let mut per_attestation_rewards = split_attestations + .iter() + .map(|att| { + AttMaxCover::new( + att.as_ref(), + state, + reward_cache, + total_active_balance, + &self.spec, + ) + 
.ok_or(BeaconChainError::BlockRewardAttestationError) }) .collect::<Result<Vec<_>, _>>()?; @@ -34,7 +55,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let latest_att = &updated[i]; for att in to_update { - att.update_covering_set(latest_att.object(), latest_att.covering_set()); + att.update_covering_set(latest_att.intermediate(), latest_att.covering_set()); } } diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 95d5f818f0..cdcbf3f68e 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1307,8 +1307,14 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> { */ if let Some(ref event_handler) = chain.event_handler { if event_handler.has_block_reward_subscribers() { - let block_reward = - chain.compute_block_reward(block.message(), block_root, &state, true)?; + let mut reward_cache = Default::default(); + let block_reward = chain.compute_block_reward( + block.message(), + block_root, + &state, + &mut reward_cache, + true, + )?; event_handler.register(EventKind::BlockReward(block_reward)); } } diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index aa7ff02af1..ba3a0b628c 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -35,6 +35,8 @@ pub struct ChainConfig { /// Whether any chain health checks should be considered when deciding whether to use the builder API. pub builder_fallback_disable_checks: bool, pub count_unrealized: bool, + /// Whether to apply paranoid checks to blocks proposed by this beacon node. 
+ pub paranoid_block_proposal: bool, } impl Default for ChainConfig { @@ -52,6 +54,7 @@ impl Default for ChainConfig { builder_fallback_epochs_since_finalization: 3, builder_fallback_disable_checks: false, count_unrealized: true, + paranoid_block_proposal: false, } } } diff --git a/beacon_node/beacon_chain/src/observed_operations.rs b/beacon_node/beacon_chain/src/observed_operations.rs index f1eb996a54..8d8272b67d 100644 --- a/beacon_node/beacon_chain/src/observed_operations.rs +++ b/beacon_node/beacon_chain/src/observed_operations.rs @@ -1,10 +1,12 @@ use derivative::Derivative; use smallvec::SmallVec; +use ssz::{Decode, Encode}; use state_processing::{SigVerifiedOp, VerifyOperation}; use std::collections::HashSet; use std::marker::PhantomData; use types::{ - AttesterSlashing, BeaconState, ChainSpec, EthSpec, ProposerSlashing, SignedVoluntaryExit, + AttesterSlashing, BeaconState, ChainSpec, EthSpec, ForkName, ProposerSlashing, + SignedVoluntaryExit, Slot, }; /// Number of validator indices to store on the stack in `observed_validators`. @@ -24,13 +26,16 @@ pub struct ObservedOperations<T: ObservableOperation<E>, E: EthSpec> { /// previously seen attester slashings, i.e. those validators in the intersection of /// `attestation_1.attester_indices` and `attestation_2.attester_indices`. observed_validator_indices: HashSet<u64>, + /// The name of the current fork. The default will be overwritten on first use. + #[derivative(Default(value = "ForkName::Base"))] + current_fork: ForkName, _phantom: PhantomData<(T, E)>, } /// Was the observed operation new and valid for further processing, or a useless duplicate? 
#[derive(Debug, PartialEq, Eq, Clone)] -pub enum ObservationOutcome<T> { - New(SigVerifiedOp<T>), +pub enum ObservationOutcome<T: Encode + Decode, E: EthSpec> { + New(SigVerifiedOp<T, E>), AlreadyKnown, } @@ -81,7 +86,9 @@ impl<T: ObservableOperation<E>, E: EthSpec> ObservedOperations<T, E> { op: T, head_state: &BeaconState<E>, spec: &ChainSpec, - ) -> Result<ObservationOutcome<T>, T::Error> { + ) -> Result<ObservationOutcome<T, E>, T::Error> { + self.reset_at_fork_boundary(head_state.slot(), spec); + let observed_validator_indices = &mut self.observed_validator_indices; let new_validator_indices = op.observed_validators(); @@ -107,4 +114,23 @@ impl<T: ObservableOperation<E>, E: EthSpec> ObservedOperations<T, E> { Ok(ObservationOutcome::New(verified_op)) } + + /// Reset the cache when crossing a fork boundary. + /// + /// This prevents an attacker from crafting a self-slashing which is only valid before the fork + /// (e.g. using the Altair fork domain at a Bellatrix epoch), in order to prevent propagation of + /// all other slashings due to the duplicate check. + /// + /// It doesn't matter if this cache gets reset too often, as we reset it on restart anyway and a + /// false negative just results in propagation of messages which should have been ignored. + /// + /// In future we could check slashing relevance against the op pool itself, but that would + /// require indexing the attester slashings in the op pool by validator index. + fn reset_at_fork_boundary(&mut self, head_slot: Slot, spec: &ChainSpec) { + let head_fork = spec.fork_name_at_slot::<E>(head_slot); + if head_fork != self.current_fork { + self.observed_validator_indices.clear(); + self.current_fork = head_fork; + } + } } diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index b6c70b5435..15b0f39f3a 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,6 +1,7 @@ //! 
Utilities for managing database schema changes. mod migration_schema_v10; mod migration_schema_v11; +mod migration_schema_v12; mod migration_schema_v6; mod migration_schema_v7; mod migration_schema_v8; @@ -196,6 +197,16 @@ pub fn migrate_schema<T: BeaconChainTypes>( Ok(()) } + // Upgrade from v11 to v12 to store richer metadata in the attestation op pool. + (SchemaVersion(11), SchemaVersion(12)) => { + let ops = migration_schema_v12::upgrade_to_v12::<T>(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + // Downgrade from v12 to v11 to drop richer metadata from the attestation op pool. + (SchemaVersion(12), SchemaVersion(11)) => { + let ops = migration_schema_v12::downgrade_from_v12::<T>(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs new file mode 100644 index 0000000000..bb72b28c0e --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs @@ -0,0 +1,226 @@ +use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}; +use crate::persisted_fork_choice::PersistedForkChoiceV11; +use operation_pool::{PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5}; +use slog::{debug, info, Logger}; +use state_processing::{ + common::get_indexed_attestation, per_block_processing::is_valid_indexed_attestation, + VerifyOperation, VerifySignatures, +}; +use std::sync::Arc; +use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; + +pub fn upgrade_to_v12<T: BeaconChainTypes>( + db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>, + log: Logger, +) -> Result<Vec<KeyValueStoreOp>, Error> { + let spec = db.get_chain_spec(); + + // Load a V5 op pool and transform it to V12. 
+ let PersistedOperationPoolV5 { + attestations_v5, + sync_contributions, + attester_slashings_v5, + proposer_slashings_v5, + voluntary_exits_v5, + } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? { + op_pool + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + // Load the persisted fork choice so we can grab the state of the justified block and use + // it to verify the stored attestations, slashings and exits. + let fork_choice = db + .get_item::<PersistedForkChoiceV11>(&FORK_CHOICE_DB_KEY)? + .ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?; + let justified_block_root = fork_choice + .fork_choice_store + .unrealized_justified_checkpoint + .root; + let justified_block = db + .get_blinded_block(&justified_block_root)? + .ok_or_else(|| { + Error::SchemaMigrationError(format!( + "unrealized justified block missing for migration: {justified_block_root:?}", + )) + })?; + let justified_state_root = justified_block.state_root(); + let mut state = db + .get_state(&justified_state_root, Some(justified_block.slot()))? + .ok_or_else(|| { + Error::SchemaMigrationError(format!( + "justified state missing for migration: {justified_state_root:?}" + )) + })?; + state.build_all_committee_caches(spec).map_err(|e| { + Error::SchemaMigrationError(format!("unable to build committee caches: {e:?}")) + })?; + + // Re-verify attestations while adding attesting indices. 
+ let attestations = attestations_v5 + .into_iter() + .flat_map(|(_, attestations)| attestations) + .filter_map(|attestation| { + let res = state + .get_beacon_committee(attestation.data.slot, attestation.data.index) + .map_err(Into::into) + .and_then(|committee| get_indexed_attestation(committee.committee, &attestation)) + .and_then(|indexed_attestation| { + is_valid_indexed_attestation( + &state, + &indexed_attestation, + VerifySignatures::True, + spec, + )?; + Ok(indexed_attestation) + }); + + match res { + Ok(indexed) => Some((attestation, indexed.attesting_indices.into())), + Err(e) => { + debug!( + log, + "Dropping attestation on migration"; + "err" => ?e, + "head_block" => ?attestation.data.beacon_block_root, + ); + None + } + } + }) + .collect::<Vec<_>>(); + + let attester_slashings = attester_slashings_v5 + .iter() + .filter_map(|(slashing, _)| { + slashing + .clone() + .validate(&state, spec) + .map_err(|e| { + debug!( + log, + "Dropping attester slashing on migration"; + "err" => ?e, + "slashing" => ?slashing, + ); + }) + .ok() + }) + .collect::<Vec<_>>(); + + let proposer_slashings = proposer_slashings_v5 + .iter() + .filter_map(|slashing| { + slashing + .clone() + .validate(&state, spec) + .map_err(|e| { + debug!( + log, + "Dropping proposer slashing on migration"; + "err" => ?e, + "slashing" => ?slashing, + ); + }) + .ok() + }) + .collect::<Vec<_>>(); + + let voluntary_exits = voluntary_exits_v5 + .iter() + .filter_map(|exit| { + exit.clone() + .validate(&state, spec) + .map_err(|e| { + debug!( + log, + "Dropping voluntary exit on migration"; + "err" => ?e, + "exit" => ?exit, + ); + }) + .ok() + }) + .collect::<Vec<_>>(); + + debug!( + log, + "Migrated op pool"; + "attestations" => attestations.len(), + "attester_slashings" => attester_slashings.len(), + "proposer_slashings" => proposer_slashings.len(), + "voluntary_exits" => voluntary_exits.len() + ); + + let v12 = PersistedOperationPool::V12(PersistedOperationPoolV12 { + attestations, + 
sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + }); + Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)]) +} + +pub fn downgrade_from_v12<T: BeaconChainTypes>( + db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>, + log: Logger, +) -> Result<Vec<KeyValueStoreOp>, Error> { + // Load a V12 op pool and transform it to V5. + let PersistedOperationPoolV12 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + } = if let Some(PersistedOperationPool::<T::EthSpec>::V12(op_pool)) = + db.get_item(&OP_POOL_DB_KEY)? + { + op_pool + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + info!( + log, + "Dropping attestations from pool"; + "count" => attestations.len(), + ); + + let attester_slashings_v5 = attester_slashings + .into_iter() + .filter_map(|slashing| { + let fork_version = slashing.first_fork_verified_against()?; + Some((slashing.into_inner(), fork_version)) + }) + .collect::<Vec<_>>(); + + let proposer_slashings_v5 = proposer_slashings + .into_iter() + .map(|slashing| slashing.into_inner()) + .collect::<Vec<_>>(); + + let voluntary_exits_v5 = voluntary_exits + .into_iter() + .map(|exit| exit.into_inner()) + .collect::<Vec<_>>(); + + info!( + log, + "Migrated slashings and exits"; + "attester_slashings" => attester_slashings_v5.len(), + "proposer_slashings" => proposer_slashings_v5.len(), + "voluntary_exits" => voluntary_exits_v5.len(), + ); + + let v5 = PersistedOperationPoolV5 { + attestations_v5: vec![], + sync_contributions, + attester_slashings_v5, + proposer_slashings_v5, + voluntary_exits_v5, + }; + Ok(vec![v5.as_kv_store_op(OP_POOL_DB_KEY)]) +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 9b62590703..a62608202e 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1175,6 +1175,19 @@ where } pub fn 
make_attester_slashing(&self, validator_indices: Vec<u64>) -> AttesterSlashing<E> { + self.make_attester_slashing_with_epochs(validator_indices, None, None, None, None) + } + + pub fn make_attester_slashing_with_epochs( + &self, + validator_indices: Vec<u64>, + source1: Option<Epoch>, + target1: Option<Epoch>, + source2: Option<Epoch>, + target2: Option<Epoch>, + ) -> AttesterSlashing<E> { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let mut attestation_1 = IndexedAttestation { attesting_indices: VariableList::new(validator_indices).unwrap(), data: AttestationData { @@ -1183,11 +1196,11 @@ where beacon_block_root: Hash256::zero(), target: Checkpoint { root: Hash256::zero(), - epoch: Epoch::new(0), + epoch: target1.unwrap_or(fork.epoch), }, source: Checkpoint { root: Hash256::zero(), - epoch: Epoch::new(0), + epoch: source1.unwrap_or(Epoch::new(0)), }, }, signature: AggregateSignature::infinity(), @@ -1195,8 +1208,9 @@ where let mut attestation_2 = attestation_1.clone(); attestation_2.data.index += 1; + attestation_2.data.source.epoch = source2.unwrap_or(Epoch::new(0)); + attestation_2.data.target.epoch = target2.unwrap_or(fork.epoch); - let fork = self.chain.canonical_head.cached_head().head_fork(); for attestation in &mut [&mut attestation_1, &mut attestation_2] { for &i in &attestation.attesting_indices { let sk = &self.validator_keypairs[i as usize].sk; @@ -1280,8 +1294,19 @@ where } pub fn make_proposer_slashing(&self, validator_index: u64) -> ProposerSlashing { + self.make_proposer_slashing_at_slot(validator_index, None) + } + + pub fn make_proposer_slashing_at_slot( + &self, + validator_index: u64, + slot_override: Option<Slot>, + ) -> ProposerSlashing { let mut block_header_1 = self.chain.head_beacon_block().message().block_header(); block_header_1.proposer_index = validator_index; + if let Some(slot) = slot_override { + block_header_1.slot = slot; + } let mut block_header_2 = block_header_1.clone(); block_header_2.state_root = 
Hash256::zero(); @@ -1488,7 +1513,7 @@ where self.chain .apply_attestation_to_fork_choice(&verified) .unwrap(); - self.chain.add_to_block_inclusion_pool(&verified).unwrap(); + self.chain.add_to_block_inclusion_pool(verified).unwrap(); } } diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index d9d5ca20d7..afd97750a6 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -795,9 +795,7 @@ async fn multiple_attestations_per_block() { snapshot .beacon_block .as_ref() - .clone() - .deconstruct() - .0 + .message() .body() .attestations() .len() as u64, diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 682828aee4..3b81b894db 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -52,6 +52,7 @@ pub fn get_block_rewards<T: BeaconChainTypes>( .build_all_caches(&chain.spec) .map_err(beacon_state_error)?; + let mut reward_cache = Default::default(); let mut block_rewards = Vec::with_capacity(blocks.len()); let block_replayer = BlockReplayer::new(state, &chain.spec) @@ -63,6 +64,7 @@ pub fn get_block_rewards<T: BeaconChainTypes>( block.message(), block.canonical_root(), state, + &mut reward_cache, query.include_attestations, )?; block_rewards.push(block_reward); @@ -100,6 +102,7 @@ pub fn compute_block_rewards<T: BeaconChainTypes>( ) -> Result<Vec<BlockReward>, warp::Rejection> { let mut block_rewards = Vec::with_capacity(blocks.len()); let mut state_cache = LruCache::new(STATE_CACHE_SIZE); + let mut reward_cache = Default::default(); for block in blocks { let parent_root = block.parent_root(); @@ -170,7 +173,13 @@ pub fn compute_block_rewards<T: BeaconChainTypes>( // Compute block reward. 
let block_reward = chain - .compute_block_reward(block.to_ref(), block.canonical_root(), state, true) + .compute_block_reward( + block.to_ref(), + block.canonical_root(), + state, + &mut reward_cache, + true, + ) .map_err(beacon_chain_error)?; block_rewards.push(block_reward); } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 59e6554aee..48178f4f0d 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -45,11 +45,12 @@ use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ - Attestation, AttesterSlashing, BeaconStateError, BlindedPayload, CommitteeCache, - ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, ProposerPreparationData, - ProposerSlashing, RelativeEpoch, Signature, SignedAggregateAndProof, SignedBeaconBlock, - SignedBlindedBeaconBlock, SignedContributionAndProof, SignedValidatorRegistrationData, - SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, + Attestation, AttestationData, AttesterSlashing, BeaconStateError, BlindedPayload, + CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, + ProposerPreparationData, ProposerSlashing, RelativeEpoch, Signature, SignedAggregateAndProof, + SignedBeaconBlock, SignedBlindedBeaconBlock, SignedContributionAndProof, + SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, + SyncContributionData, }; use version::{ add_consensus_version_header, execution_optimistic_fork_versioned_response, @@ -1305,13 +1306,11 @@ pub fn serve<T: BeaconChainTypes>( .and_then( |chain: Arc<BeaconChain<T>>, query: api_types::AttestationPoolQuery| { blocking_json_task(move || { - let query_filter = |attestation: &Attestation<T::EthSpec>| { - query - .slot - .map_or(true, |slot| slot == attestation.data.slot) + let query_filter = |data: &AttestationData| { + query.slot.map_or(true, |slot| slot == data.slot) && query .committee_index 
- .map_or(true, |index| index == attestation.data.index) + .map_or(true, |index| index == data.index) }; let mut attestations = chain.op_pool.get_filtered_attestations(query_filter); @@ -1321,7 +1320,7 @@ pub fn serve<T: BeaconChainTypes>( .read() .iter() .cloned() - .filter(query_filter), + .filter(|att| query_filter(&att.data)), ); Ok(api_types::GenericResponse::from(attestations)) }) @@ -2317,12 +2316,13 @@ pub fn serve<T: BeaconChainTypes>( ); failures.push(api_types::Failure::new(index, format!("Fork choice: {:?}", e))); } - if let Err(e) = chain.add_to_block_inclusion_pool(&verified_aggregate) { - warn!(log, - "Could not add verified aggregate attestation to the inclusion pool"; - "error" => format!("{:?}", e), - "request_index" => index, - ); + if let Err(e) = chain.add_to_block_inclusion_pool(verified_aggregate) { + warn!( + log, + "Could not add verified aggregate attestation to the inclusion pool"; + "error" => ?e, + "request_index" => index, + ); failures.push(api_types::Failure::new(index, format!("Op pool: {:?}", e))); } } diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index e6625e43f8..93ed1b463b 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -54,6 +54,12 @@ impl<T: BeaconChainTypes> VerifiedAttestation<T> for VerifiedUnaggregate<T> { fn indexed_attestation(&self) -> &IndexedAttestation<T::EthSpec> { &self.indexed_attestation } + + fn into_attestation_and_indices(self) -> (Attestation<T::EthSpec>, Vec<u64>) { + let attestation = *self.attestation; + let attesting_indices = self.indexed_attestation.attesting_indices.into(); + (attestation, attesting_indices) + } } /// An attestation that failed validation by the `BeaconChain`. 
@@ -81,6 +87,13 @@ impl<T: BeaconChainTypes> VerifiedAttestation<T> for VerifiedAggregate<T> { fn indexed_attestation(&self) -> &IndexedAttestation<T::EthSpec> { &self.indexed_attestation } + + /// Efficient clone-free implementation that moves out of the `Box`. + fn into_attestation_and_indices(self) -> (Attestation<T::EthSpec>, Vec<u64>) { + let attestation = self.signed_aggregate.message.aggregate; + let attesting_indices = self.indexed_attestation.attesting_indices.into(); + (attestation, attesting_indices) + } } /// An attestation that failed validation by the `BeaconChain`. @@ -595,7 +608,7 @@ impl<T: BeaconChainTypes> Worker<T> { } } - if let Err(e) = self.chain.add_to_block_inclusion_pool(&verified_aggregate) { + if let Err(e) = self.chain.add_to_block_inclusion_pool(verified_aggregate) { debug!( self.log, "Attestation invalid for op pool"; diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 6b8b8eb145..1d67ecdccc 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -18,7 +18,9 @@ rayon = "1.5.0" serde = "1.0.116" serde_derive = "1.0.116" store = { path = "../store" } +bitvec = "1" [dev-dependencies] beacon_chain = { path = "../beacon_chain" } tokio = { version = "1.14.0", features = ["rt-multi-thread"] } +maplit = "1.0.2" diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index 2f7fba4540..4af4edc0e4 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -1,4 +1,6 @@ +use crate::attestation_storage::AttestationRef; use crate::max_cover::MaxCover; +use crate::reward_cache::RewardCache; use state_processing::common::{ altair, base, get_attestation_participation_flag_indices, get_attesting_indices, }; @@ -12,34 +14,35 @@ use types::{ #[derive(Debug, Clone)] pub struct AttMaxCover<'a, T: EthSpec> { /// Underlying attestation. 
- pub att: &'a Attestation<T>, + pub att: AttestationRef<'a, T>, /// Mapping of validator indices and their rewards. pub fresh_validators_rewards: HashMap<u64, u64>, } impl<'a, T: EthSpec> AttMaxCover<'a, T> { pub fn new( - att: &'a Attestation<T>, + att: AttestationRef<'a, T>, state: &BeaconState<T>, + reward_cache: &'a RewardCache, total_active_balance: u64, spec: &ChainSpec, ) -> Option<Self> { if let BeaconState::Base(ref base_state) = state { Self::new_for_base(att, state, base_state, total_active_balance, spec) } else { - Self::new_for_altair(att, state, total_active_balance, spec) + Self::new_for_altair(att, state, reward_cache, total_active_balance, spec) } } /// Initialise an attestation cover object for base/phase0 hard fork. pub fn new_for_base( - att: &'a Attestation<T>, + att: AttestationRef<'a, T>, state: &BeaconState<T>, base_state: &BeaconStateBase<T>, total_active_balance: u64, spec: &ChainSpec, ) -> Option<Self> { - let fresh_validators = earliest_attestation_validators(att, state, base_state); + let fresh_validators = earliest_attestation_validators(&att, state, base_state); let committee = state .get_beacon_committee(att.data.slot, att.data.index) .ok()?; @@ -67,45 +70,41 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { /// Initialise an attestation cover object for Altair or later. pub fn new_for_altair( - att: &'a Attestation<T>, + att: AttestationRef<'a, T>, state: &BeaconState<T>, + reward_cache: &'a RewardCache, total_active_balance: u64, spec: &ChainSpec, ) -> Option<Self> { - let committee = state - .get_beacon_committee(att.data.slot, att.data.index) - .ok()?; - let attesting_indices = - get_attesting_indices::<T>(committee.committee, &att.aggregation_bits).ok()?; + let att_data = att.attestation_data(); - let participation_list = if att.data.target.epoch == state.current_epoch() { - state.current_epoch_participation().ok()? - } else if att.data.target.epoch == state.previous_epoch() { - state.previous_epoch_participation().ok()? 
- } else { - return None; - }; - - let inclusion_delay = state.slot().as_u64().checked_sub(att.data.slot.as_u64())?; + let inclusion_delay = state.slot().as_u64().checked_sub(att_data.slot.as_u64())?; let att_participation_flags = - get_attestation_participation_flag_indices(state, &att.data, inclusion_delay, spec) + get_attestation_participation_flag_indices(state, &att_data, inclusion_delay, spec) .ok()?; let base_reward_per_increment = altair::BaseRewardPerIncrement::new(total_active_balance, spec).ok()?; - let fresh_validators_rewards = attesting_indices + let fresh_validators_rewards = att + .indexed + .attesting_indices .iter() .filter_map(|&index| { + if reward_cache + .has_attested_in_epoch(index, att_data.target.epoch) + .ok()? + { + return None; + } + let mut proposer_reward_numerator = 0; - let participation = participation_list.get(index)?; let base_reward = - altair::get_base_reward(state, index, base_reward_per_increment, spec).ok()?; + altair::get_base_reward(state, index as usize, base_reward_per_increment, spec) + .ok()?; for (flag_index, weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { - if att_participation_flags.contains(&flag_index) - && !participation.has_flag(flag_index).ok()? 
- { + if att_participation_flags.contains(&flag_index) { proposer_reward_numerator += base_reward.checked_mul(*weight)?; } } @@ -113,7 +112,7 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { let proposer_reward = proposer_reward_numerator .checked_div(WEIGHT_DENOMINATOR.checked_mul(spec.proposer_reward_quotient)?)?; - Some((index as u64, proposer_reward)).filter(|_| proposer_reward != 0) + Some((index, proposer_reward)).filter(|_| proposer_reward != 0) }) .collect(); @@ -126,10 +125,15 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> { type Object = Attestation<T>; + type Intermediate = AttestationRef<'a, T>; type Set = HashMap<u64, u64>; - fn object(&self) -> &Attestation<T> { - self.att + fn intermediate(&self) -> &AttestationRef<'a, T> { + &self.att + } + + fn convert_to_object(att_ref: &AttestationRef<'a, T>) -> Attestation<T> { + att_ref.clone_as_attestation() } fn covering_set(&self) -> &HashMap<u64, u64> { @@ -148,7 +152,7 @@ impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> { /// of slashable voting, which is rare. fn update_covering_set( &mut self, - best_att: &Attestation<T>, + best_att: &AttestationRef<'a, T>, covered_validators: &HashMap<u64, u64>, ) { if self.att.data.slot == best_att.data.slot && self.att.data.index == best_att.data.index { @@ -172,16 +176,16 @@ impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> { /// /// This isn't optimal, but with the Altair fork this code is obsolete and not worth upgrading. pub fn earliest_attestation_validators<T: EthSpec>( - attestation: &Attestation<T>, + attestation: &AttestationRef<T>, state: &BeaconState<T>, base_state: &BeaconStateBase<T>, ) -> BitList<T::MaxValidatorsPerCommittee> { // Bitfield of validators whose attestations are new/fresh. 
- let mut new_validators = attestation.aggregation_bits.clone(); + let mut new_validators = attestation.indexed.aggregation_bits.clone(); - let state_attestations = if attestation.data.target.epoch == state.current_epoch() { + let state_attestations = if attestation.checkpoint.target_epoch == state.current_epoch() { &base_state.current_epoch_attestations - } else if attestation.data.target.epoch == state.previous_epoch() { + } else if attestation.checkpoint.target_epoch == state.previous_epoch() { &base_state.previous_epoch_attestations } else { return BitList::with_capacity(0).unwrap(); diff --git a/beacon_node/operation_pool/src/attestation_id.rs b/beacon_node/operation_pool/src/attestation_id.rs index f496ecb3a3..b65975787e 100644 --- a/beacon_node/operation_pool/src/attestation_id.rs +++ b/beacon_node/operation_pool/src/attestation_id.rs @@ -1,45 +1,12 @@ use serde_derive::{Deserialize, Serialize}; -use ssz::ssz_encode; use ssz_derive::{Decode, Encode}; -use types::{AttestationData, ChainSpec, Domain, Epoch, Fork, Hash256}; /// Serialized `AttestationData` augmented with a domain to encode the fork info. +/// +/// [DEPRECATED] To be removed once all nodes have updated to schema v12. #[derive( PartialEq, Eq, Clone, Hash, Debug, PartialOrd, Ord, Encode, Decode, Serialize, Deserialize, )] pub struct AttestationId { v: Vec<u8>, } - -/// Number of domain bytes that the end of an attestation ID is padded with. 
-const DOMAIN_BYTES_LEN: usize = std::mem::size_of::<Hash256>(); - -impl AttestationId { - pub fn from_data( - attestation: &AttestationData, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> Self { - let mut bytes = ssz_encode(attestation); - let epoch = attestation.target.epoch; - bytes.extend_from_slice( - AttestationId::compute_domain_bytes(epoch, fork, genesis_validators_root, spec) - .as_bytes(), - ); - AttestationId { v: bytes } - } - - pub fn compute_domain_bytes( - epoch: Epoch, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> Hash256 { - spec.get_domain(epoch, Domain::BeaconAttester, fork, genesis_validators_root) - } - - pub fn domain_bytes_match(&self, domain_bytes: &Hash256) -> bool { - &self.v[self.v.len() - DOMAIN_BYTES_LEN..] == domain_bytes.as_bytes() - } -} diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs new file mode 100644 index 0000000000..0fb9bafd82 --- /dev/null +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -0,0 +1,245 @@ +use crate::AttestationStats; +use itertools::Itertools; +use std::collections::HashMap; +use types::{ + AggregateSignature, Attestation, AttestationData, BeaconState, BitList, Checkpoint, Epoch, + EthSpec, Hash256, Slot, +}; + +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] +pub struct CheckpointKey { + pub source: Checkpoint, + pub target_epoch: Epoch, +} + +#[derive(Debug, PartialEq, Eq, Hash)] +pub struct CompactAttestationData { + pub slot: Slot, + pub index: u64, + pub beacon_block_root: Hash256, + pub target_root: Hash256, +} + +#[derive(Debug, PartialEq)] +pub struct CompactIndexedAttestation<T: EthSpec> { + pub attesting_indices: Vec<u64>, + pub aggregation_bits: BitList<T::MaxValidatorsPerCommittee>, + pub signature: AggregateSignature, +} + +#[derive(Debug)] +pub struct SplitAttestation<T: EthSpec> { + pub checkpoint: CheckpointKey, + pub data: 
CompactAttestationData, + pub indexed: CompactIndexedAttestation<T>, +} + +#[derive(Debug, Clone)] +pub struct AttestationRef<'a, T: EthSpec> { + pub checkpoint: &'a CheckpointKey, + pub data: &'a CompactAttestationData, + pub indexed: &'a CompactIndexedAttestation<T>, +} + +#[derive(Debug, Default, PartialEq)] +pub struct AttestationMap<T: EthSpec> { + checkpoint_map: HashMap<CheckpointKey, AttestationDataMap<T>>, +} + +#[derive(Debug, Default, PartialEq)] +pub struct AttestationDataMap<T: EthSpec> { + attestations: HashMap<CompactAttestationData, Vec<CompactIndexedAttestation<T>>>, +} + +impl<T: EthSpec> SplitAttestation<T> { + pub fn new(attestation: Attestation<T>, attesting_indices: Vec<u64>) -> Self { + let checkpoint = CheckpointKey { + source: attestation.data.source, + target_epoch: attestation.data.target.epoch, + }; + let data = CompactAttestationData { + slot: attestation.data.slot, + index: attestation.data.index, + beacon_block_root: attestation.data.beacon_block_root, + target_root: attestation.data.target.root, + }; + let indexed = CompactIndexedAttestation { + attesting_indices, + aggregation_bits: attestation.aggregation_bits, + signature: attestation.signature, + }; + Self { + checkpoint, + data, + indexed, + } + } + + pub fn as_ref(&self) -> AttestationRef<T> { + AttestationRef { + checkpoint: &self.checkpoint, + data: &self.data, + indexed: &self.indexed, + } + } +} + +impl<'a, T: EthSpec> AttestationRef<'a, T> { + pub fn attestation_data(&self) -> AttestationData { + AttestationData { + slot: self.data.slot, + index: self.data.index, + beacon_block_root: self.data.beacon_block_root, + source: self.checkpoint.source, + target: Checkpoint { + epoch: self.checkpoint.target_epoch, + root: self.data.target_root, + }, + } + } + + pub fn clone_as_attestation(&self) -> Attestation<T> { + Attestation { + aggregation_bits: self.indexed.aggregation_bits.clone(), + data: self.attestation_data(), + signature: self.indexed.signature.clone(), + } + } +} + 
+impl CheckpointKey { + /// Return two checkpoint keys: `(previous, current)` for the previous and current epochs of + /// the `state`. + pub fn keys_for_state<T: EthSpec>(state: &BeaconState<T>) -> (Self, Self) { + ( + CheckpointKey { + source: state.previous_justified_checkpoint(), + target_epoch: state.previous_epoch(), + }, + CheckpointKey { + source: state.current_justified_checkpoint(), + target_epoch: state.current_epoch(), + }, + ) + } +} + +impl<T: EthSpec> CompactIndexedAttestation<T> { + pub fn signers_disjoint_from(&self, other: &Self) -> bool { + self.aggregation_bits + .intersection(&other.aggregation_bits) + .is_zero() + } + + pub fn aggregate(&mut self, other: &Self) { + self.attesting_indices = self + .attesting_indices + .drain(..) + .merge(other.attesting_indices.iter().copied()) + .dedup() + .collect(); + self.aggregation_bits = self.aggregation_bits.union(&other.aggregation_bits); + self.signature.add_assign_aggregate(&other.signature); + } +} + +impl<T: EthSpec> AttestationMap<T> { + pub fn insert(&mut self, attestation: Attestation<T>, attesting_indices: Vec<u64>) { + let SplitAttestation { + checkpoint, + data, + indexed, + } = SplitAttestation::new(attestation, attesting_indices); + + let attestation_map = self + .checkpoint_map + .entry(checkpoint) + .or_insert_with(AttestationDataMap::default); + let attestations = attestation_map + .attestations + .entry(data) + .or_insert_with(Vec::new); + + // Greedily aggregate the attestation with all existing attestations. + // NOTE: this is sub-optimal and in future we will remove this in favour of max-clique + // aggregation. 
+ let mut aggregated = false; + for existing_attestation in attestations.iter_mut() { + if existing_attestation.signers_disjoint_from(&indexed) { + existing_attestation.aggregate(&indexed); + aggregated = true; + } else if *existing_attestation == indexed { + aggregated = true; + } + } + + if !aggregated { + attestations.push(indexed); + } + } + + /// Iterate all attestations matching the given `checkpoint_key`. + pub fn get_attestations<'a>( + &'a self, + checkpoint_key: &'a CheckpointKey, + ) -> impl Iterator<Item = AttestationRef<'a, T>> + 'a { + self.checkpoint_map + .get(checkpoint_key) + .into_iter() + .flat_map(|attestation_map| attestation_map.iter(checkpoint_key)) + } + + /// Iterate all attestations in the map. + pub fn iter(&self) -> impl Iterator<Item = AttestationRef<T>> { + self.checkpoint_map + .iter() + .flat_map(|(checkpoint_key, attestation_map)| attestation_map.iter(checkpoint_key)) + } + + /// Prune attestations that are from before the previous epoch. + pub fn prune(&mut self, current_epoch: Epoch) { + self.checkpoint_map + .retain(|checkpoint_key, _| current_epoch <= checkpoint_key.target_epoch + 1); + } + + /// Statistics about all attestations stored in the map. 
+ pub fn stats(&self) -> AttestationStats { + self.checkpoint_map + .values() + .map(AttestationDataMap::stats) + .fold(AttestationStats::default(), |mut acc, new| { + acc.num_attestations += new.num_attestations; + acc.num_attestation_data += new.num_attestation_data; + acc.max_aggregates_per_data = + std::cmp::max(acc.max_aggregates_per_data, new.max_aggregates_per_data); + acc + }) + } +} + +impl<T: EthSpec> AttestationDataMap<T> { + pub fn iter<'a>( + &'a self, + checkpoint_key: &'a CheckpointKey, + ) -> impl Iterator<Item = AttestationRef<'a, T>> + 'a { + self.attestations.iter().flat_map(|(data, vec_indexed)| { + vec_indexed.iter().map(|indexed| AttestationRef { + checkpoint: checkpoint_key, + data, + indexed, + }) + }) + } + + pub fn stats(&self) -> AttestationStats { + let mut stats = AttestationStats::default(); + + for aggregates in self.attestations.values() { + stats.num_attestations += aggregates.len(); + stats.num_attestation_data += 1; + stats.max_aggregates_per_data = + std::cmp::max(stats.max_aggregates_per_data, aggregates.len()); + } + stats + } +} diff --git a/beacon_node/operation_pool/src/attester_slashing.rs b/beacon_node/operation_pool/src/attester_slashing.rs index 2cb63ad252..f5916384d4 100644 --- a/beacon_node/operation_pool/src/attester_slashing.rs +++ b/beacon_node/operation_pool/src/attester_slashing.rs @@ -39,14 +39,18 @@ impl<'a, T: EthSpec> AttesterSlashingMaxCover<'a, T> { impl<'a, T: EthSpec> MaxCover for AttesterSlashingMaxCover<'a, T> { /// The result type, of which we would eventually like a collection of maximal quality. type Object = AttesterSlashing<T>; + type Intermediate = AttesterSlashing<T>; /// The type used to represent sets. type Set = HashMap<u64, u64>; - /// Extract an object for inclusion in a solution. 
- fn object(&self) -> &AttesterSlashing<T> { + fn intermediate(&self) -> &AttesterSlashing<T> { self.slashing } + fn convert_to_object(slashing: &AttesterSlashing<T>) -> AttesterSlashing<T> { + slashing.clone() + } + /// Get the set of elements covered. fn covering_set(&self) -> &HashMap<u64, u64> { &self.effective_balances diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 771dca12f6..8c335189c6 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1,34 +1,38 @@ mod attestation; mod attestation_id; +mod attestation_storage; mod attester_slashing; mod max_cover; mod metrics; mod persistence; +mod reward_cache; mod sync_aggregate_id; pub use attestation::AttMaxCover; +pub use attestation_storage::{AttestationRef, SplitAttestation}; pub use max_cover::MaxCover; -pub use persistence::{PersistedOperationPool, PersistedOperationPoolAltair}; +pub use persistence::{ + PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5, +}; +pub use reward_cache::RewardCache; +use crate::attestation_storage::{AttestationMap, CheckpointKey}; use crate::sync_aggregate_id::SyncAggregateId; -use attestation_id::AttestationId; use attester_slashing::AttesterSlashingMaxCover; use max_cover::maximum_cover; -use parking_lot::RwLock; +use parking_lot::{RwLock, RwLockWriteGuard}; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::per_block_processing::{ - get_slashable_indices_modular, verify_attestation_for_block_inclusion, verify_exit, - VerifySignatures, + get_slashable_indices_modular, verify_exit, VerifySignatures, }; -use state_processing::SigVerifiedOp; +use state_processing::{SigVerifiedOp, VerifyOperation}; use std::collections::{hash_map::Entry, HashMap, HashSet}; use std::marker::PhantomData; use std::ptr; use types::{ - sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, Attestation, AttesterSlashing, - 
BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, ForkVersion, Hash256, - ProposerSlashing, SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, - Validator, + sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, Attestation, AttestationData, + AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ProposerSlashing, + SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, Validator, }; type SyncContributions<T> = RwLock<HashMap<SyncAggregateId, Vec<SyncCommitteeContribution<T>>>>; @@ -36,15 +40,17 @@ type SyncContributions<T> = RwLock<HashMap<SyncAggregateId, Vec<SyncCommitteeCon #[derive(Default, Debug)] pub struct OperationPool<T: EthSpec + Default> { /// Map from attestation ID (see below) to vectors of attestations. - attestations: RwLock<HashMap<AttestationId, Vec<Attestation<T>>>>, + attestations: RwLock<AttestationMap<T>>, /// Map from sync aggregate ID to the best `SyncCommitteeContribution`s seen for that ID. sync_contributions: SyncContributions<T>, /// Set of attester slashings, and the fork version they were verified against. - attester_slashings: RwLock<HashSet<(AttesterSlashing<T>, ForkVersion)>>, + attester_slashings: RwLock<HashSet<SigVerifiedOp<AttesterSlashing<T>, T>>>, /// Map from proposer index to slashing. - proposer_slashings: RwLock<HashMap<u64, ProposerSlashing>>, + proposer_slashings: RwLock<HashMap<u64, SigVerifiedOp<ProposerSlashing, T>>>, /// Map from exiting validator to their exit data. - voluntary_exits: RwLock<HashMap<u64, SignedVoluntaryExit>>, + voluntary_exits: RwLock<HashMap<u64, SigVerifiedOp<SignedVoluntaryExit, T>>>, + /// Reward cache for accelerating attestation packing. 
+ reward_cache: RwLock<RewardCache>, _phantom: PhantomData<T>, } @@ -53,9 +59,16 @@ pub enum OpPoolError { GetAttestationsTotalBalanceError(BeaconStateError), GetBlockRootError(BeaconStateError), SyncAggregateError(SyncAggregateError), + RewardCacheUpdatePrevEpoch(BeaconStateError), + RewardCacheUpdateCurrEpoch(BeaconStateError), + RewardCacheGetBlockRoot(BeaconStateError), + RewardCacheWrongEpoch, + RewardCacheValidatorUnknown(BeaconStateError), + RewardCacheOutOfBounds, IncorrectOpPoolVariant, } +#[derive(Default)] pub struct AttestationStats { /// Total number of attestations for all committeees/indices/votes. pub num_attestations: usize, @@ -176,95 +189,45 @@ impl<T: EthSpec> OperationPool<T> { pub fn insert_attestation( &self, attestation: Attestation<T>, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, + attesting_indices: Vec<u64>, ) -> Result<(), AttestationValidationError> { - let id = AttestationId::from_data(&attestation.data, fork, genesis_validators_root, spec); - - // Take a write lock on the attestations map. - let mut attestations = self.attestations.write(); - - let existing_attestations = match attestations.entry(id) { - Entry::Vacant(entry) => { - entry.insert(vec![attestation]); - return Ok(()); - } - Entry::Occupied(entry) => entry.into_mut(), - }; - - let mut aggregated = false; - for existing_attestation in existing_attestations.iter_mut() { - if existing_attestation.signers_disjoint_from(&attestation) { - existing_attestation.aggregate(&attestation); - aggregated = true; - } else if *existing_attestation == attestation { - aggregated = true; - } - } - - if !aggregated { - existing_attestations.push(attestation); - } - + self.attestations + .write() + .insert(attestation, attesting_indices); Ok(()) } /// Total number of attestations in the pool, including attestations for the same data. 
pub fn num_attestations(&self) -> usize { - self.attestations.read().values().map(Vec::len).sum() + self.attestation_stats().num_attestations } pub fn attestation_stats(&self) -> AttestationStats { - let mut num_attestations = 0; - let mut num_attestation_data = 0; - let mut max_aggregates_per_data = 0; - - for aggregates in self.attestations.read().values() { - num_attestations += aggregates.len(); - num_attestation_data += 1; - max_aggregates_per_data = std::cmp::max(max_aggregates_per_data, aggregates.len()); - } - AttestationStats { - num_attestations, - num_attestation_data, - max_aggregates_per_data, - } + self.attestations.read().stats() } /// Return all valid attestations for the given epoch, for use in max cover. + #[allow(clippy::too_many_arguments)] fn get_valid_attestations_for_epoch<'a>( &'a self, - epoch: Epoch, - all_attestations: &'a HashMap<AttestationId, Vec<Attestation<T>>>, + checkpoint_key: &'a CheckpointKey, + all_attestations: &'a AttestationMap<T>, state: &'a BeaconState<T>, + reward_cache: &'a RewardCache, total_active_balance: u64, - validity_filter: impl FnMut(&&Attestation<T>) -> bool + Send, + validity_filter: impl FnMut(&AttestationRef<'a, T>) -> bool + Send, spec: &'a ChainSpec, ) -> impl Iterator<Item = AttMaxCover<'a, T>> + Send { - let domain_bytes = AttestationId::compute_domain_bytes( - epoch, - &state.fork(), - state.genesis_validators_root(), - spec, - ); all_attestations - .iter() - .filter(move |(key, _)| key.domain_bytes_match(&domain_bytes)) - .flat_map(|(_, attestations)| attestations) - .filter(move |attestation| attestation.data.target.epoch == epoch) - .filter(move |attestation| { - // Ensure attestations are valid for block inclusion - verify_attestation_for_block_inclusion( - state, - attestation, - VerifySignatures::False, - spec, - ) - .is_ok() + .get_attestations(checkpoint_key) + .filter(|att| { + att.data.slot + spec.min_attestation_inclusion_delay <= state.slot() + && state.slot() <= att.data.slot + 
T::slots_per_epoch() }) .filter(validity_filter) - .filter_map(move |att| AttMaxCover::new(att, state, total_active_balance, spec)) + .filter_map(move |att| { + AttMaxCover::new(att, state, reward_cache, total_active_balance, spec) + }) } /// Get a list of attestations for inclusion in a block. @@ -276,18 +239,24 @@ impl<T: EthSpec> OperationPool<T> { pub fn get_attestations( &self, state: &BeaconState<T>, - prev_epoch_validity_filter: impl FnMut(&&Attestation<T>) -> bool + Send, - curr_epoch_validity_filter: impl FnMut(&&Attestation<T>) -> bool + Send, + prev_epoch_validity_filter: impl for<'a> FnMut(&AttestationRef<'a, T>) -> bool + Send, + curr_epoch_validity_filter: impl for<'a> FnMut(&AttestationRef<'a, T>) -> bool + Send, spec: &ChainSpec, ) -> Result<Vec<Attestation<T>>, OpPoolError> { // Attestations for the current fork, which may be from the current or previous epoch. - let prev_epoch = state.previous_epoch(); - let current_epoch = state.current_epoch(); + let (prev_epoch_key, curr_epoch_key) = CheckpointKey::keys_for_state(state); let all_attestations = self.attestations.read(); let total_active_balance = state .get_total_active_balance() .map_err(OpPoolError::GetAttestationsTotalBalanceError)?; + // Update the reward cache. + let reward_timer = metrics::start_timer(&metrics::BUILD_REWARD_CACHE_TIME); + let mut reward_cache = self.reward_cache.write(); + reward_cache.update(state)?; + let reward_cache = RwLockWriteGuard::downgrade(reward_cache); + drop(reward_timer); + // Split attestations for the previous & current epochs, so that we // can optimise them individually in parallel. 
let mut num_prev_valid = 0_i64; @@ -295,9 +264,10 @@ impl<T: EthSpec> OperationPool<T> { let prev_epoch_att = self .get_valid_attestations_for_epoch( - prev_epoch, + &prev_epoch_key, &*all_attestations, state, + &*reward_cache, total_active_balance, prev_epoch_validity_filter, spec, @@ -305,9 +275,10 @@ impl<T: EthSpec> OperationPool<T> { .inspect(|_| num_prev_valid += 1); let curr_epoch_att = self .get_valid_attestations_for_epoch( - current_epoch, + &curr_epoch_key, &*all_attestations, state, + &*reward_cache, total_active_balance, curr_epoch_validity_filter, spec, @@ -328,7 +299,7 @@ impl<T: EthSpec> OperationPool<T> { move || { let _timer = metrics::start_timer(&metrics::ATTESTATION_PREV_EPOCH_PACKING_TIME); // If we're in the genesis epoch, just use the current epoch attestations. - if prev_epoch == current_epoch { + if prev_epoch_key == curr_epoch_key { vec![] } else { maximum_cover(prev_epoch_att, prev_epoch_limit, "prev_epoch_attestations") @@ -356,36 +327,26 @@ impl<T: EthSpec> OperationPool<T> { /// Remove attestations which are too old to be included in a block. pub fn prune_attestations(&self, current_epoch: Epoch) { - // Prune attestations that are from before the previous epoch. - self.attestations.write().retain(|_, attestations| { - // All the attestations in this bucket have the same data, so we only need to - // check the first one. - attestations - .first() - .map_or(false, |att| current_epoch <= att.data.target.epoch + 1) - }); + self.attestations.write().prune(current_epoch); } /// Insert a proposer slashing into the pool. 
pub fn insert_proposer_slashing( &self, - verified_proposer_slashing: SigVerifiedOp<ProposerSlashing>, + verified_proposer_slashing: SigVerifiedOp<ProposerSlashing, T>, ) { - let slashing = verified_proposer_slashing.into_inner(); - self.proposer_slashings - .write() - .insert(slashing.signed_header_1.message.proposer_index, slashing); + self.proposer_slashings.write().insert( + verified_proposer_slashing.as_inner().proposer_index(), + verified_proposer_slashing, + ); } /// Insert an attester slashing into the pool. pub fn insert_attester_slashing( &self, - verified_slashing: SigVerifiedOp<AttesterSlashing<T>>, - fork: Fork, + verified_slashing: SigVerifiedOp<AttesterSlashing<T>, T>, ) { - self.attester_slashings - .write() - .insert((verified_slashing.into_inner(), fork.current_version)); + self.attester_slashings.write().insert(verified_slashing); } /// Get proposer and attester slashings for inclusion in a block. @@ -405,11 +366,13 @@ impl<T: EthSpec> OperationPool<T> { let proposer_slashings = filter_limit_operations( self.proposer_slashings.read().values(), |slashing| { - state - .validators() - .get(slashing.signed_header_1.message.proposer_index as usize) - .map_or(false, |validator| !validator.slashed) + slashing.signature_is_still_valid(&state.fork()) + && state + .validators() + .get(slashing.as_inner().signed_header_1.message.proposer_index as usize) + .map_or(false, |validator| !validator.slashed) }, + |slashing| slashing.as_inner().clone(), T::MaxProposerSlashings::to_usize(), ); @@ -417,30 +380,10 @@ impl<T: EthSpec> OperationPool<T> { // slashings. 
let mut to_be_slashed = proposer_slashings .iter() - .map(|s| s.signed_header_1.message.proposer_index) - .collect::<HashSet<_>>(); + .map(|s| s.proposer_index()) + .collect(); - let reader = self.attester_slashings.read(); - - let relevant_attester_slashings = reader.iter().flat_map(|(slashing, fork)| { - if *fork == state.fork().previous_version || *fork == state.fork().current_version { - AttesterSlashingMaxCover::new(slashing, &to_be_slashed, state) - } else { - None - } - }); - - let attester_slashings = maximum_cover( - relevant_attester_slashings, - T::MaxAttesterSlashings::to_usize(), - "attester_slashings", - ) - .into_iter() - .map(|cover| { - to_be_slashed.extend(cover.covering_set().keys()); - cover.object().clone() - }) - .collect(); + let attester_slashings = self.get_attester_slashings(state, &mut to_be_slashed); let voluntary_exits = self.get_voluntary_exits( state, @@ -451,6 +394,37 @@ impl<T: EthSpec> OperationPool<T> { (proposer_slashings, attester_slashings, voluntary_exits) } + /// Get attester slashings taking into account already slashed validators. + /// + /// This function *must* remain private. + fn get_attester_slashings( + &self, + state: &BeaconState<T>, + to_be_slashed: &mut HashSet<u64>, + ) -> Vec<AttesterSlashing<T>> { + let reader = self.attester_slashings.read(); + + let relevant_attester_slashings = reader.iter().flat_map(|slashing| { + if slashing.signature_is_still_valid(&state.fork()) { + AttesterSlashingMaxCover::new(slashing.as_inner(), to_be_slashed, state) + } else { + None + } + }); + + maximum_cover( + relevant_attester_slashings, + T::MaxAttesterSlashings::to_usize(), + "attester_slashings", + ) + .into_iter() + .map(|cover| { + to_be_slashed.extend(cover.covering_set().keys()); + cover.intermediate().clone() + }) + .collect() + } + /// Prune proposer slashings for validators which are exited in the finalized epoch. 
pub fn prune_proposer_slashings(&self, head_state: &BeaconState<T>) { prune_validator_hash_map( @@ -463,30 +437,23 @@ impl<T: EthSpec> OperationPool<T> { /// Prune attester slashings for all slashed or withdrawn validators, or attestations on another /// fork. pub fn prune_attester_slashings(&self, head_state: &BeaconState<T>) { - self.attester_slashings - .write() - .retain(|(slashing, fork_version)| { - let previous_fork_is_finalized = - head_state.finalized_checkpoint().epoch >= head_state.fork().epoch; - // Prune any slashings which don't match the current fork version, or the previous - // fork version if it is not finalized yet. - let fork_ok = (*fork_version == head_state.fork().current_version) - || (*fork_version == head_state.fork().previous_version - && !previous_fork_is_finalized); - // Slashings that don't slash any validators can also be dropped. - let slashing_ok = - get_slashable_indices_modular(head_state, slashing, |_, validator| { - // Declare that a validator is still slashable if they have not exited prior - // to the finalized epoch. - // - // We cannot check the `slashed` field since the `head` is not finalized and - // a fork could un-slash someone. - validator.exit_epoch > head_state.finalized_checkpoint().epoch - }) - .map_or(false, |indices| !indices.is_empty()); + self.attester_slashings.write().retain(|slashing| { + // Check that the attestation's signature is still valid wrt the fork version. + let signature_ok = slashing.signature_is_still_valid(&head_state.fork()); + // Slashings that don't slash any validators can also be dropped. + let slashing_ok = + get_slashable_indices_modular(head_state, slashing.as_inner(), |_, validator| { + // Declare that a validator is still slashable if they have not exited prior + // to the finalized epoch. + // + // We cannot check the `slashed` field since the `head` is not finalized and + // a fork could un-slash someone. 
+ validator.exit_epoch > head_state.finalized_checkpoint().epoch + }) + .map_or(false, |indices| !indices.is_empty()); - fork_ok && slashing_ok - }); + signature_ok && slashing_ok + }); } /// Total number of attester slashings in the pool. @@ -500,11 +467,10 @@ impl<T: EthSpec> OperationPool<T> { } /// Insert a voluntary exit that has previously been checked elsewhere. - pub fn insert_voluntary_exit(&self, verified_exit: SigVerifiedOp<SignedVoluntaryExit>) { - let exit = verified_exit.into_inner(); + pub fn insert_voluntary_exit(&self, exit: SigVerifiedOp<SignedVoluntaryExit, T>) { self.voluntary_exits .write() - .insert(exit.message.validator_index, exit); + .insert(exit.as_inner().message.validator_index, exit); } /// Get a list of voluntary exits for inclusion in a block. @@ -519,7 +485,12 @@ impl<T: EthSpec> OperationPool<T> { { filter_limit_operations( self.voluntary_exits.read().values(), - |exit| filter(exit) && verify_exit(state, exit, VerifySignatures::False, spec).is_ok(), + |exit| { + filter(exit.as_inner()) + && exit.signature_is_still_valid(&state.fork()) + && verify_exit(state, exit.as_inner(), VerifySignatures::False, spec).is_ok() + }, + |exit| exit.as_inner().clone(), T::MaxVoluntaryExits::to_usize(), ) } @@ -558,8 +529,8 @@ impl<T: EthSpec> OperationPool<T> { pub fn get_all_attestations(&self) -> Vec<Attestation<T>> { self.attestations .read() - .values() - .flat_map(|attns| attns.iter().cloned()) + .iter() + .map(|att| att.clone_as_attestation()) .collect() } @@ -568,14 +539,13 @@ impl<T: EthSpec> OperationPool<T> { /// This method may return objects that are invalid for block inclusion. 
pub fn get_filtered_attestations<F>(&self, filter: F) -> Vec<Attestation<T>> where - F: Fn(&Attestation<T>) -> bool, + F: Fn(&AttestationData) -> bool, { self.attestations .read() - .values() - .flat_map(|attns| attns.iter()) - .filter(|attn| filter(*attn)) - .cloned() + .iter() + .filter(|att| filter(&att.attestation_data())) + .map(|att| att.clone_as_attestation()) .collect() } @@ -586,7 +556,7 @@ impl<T: EthSpec> OperationPool<T> { self.attester_slashings .read() .iter() - .map(|(slashing, _)| slashing.clone()) + .map(|slashing| slashing.as_inner().clone()) .collect() } @@ -597,7 +567,7 @@ impl<T: EthSpec> OperationPool<T> { self.proposer_slashings .read() .iter() - .map(|(_, slashing)| slashing.clone()) + .map(|(_, slashing)| slashing.as_inner().clone()) .collect() } @@ -608,23 +578,29 @@ impl<T: EthSpec> OperationPool<T> { self.voluntary_exits .read() .iter() - .map(|(_, exit)| exit.clone()) + .map(|(_, exit)| exit.as_inner().clone()) .collect() } } /// Filter up to a maximum number of operations out of an iterator. -fn filter_limit_operations<'a, T: 'a, I, F>(operations: I, filter: F, limit: usize) -> Vec<T> +fn filter_limit_operations<'a, T: 'a, V: 'a, I, F, G>( + operations: I, + filter: F, + mapping: G, + limit: usize, +) -> Vec<V> where I: IntoIterator<Item = &'a T>, F: Fn(&T) -> bool, + G: Fn(&T) -> V, T: Clone, { operations .into_iter() .filter(|x| filter(*x)) .take(limit) - .cloned() + .map(mapping) .collect() } @@ -634,17 +610,19 @@ where /// in the state's validator registry and then passed to `prune_if`. /// Entries for unknown validators will be kept. 
fn prune_validator_hash_map<T, F, E: EthSpec>( - map: &mut HashMap<u64, T>, + map: &mut HashMap<u64, SigVerifiedOp<T, E>>, prune_if: F, head_state: &BeaconState<E>, ) where F: Fn(&Validator) -> bool, + T: VerifyOperation<E>, { - map.retain(|&validator_index, _| { - head_state - .validators() - .get(validator_index as usize) - .map_or(true, |validator| !prune_if(validator)) + map.retain(|&validator_index, op| { + op.signature_is_still_valid(&head_state.fork()) + && head_state + .validators() + .get(validator_index as usize) + .map_or(true, |validator| !prune_if(validator)) }); } @@ -655,6 +633,7 @@ impl<T: EthSpec + Default> PartialEq for OperationPool<T> { return true; } *self.attestations.read() == *other.attestations.read() + && *self.sync_contributions.read() == *other.sync_contributions.read() && *self.attester_slashings.read() == *other.attester_slashings.read() && *self.proposer_slashings.read() == *other.proposer_slashings.read() && *self.voluntary_exits.read() == *other.voluntary_exits.read() @@ -669,7 +648,8 @@ mod release_tests { test_spec, BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee, }; use lazy_static::lazy_static; - use state_processing::VerifyOperation; + use maplit::hashset; + use state_processing::{common::get_attesting_indices_from_state, VerifyOperation}; use std::collections::BTreeSet; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::*; @@ -689,6 +669,7 @@ mod release_tests { .spec_or_default(spec) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_ephemeral_store() + .mock_execution_layer() .build(); harness.advance_slot(); @@ -714,7 +695,6 @@ mod release_tests { num_committees: usize, ) -> (BeaconChainHarness<EphemeralHarnessType<E>>, ChainSpec) { let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(Epoch::new(0)); let num_validators = @@ -780,10 +760,19 @@ mod release_tests { }) .unwrap(); + let att1_indices = get_attesting_indices_from_state(&state, &att1).unwrap(); + let att2_indices = 
get_attesting_indices_from_state(&state, &att2).unwrap(); + let att1_split = SplitAttestation::new(att1.clone(), att1_indices); + let att2_split = SplitAttestation::new(att2.clone(), att2_indices); + assert_eq!( att1.aggregation_bits.num_set_bits(), - earliest_attestation_validators(&att1, &state, state.as_base().unwrap()) - .num_set_bits() + earliest_attestation_validators( + &att1_split.as_ref(), + &state, + state.as_base().unwrap() + ) + .num_set_bits() ); state @@ -800,8 +789,12 @@ mod release_tests { assert_eq!( committees.get(0).unwrap().committee.len() - 2, - earliest_attestation_validators(&att2, &state, state.as_base().unwrap()) - .num_set_bits() + earliest_attestation_validators( + &att2_split.as_ref(), + &state, + state.as_base().unwrap() + ) + .num_set_bits() ); } } @@ -840,14 +833,12 @@ mod release_tests { ); for (atts, _) in attestations { - for att in atts.into_iter() { - op_pool - .insert_attestation(att.0, &state.fork(), state.genesis_validators_root(), spec) - .unwrap(); + for (att, _) in atts { + let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + op_pool.insert_attestation(att, attesting_indices).unwrap(); } } - assert_eq!(op_pool.attestations.read().len(), committees.len()); assert_eq!(op_pool.num_attestations(), committees.len()); // Before the min attestation inclusion delay, get_attestations shouldn't return anything. 
@@ -913,17 +904,11 @@ mod release_tests { for (_, aggregate) in attestations { let att = aggregate.unwrap().message.aggregate; + let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); op_pool - .insert_attestation( - att.clone(), - &state.fork(), - state.genesis_validators_root(), - spec, - ) - .unwrap(); - op_pool - .insert_attestation(att, &state.fork(), state.genesis_validators_root(), spec) + .insert_attestation(att.clone(), attesting_indices.clone()) .unwrap(); + op_pool.insert_attestation(att, attesting_indices).unwrap(); } assert_eq!(op_pool.num_attestations(), committees.len()); @@ -1007,16 +992,17 @@ mod release_tests { .collect::<Vec<_>>(); for att in aggs1.into_iter().chain(aggs2.into_iter()) { - op_pool - .insert_attestation(att, &state.fork(), state.genesis_validators_root(), spec) - .unwrap(); + let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + op_pool.insert_attestation(att, attesting_indices).unwrap(); } } // The attestations should get aggregated into two attestations that comprise all // validators. 
- assert_eq!(op_pool.attestations.read().len(), committees.len()); - assert_eq!(op_pool.num_attestations(), 2 * committees.len()); + let stats = op_pool.attestation_stats(); + assert_eq!(stats.num_attestation_data, committees.len()); + assert_eq!(stats.num_attestations, 2 * committees.len()); + assert_eq!(stats.max_aggregates_per_data, 2); } /// Create a bunch of attestations signed by a small number of validators, and another @@ -1078,9 +1064,8 @@ mod release_tests { .collect::<Vec<_>>(); for att in aggs { - op_pool - .insert_attestation(att, &state.fork(), state.genesis_validators_root(), spec) - .unwrap(); + let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + op_pool.insert_attestation(att, attesting_indices).unwrap(); } }; @@ -1095,12 +1080,13 @@ mod release_tests { let num_small = target_committee_size / small_step_size; let num_big = target_committee_size / big_step_size; - assert_eq!(op_pool.attestations.read().len(), committees.len()); + let stats = op_pool.attestation_stats(); + assert_eq!(stats.num_attestation_data, committees.len()); assert_eq!( - op_pool.num_attestations(), + stats.num_attestations, (num_small + num_big) * committees.len() ); - assert!(op_pool.num_attestations() > max_attestations); + assert!(stats.num_attestations > max_attestations); *state.slot_mut() += spec.min_attestation_inclusion_delay; let best_attestations = op_pool @@ -1173,9 +1159,8 @@ mod release_tests { .collect::<Vec<_>>(); for att in aggs { - op_pool - .insert_attestation(att, &state.fork(), state.genesis_validators_root(), spec) - .unwrap(); + let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + op_pool.insert_attestation(att, attesting_indices).unwrap(); } }; @@ -1190,7 +1175,10 @@ mod release_tests { let num_small = target_committee_size / small_step_size; let num_big = target_committee_size / big_step_size; - assert_eq!(op_pool.attestations.read().len(), committees.len()); + assert_eq!( + 
op_pool.attestation_stats().num_attestation_data, + committees.len() + ); assert_eq!( op_pool.num_attestations(), (num_small + num_big) * committees.len() @@ -1210,11 +1198,21 @@ mod release_tests { // Used for asserting that rewards are in decreasing order. let mut prev_reward = u64::max_value(); - for att in &best_attestations { - let mut fresh_validators_rewards = - AttMaxCover::new(att, &state, total_active_balance, spec) - .unwrap() - .fresh_validators_rewards; + let mut reward_cache = RewardCache::default(); + reward_cache.update(&state).unwrap(); + + for att in best_attestations { + let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + let split_attestation = SplitAttestation::new(att, attesting_indices); + let mut fresh_validators_rewards = AttMaxCover::new( + split_attestation.as_ref(), + &state, + &reward_cache, + total_active_balance, + spec, + ) + .unwrap() + .fresh_validators_rewards; // Remove validators covered by previous attestations. fresh_validators_rewards @@ -1281,10 +1279,7 @@ mod release_tests { let op_pool = OperationPool::<MainnetEthSpec>::new(); let slashing = harness.make_attester_slashing(vec![1, 3, 5, 7, 9]); - op_pool.insert_attester_slashing( - slashing.clone().validate(&state, spec).unwrap(), - state.fork(), - ); + op_pool.insert_attester_slashing(slashing.clone().validate(&state, spec).unwrap()); op_pool.prune_attester_slashings(&state); assert_eq!( op_pool.get_slashings_and_exits(&state, &harness.spec).1, @@ -1305,22 +1300,10 @@ mod release_tests { let slashing_3 = harness.make_attester_slashing(vec![4, 5, 6]); let slashing_4 = harness.make_attester_slashing(vec![7, 8, 9, 10]); - op_pool.insert_attester_slashing( - slashing_1.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - slashing_2.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - slashing_3.clone().validate(&state, spec).unwrap(), - state.fork(), - 
); - op_pool.insert_attester_slashing( - slashing_4.clone().validate(&state, spec).unwrap(), - state.fork(), - ); + op_pool.insert_attester_slashing(slashing_1.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_2.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_3.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_4.clone().validate(&state, spec).unwrap()); let best_slashings = op_pool.get_slashings_and_exits(&state, &harness.spec); assert_eq!(best_slashings.1, vec![slashing_4, slashing_3]); @@ -1339,22 +1322,10 @@ mod release_tests { let slashing_3 = harness.make_attester_slashing(vec![5, 6]); let slashing_4 = harness.make_attester_slashing(vec![6]); - op_pool.insert_attester_slashing( - slashing_1.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - slashing_2.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - slashing_3.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - slashing_4.clone().validate(&state, spec).unwrap(), - state.fork(), - ); + op_pool.insert_attester_slashing(slashing_1.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_2.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_3.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_4.clone().validate(&state, spec).unwrap()); let best_slashings = op_pool.get_slashings_and_exits(&state, &harness.spec); assert_eq!(best_slashings.1, vec![slashing_1, slashing_3]); @@ -1374,18 +1345,9 @@ mod release_tests { let a_slashing_3 = harness.make_attester_slashing(vec![5, 6]); op_pool.insert_proposer_slashing(p_slashing.clone().validate(&state, spec).unwrap()); - op_pool.insert_attester_slashing( - a_slashing_1.clone().validate(&state, spec).unwrap(), - state.fork(), 
- ); - op_pool.insert_attester_slashing( - a_slashing_2.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - a_slashing_3.clone().validate(&state, spec).unwrap(), - state.fork(), - ); + op_pool.insert_attester_slashing(a_slashing_1.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(a_slashing_2.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(a_slashing_3.clone().validate(&state, spec).unwrap()); let best_slashings = op_pool.get_slashings_and_exits(&state, &harness.spec); assert_eq!(best_slashings.1, vec![a_slashing_1, a_slashing_3]); @@ -1406,18 +1368,9 @@ mod release_tests { let slashing_2 = harness.make_attester_slashing(vec![5, 6]); let slashing_3 = harness.make_attester_slashing(vec![1, 2, 3]); - op_pool.insert_attester_slashing( - slashing_1.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - slashing_2.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - slashing_3.clone().validate(&state, spec).unwrap(), - state.fork(), - ); + op_pool.insert_attester_slashing(slashing_1.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_2.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_3.clone().validate(&state, spec).unwrap()); let best_slashings = op_pool.get_slashings_and_exits(&state, &harness.spec); assert_eq!(best_slashings.1, vec![slashing_1, slashing_3]); @@ -1438,18 +1391,9 @@ mod release_tests { let slashing_2 = harness.make_attester_slashing(vec![4, 5, 6]); let slashing_3 = harness.make_attester_slashing(vec![7, 8]); - op_pool.insert_attester_slashing( - slashing_1.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - slashing_2.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - 
slashing_3.clone().validate(&state, spec).unwrap(), - state.fork(), - ); + op_pool.insert_attester_slashing(slashing_1.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_2.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_3.clone().validate(&state, spec).unwrap()); let best_slashings = op_pool.get_slashings_and_exits(&state, &harness.spec); assert_eq!(best_slashings.1, vec![slashing_2, slashing_3]); @@ -1718,4 +1662,289 @@ mod release_tests { expected_bits ); } + + fn cross_fork_harness<E: EthSpec>() -> (BeaconChainHarness<EphemeralHarnessType<E>>, ChainSpec) + { + let mut spec = test_spec::<E>(); + + // Give some room to sign surround slashings. + spec.altair_fork_epoch = Some(Epoch::new(3)); + spec.bellatrix_fork_epoch = Some(Epoch::new(6)); + + // To make exits immediately valid. + spec.shard_committee_period = 0; + + let num_validators = 32; + + let harness = get_harness::<E>(num_validators, Some(spec.clone())); + (harness, spec) + } + + /// Test several cross-fork voluntary exits: + /// + /// - phase0 exit (not valid after Bellatrix) + /// - phase0 exit signed with Altair fork version (only valid after Bellatrix) + #[tokio::test] + async fn cross_fork_exits() { + let (harness, spec) = cross_fork_harness::<MainnetEthSpec>(); + let altair_fork_epoch = spec.altair_fork_epoch.unwrap(); + let bellatrix_fork_epoch = spec.bellatrix_fork_epoch.unwrap(); + let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); + + let op_pool = OperationPool::<MainnetEthSpec>::new(); + + // Sign an exit in phase0 with a phase0 epoch. + let exit1 = harness.make_voluntary_exit(0, Epoch::new(0)); + + // Advance to Altair. + harness + .extend_to_slot(altair_fork_epoch.start_slot(slots_per_epoch)) + .await; + let altair_head = harness.chain.canonical_head.cached_head().snapshot; + assert_eq!(altair_head.beacon_state.current_epoch(), altair_fork_epoch); + + // Add exit 1 to the op pool during Altair. 
It's still valid at this point and should be + // returned. + let verified_exit1 = exit1 + .clone() + .validate(&altair_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_voluntary_exit(verified_exit1); + let exits = + op_pool.get_voluntary_exits(&altair_head.beacon_state, |_| true, &harness.chain.spec); + assert!(exits.contains(&exit1)); + assert_eq!(exits.len(), 1); + + // Advance to Bellatrix. + harness + .extend_to_slot(bellatrix_fork_epoch.start_slot(slots_per_epoch)) + .await; + let bellatrix_head = harness.chain.canonical_head.cached_head().snapshot; + assert_eq!( + bellatrix_head.beacon_state.current_epoch(), + bellatrix_fork_epoch + ); + + // Sign an exit with the Altair domain and a phase0 epoch. This is a weird type of exit + // that is valid because after the Bellatrix fork we'll use the Altair fork domain to verify + // all prior epochs. + let exit2 = harness.make_voluntary_exit(2, Epoch::new(0)); + let verified_exit2 = exit2 + .clone() + .validate(&bellatrix_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_voluntary_exit(verified_exit2); + + // Attempting to fetch exit1 now should fail, despite it still being in the pool. + // exit2 should still be valid, because it was signed with the Altair fork domain. 
+ assert_eq!(op_pool.voluntary_exits.read().len(), 2); + let exits = + op_pool.get_voluntary_exits(&bellatrix_head.beacon_state, |_| true, &harness.spec); + assert_eq!(&exits, &[exit2]); + } + + /// Test several cross-fork proposer slashings: + /// + /// - phase0 slashing (not valid after Bellatrix) + /// - Bellatrix signed with Altair fork version (not valid after Bellatrix) + /// - phase0 exit signed with Altair fork version (only valid after Bellatrix) + #[tokio::test] + async fn cross_fork_proposer_slashings() { + let (harness, spec) = cross_fork_harness::<MainnetEthSpec>(); + let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); + let altair_fork_epoch = spec.altair_fork_epoch.unwrap(); + let bellatrix_fork_epoch = spec.bellatrix_fork_epoch.unwrap(); + let bellatrix_fork_slot = bellatrix_fork_epoch.start_slot(slots_per_epoch); + + let op_pool = OperationPool::<MainnetEthSpec>::new(); + + // Sign a proposer slashing in phase0 with a phase0 epoch. + let slashing1 = harness.make_proposer_slashing_at_slot(0, Some(Slot::new(1))); + + // Advance to Altair. + harness + .extend_to_slot(altair_fork_epoch.start_slot(slots_per_epoch)) + .await; + let altair_head = harness.chain.canonical_head.cached_head().snapshot; + assert_eq!(altair_head.beacon_state.current_epoch(), altair_fork_epoch); + + // Add slashing1 to the op pool during Altair. It's still valid at this point and should be + // returned. + let verified_slashing1 = slashing1 + .clone() + .validate(&altair_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_proposer_slashing(verified_slashing1); + let (proposer_slashings, _, _) = + op_pool.get_slashings_and_exits(&altair_head.beacon_state, &harness.chain.spec); + assert!(proposer_slashings.contains(&slashing1)); + assert_eq!(proposer_slashings.len(), 1); + + // Sign a proposer slashing with a Bellatrix slot using the Altair fork domain. + // + // This slashing is valid only before the Bellatrix fork epoch. 
+ let slashing2 = harness.make_proposer_slashing_at_slot(1, Some(bellatrix_fork_slot)); + let verified_slashing2 = slashing2 + .clone() + .validate(&altair_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_proposer_slashing(verified_slashing2); + let (proposer_slashings, _, _) = + op_pool.get_slashings_and_exits(&altair_head.beacon_state, &harness.chain.spec); + assert!(proposer_slashings.contains(&slashing1)); + assert!(proposer_slashings.contains(&slashing2)); + assert_eq!(proposer_slashings.len(), 2); + + // Advance to Bellatrix. + harness.extend_to_slot(bellatrix_fork_slot).await; + let bellatrix_head = harness.chain.canonical_head.cached_head().snapshot; + assert_eq!( + bellatrix_head.beacon_state.current_epoch(), + bellatrix_fork_epoch + ); + + // Sign a proposer slashing with the Altair domain and a phase0 slot. This is a weird type + // of slashing that is only valid after the Bellatrix fork because we'll use the Altair fork + // domain to verify all prior epochs. + let slashing3 = harness.make_proposer_slashing_at_slot(2, Some(Slot::new(1))); + let verified_slashing3 = slashing3 + .clone() + .validate(&bellatrix_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_proposer_slashing(verified_slashing3); + + // Attempting to fetch slashing1 now should fail, despite it still being in the pool. + // Likewise slashing2 is also invalid now because it should be signed with the + // Bellatrix fork version. + // slashing3 should still be valid, because it was signed with the Altair fork domain. 
+ assert_eq!(op_pool.proposer_slashings.read().len(), 3); + let (proposer_slashings, _, _) = + op_pool.get_slashings_and_exits(&bellatrix_head.beacon_state, &harness.spec); + assert!(proposer_slashings.contains(&slashing3)); + assert_eq!(proposer_slashings.len(), 1); + } + + /// Test several cross-fork attester slashings: + /// + /// - both target epochs in phase0 (not valid after Bellatrix) + /// - both target epochs in Bellatrix but signed with Altair domain (not valid after Bellatrix) + /// - Altair attestation that surrounds a phase0 attestation (not valid after Bellatrix) + /// - both target epochs in phase0 but signed with Altair domain (only valid after Bellatrix) + #[tokio::test] + async fn cross_fork_attester_slashings() { + let (harness, spec) = cross_fork_harness::<MainnetEthSpec>(); + let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); + let zero_epoch = Epoch::new(0); + let altair_fork_epoch = spec.altair_fork_epoch.unwrap(); + let bellatrix_fork_epoch = spec.bellatrix_fork_epoch.unwrap(); + let bellatrix_fork_slot = bellatrix_fork_epoch.start_slot(slots_per_epoch); + + let op_pool = OperationPool::<MainnetEthSpec>::new(); + + // Sign an attester slashing with the phase0 fork version, with both target epochs in phase0. + let slashing1 = harness.make_attester_slashing_with_epochs( + vec![0], + None, + Some(zero_epoch), + None, + Some(zero_epoch), + ); + + // Advance to Altair. + harness + .extend_to_slot(altair_fork_epoch.start_slot(slots_per_epoch)) + .await; + let altair_head = harness.chain.canonical_head.cached_head().snapshot; + assert_eq!(altair_head.beacon_state.current_epoch(), altair_fork_epoch); + + // Add slashing1 to the op pool during Altair. It's still valid at this point and should be + // returned. 
+ let verified_slashing1 = slashing1 + .clone() + .validate(&altair_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_attester_slashing(verified_slashing1); + + // Sign an attester slashing with two Bellatrix epochs using the Altair fork domain. + // + // This slashing is valid only before the Bellatrix fork epoch. + let slashing2 = harness.make_attester_slashing_with_epochs( + vec![1], + None, + Some(bellatrix_fork_epoch), + None, + Some(bellatrix_fork_epoch), + ); + let verified_slashing2 = slashing2 + .clone() + .validate(&altair_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_attester_slashing(verified_slashing2); + let (_, attester_slashings, _) = + op_pool.get_slashings_and_exits(&altair_head.beacon_state, &harness.chain.spec); + assert!(attester_slashings.contains(&slashing1)); + assert!(attester_slashings.contains(&slashing2)); + assert_eq!(attester_slashings.len(), 2); + + // Sign an attester slashing where an Altair attestation surrounds a phase0 one. + // + // This slashing is valid only before the Bellatrix fork epoch. + let slashing3 = harness.make_attester_slashing_with_epochs( + vec![2], + Some(Epoch::new(0)), + Some(altair_fork_epoch), + Some(Epoch::new(1)), + Some(altair_fork_epoch - 1), + ); + let verified_slashing3 = slashing3 + .clone() + .validate(&altair_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_attester_slashing(verified_slashing3); + + // All three slashings should be valid and returned from the pool at this point. + // Seeing as we can only extract 2 at time we'll just pretend that validator 0 is already + // slashed. + let mut to_be_slashed = hashset! {0}; + let attester_slashings = + op_pool.get_attester_slashings(&altair_head.beacon_state, &mut to_be_slashed); + assert!(attester_slashings.contains(&slashing2)); + assert!(attester_slashings.contains(&slashing3)); + assert_eq!(attester_slashings.len(), 2); + + // Advance to Bellatrix. 
+ harness.extend_to_slot(bellatrix_fork_slot).await; + let bellatrix_head = harness.chain.canonical_head.cached_head().snapshot; + assert_eq!( + bellatrix_head.beacon_state.current_epoch(), + bellatrix_fork_epoch + ); + + // Sign an attester slashing with the Altair domain and phase0 epochs. This is a weird type + // of slashing that is only valid after the Bellatrix fork because we'll use the Altair fork + // domain to verify all prior epochs. + let slashing4 = harness.make_attester_slashing_with_epochs( + vec![3], + Some(Epoch::new(0)), + Some(altair_fork_epoch - 1), + Some(Epoch::new(0)), + Some(altair_fork_epoch - 1), + ); + let verified_slashing4 = slashing4 + .clone() + .validate(&bellatrix_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_attester_slashing(verified_slashing4); + + // All slashings except slashing4 are now invalid (despite being present in the pool). + assert_eq!(op_pool.attester_slashings.read().len(), 4); + let (_, attester_slashings, _) = + op_pool.get_slashings_and_exits(&bellatrix_head.beacon_state, &harness.spec); + assert!(attester_slashings.contains(&slashing4)); + assert_eq!(attester_slashings.len(), 1); + + // Pruning the attester slashings should remove all but slashing4. + op_pool.prune_attester_slashings(&bellatrix_head.beacon_state); + assert_eq!(op_pool.attester_slashings.read().len(), 1); + } } diff --git a/beacon_node/operation_pool/src/max_cover.rs b/beacon_node/operation_pool/src/max_cover.rs index 8e50b8152e..2e629f786b 100644 --- a/beacon_node/operation_pool/src/max_cover.rs +++ b/beacon_node/operation_pool/src/max_cover.rs @@ -11,16 +11,21 @@ use itertools::Itertools; pub trait MaxCover: Clone { /// The result type, of which we would eventually like a collection of maximal quality. type Object: Clone; + /// The intermediate object type, which can be converted to `Object`. + type Intermediate: Clone; /// The type used to represent sets. 
type Set: Clone; - /// Extract an object for inclusion in a solution. - fn object(&self) -> &Self::Object; + /// Extract the intermediate object. + fn intermediate(&self) -> &Self::Intermediate; + + /// Convert the borrowed intermediate object to an owned object for the solution. + fn convert_to_object(intermediate: &Self::Intermediate) -> Self::Object; /// Get the set of elements covered. fn covering_set(&self) -> &Self::Set; /// Update the set of items covered, for the inclusion of some object in the solution. - fn update_covering_set(&mut self, max_obj: &Self::Object, max_set: &Self::Set); + fn update_covering_set(&mut self, max_obj: &Self::Intermediate, max_set: &Self::Set); /// The quality of this item's covering set, usually its cardinality. fn score(&self) -> usize; } @@ -86,7 +91,7 @@ where .filter(|x| x.available && x.item.score() != 0) .for_each(|x| { x.item - .update_covering_set(best.object(), best.covering_set()) + .update_covering_set(best.intermediate(), best.covering_set()) }); result.push(best); @@ -106,7 +111,7 @@ where .into_iter() .merge_by(cover2, |item1, item2| item1.score() >= item2.score()) .take(limit) - .map(|item| item.object().clone()) + .map(|item| T::convert_to_object(item.intermediate())) .collect() } @@ -121,12 +126,17 @@ mod test { T: Clone + Eq + Hash, { type Object = Self; + type Intermediate = Self; type Set = Self; - fn object(&self) -> &Self { + fn intermediate(&self) -> &Self { self } + fn convert_to_object(set: &Self) -> Self { + set.clone() + } + fn covering_set(&self) -> &Self { self } diff --git a/beacon_node/operation_pool/src/metrics.rs b/beacon_node/operation_pool/src/metrics.rs index 3fa5208a3d..6fd8567cef 100644 --- a/beacon_node/operation_pool/src/metrics.rs +++ b/beacon_node/operation_pool/src/metrics.rs @@ -3,6 +3,10 @@ use lazy_static::lazy_static; pub use lighthouse_metrics::*; lazy_static! 
{ + pub static ref BUILD_REWARD_CACHE_TIME: Result<Histogram> = try_create_histogram( + "op_pool_build_reward_cache_time", + "Time to build the reward cache before packing attestations" + ); pub static ref ATTESTATION_PREV_EPOCH_PACKING_TIME: Result<Histogram> = try_create_histogram( "op_pool_attestation_prev_epoch_packing_time", "Time to pack previous epoch attestations" diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 0769786097..ed15369df7 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -1,12 +1,13 @@ use crate::attestation_id::AttestationId; +use crate::attestation_storage::AttestationMap; use crate::sync_aggregate_id::SyncAggregateId; use crate::OpPoolError; use crate::OperationPool; use derivative::Derivative; use parking_lot::RwLock; -use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use state_processing::SigVerifiedOp; use store::{DBColumn, Error as StoreError, StoreItem}; use types::*; @@ -17,32 +18,42 @@ type PersistedSyncContributions<T> = Vec<(SyncAggregateId, Vec<SyncCommitteeCont /// Operations are stored in arbitrary order, so it's not a good idea to compare instances /// of this type (or its encoded form) for equality. Convert back to an `OperationPool` first. 
#[superstruct( - variants(Altair), + variants(V5, V12), variant_attributes( - derive(Derivative, PartialEq, Debug, Serialize, Deserialize, Encode, Decode), - serde(bound = "T: EthSpec", deny_unknown_fields), + derive(Derivative, PartialEq, Debug, Encode, Decode), derivative(Clone), ), partial_getter_error(ty = "OpPoolError", expr = "OpPoolError::IncorrectOpPoolVariant") )] -#[derive(PartialEq, Debug, Serialize, Deserialize, Encode)] -#[serde(untagged)] -#[serde(bound = "T: EthSpec")] +#[derive(PartialEq, Debug, Encode)] #[ssz(enum_behaviour = "transparent")] pub struct PersistedOperationPool<T: EthSpec> { - /// Mapping from attestation ID to attestation mappings. - // We could save space by not storing the attestation ID, but it might - // be difficult to make that roundtrip due to eager aggregation. - attestations: Vec<(AttestationId, Vec<Attestation<T>>)>, + /// [DEPRECATED] Mapping from attestation ID to attestation mappings. + #[superstruct(only(V5))] + pub attestations_v5: Vec<(AttestationId, Vec<Attestation<T>>)>, + /// Attestations and their attesting indices. + #[superstruct(only(V12))] + pub attestations: Vec<(Attestation<T>, Vec<u64>)>, /// Mapping from sync contribution ID to sync contributions and aggregate. - #[superstruct(only(Altair))] - sync_contributions: PersistedSyncContributions<T>, + pub sync_contributions: PersistedSyncContributions<T>, + /// [DEPRECATED] Attester slashings. + #[superstruct(only(V5))] + pub attester_slashings_v5: Vec<(AttesterSlashing<T>, ForkVersion)>, /// Attester slashings. - attester_slashings: Vec<(AttesterSlashing<T>, ForkVersion)>, - /// Proposer slashings. - proposer_slashings: Vec<ProposerSlashing>, - /// Voluntary exits. - voluntary_exits: Vec<SignedVoluntaryExit>, + #[superstruct(only(V12))] + pub attester_slashings: Vec<SigVerifiedOp<AttesterSlashing<T>, T>>, + /// [DEPRECATED] Proposer slashings. 
+ #[superstruct(only(V5))] + pub proposer_slashings_v5: Vec<ProposerSlashing>, + /// Proposer slashings with fork information. + #[superstruct(only(V12))] + pub proposer_slashings: Vec<SigVerifiedOp<ProposerSlashing, T>>, + /// [DEPRECATED] Voluntary exits. + #[superstruct(only(V5))] + pub voluntary_exits_v5: Vec<SignedVoluntaryExit>, + /// Voluntary exits with fork information. + #[superstruct(only(V12))] + pub voluntary_exits: Vec<SigVerifiedOp<SignedVoluntaryExit, T>>, } impl<T: EthSpec> PersistedOperationPool<T> { @@ -52,7 +63,12 @@ impl<T: EthSpec> PersistedOperationPool<T> { .attestations .read() .iter() - .map(|(att_id, att)| (att_id.clone(), att.clone())) + .map(|att| { + ( + att.clone_as_attestation(), + att.indexed.attesting_indices.clone(), + ) + }) .collect(); let sync_contributions = operation_pool @@ -83,7 +99,7 @@ impl<T: EthSpec> PersistedOperationPool<T> { .map(|(_, exit)| exit.clone()) .collect(); - PersistedOperationPool::Altair(PersistedOperationPoolAltair { + PersistedOperationPool::V12(PersistedOperationPoolV12 { attestations, sync_contributions, attester_slashings, @@ -92,45 +108,62 @@ impl<T: EthSpec> PersistedOperationPool<T> { }) } - /// Reconstruct an `OperationPool`. Sets `sync_contributions` to its `Default` if `self` matches - /// `PersistedOperationPool::Base`. + /// Reconstruct an `OperationPool`. pub fn into_operation_pool(self) -> Result<OperationPool<T>, OpPoolError> { - let attestations = RwLock::new(self.attestations().iter().cloned().collect()); - let attester_slashings = RwLock::new(self.attester_slashings().iter().cloned().collect()); + let attester_slashings = RwLock::new(self.attester_slashings()?.iter().cloned().collect()); let proposer_slashings = RwLock::new( - self.proposer_slashings() + self.proposer_slashings()? 
.iter() .cloned() - .map(|slashing| (slashing.signed_header_1.message.proposer_index, slashing)) + .map(|slashing| (slashing.as_inner().proposer_index(), slashing)) .collect(), ); let voluntary_exits = RwLock::new( - self.voluntary_exits() + self.voluntary_exits()? .iter() .cloned() - .map(|exit| (exit.message.validator_index, exit)) + .map(|exit| (exit.as_inner().message.validator_index, exit)) .collect(), ); - let op_pool = match self { - PersistedOperationPool::Altair(_) => { - let sync_contributions = - RwLock::new(self.sync_contributions()?.iter().cloned().collect()); - - OperationPool { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - _phantom: Default::default(), + let sync_contributions = RwLock::new(self.sync_contributions().iter().cloned().collect()); + let attestations = match self { + PersistedOperationPool::V5(_) => return Err(OpPoolError::IncorrectOpPoolVariant), + PersistedOperationPool::V12(pool) => { + let mut map = AttestationMap::default(); + for (att, attesting_indices) in pool.attestations { + map.insert(att, attesting_indices); } + RwLock::new(map) } }; + let op_pool = OperationPool { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + reward_cache: Default::default(), + _phantom: Default::default(), + }; Ok(op_pool) } } -/// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::Altair`. +impl<T: EthSpec> StoreItem for PersistedOperationPoolV5<T> { + fn db_column() -> DBColumn { + DBColumn::OpPool + } + + fn as_store_bytes(&self) -> Vec<u8> { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> { + PersistedOperationPoolV5::from_ssz_bytes(bytes).map_err(Into::into) + } +} + +/// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::V12`. 
impl<T: EthSpec> StoreItem for PersistedOperationPool<T> { fn db_column() -> DBColumn { DBColumn::OpPool @@ -141,9 +174,9 @@ impl<T: EthSpec> StoreItem for PersistedOperationPool<T> { } fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> { - // Default deserialization to the Altair variant. - PersistedOperationPoolAltair::from_ssz_bytes(bytes) - .map(Self::Altair) + // Default deserialization to the latest variant. + PersistedOperationPoolV12::from_ssz_bytes(bytes) + .map(Self::V12) .map_err(Into::into) } } diff --git a/beacon_node/operation_pool/src/reward_cache.rs b/beacon_node/operation_pool/src/reward_cache.rs new file mode 100644 index 0000000000..5b9d4258e9 --- /dev/null +++ b/beacon_node/operation_pool/src/reward_cache.rs @@ -0,0 +1,122 @@ +use crate::OpPoolError; +use bitvec::vec::BitVec; +use types::{BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, ParticipationFlags}; + +#[derive(Debug, PartialEq, Eq, Clone)] +struct Initialization { + current_epoch: Epoch, + latest_block_root: Hash256, +} + +/// Cache to store pre-computed information for block proposal. +#[derive(Debug, Clone, Default)] +pub struct RewardCache { + initialization: Option<Initialization>, + /// `BitVec` of validator indices which don't have default participation flags for the prev. epoch. + /// + /// We choose to only track whether validators have *any* participation flag set because + /// it's impossible to include a new attestation which is better than the existing participation + /// UNLESS the validator makes a slashable attestation, and we assume that this is rare enough + /// that it's acceptable to be slightly sub-optimal in this case. + previous_epoch_participation: BitVec, + /// `BitVec` of validator indices which don't have default participation flags for the current epoch. 
+ current_epoch_participation: BitVec, +} + +impl RewardCache { + pub fn has_attested_in_epoch( + &self, + validator_index: u64, + epoch: Epoch, + ) -> Result<bool, OpPoolError> { + if let Some(init) = &self.initialization { + if init.current_epoch == epoch { + Ok(*self + .current_epoch_participation + .get(validator_index as usize) + .ok_or(OpPoolError::RewardCacheOutOfBounds)?) + } else if init.current_epoch == epoch + 1 { + Ok(*self + .previous_epoch_participation + .get(validator_index as usize) + .ok_or(OpPoolError::RewardCacheOutOfBounds)?) + } else { + Err(OpPoolError::RewardCacheWrongEpoch) + } + } else { + Err(OpPoolError::RewardCacheWrongEpoch) + } + } + + /// Return the root of the latest block applied to `state`. + /// + /// For simplicity at genesis we return the zero hash, which will cause one unnecessary + /// re-calculation in `update`. + fn latest_block_root<E: EthSpec>(state: &BeaconState<E>) -> Result<Hash256, OpPoolError> { + if state.slot() == 0 { + Ok(Hash256::zero()) + } else { + Ok(*state + .get_block_root(state.slot() - 1) + .map_err(OpPoolError::RewardCacheGetBlockRoot)?) + } + } + + /// Update the cache. + pub fn update<E: EthSpec>(&mut self, state: &BeaconState<E>) -> Result<(), OpPoolError> { + if matches!(state, BeaconState::Base(_)) { + return Ok(()); + } + + let current_epoch = state.current_epoch(); + let latest_block_root = Self::latest_block_root(state)?; + + let new_init = Initialization { + current_epoch, + latest_block_root, + }; + + // The participation flags change every block, and will almost always need updating when + // this function is called at a new slot. 
+ if self + .initialization + .as_ref() + .map_or(true, |init| *init != new_init) + { + self.update_previous_epoch_participation(state) + .map_err(OpPoolError::RewardCacheUpdatePrevEpoch)?; + self.update_current_epoch_participation(state) + .map_err(OpPoolError::RewardCacheUpdateCurrEpoch)?; + + self.initialization = Some(new_init); + } + + Ok(()) + } + + fn update_previous_epoch_participation<E: EthSpec>( + &mut self, + state: &BeaconState<E>, + ) -> Result<(), BeaconStateError> { + let default_participation = ParticipationFlags::default(); + self.previous_epoch_participation = state + .previous_epoch_participation()? + .iter() + .map(|participation| *participation != default_participation) + .collect(); + Ok(()) + } + + fn update_current_epoch_participation<E: EthSpec>( + &mut self, + state: &BeaconState<E>, + ) -> Result<(), BeaconStateError> { + let default_participation = ParticipationFlags::default(); + self.current_epoch_participation = state + .current_epoch_participation()? + .iter() + .map(|participation| *participation != default_participation) + .collect(); + Ok(()) + } +} diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index bbb904717b..3df95a0a5d 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -727,6 +727,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("250") .takes_value(true) ) + .arg( + Arg::with_name("paranoid-block-proposal") + .long("paranoid-block-proposal") + .help("Paranoid enough to be reading the source? Nice. This flag reverts some \ + block proposal optimisations and forces the node to check every attestation \ + it includes super thoroughly. 
This may be useful in an emergency, but not \ + otherwise.") + .hidden(true) + .takes_value(false) + ) .arg( Arg::with_name("builder-fallback-skips") .long("builder-fallback-skips") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index e885275b04..f08981b103 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -644,6 +644,8 @@ pub fn get_config<E: EthSpec>( client_config.chain.count_unrealized = clap_utils::parse_required(cli_args, "count-unrealized")?; + client_config.chain.paranoid_block_proposal = cli_args.is_present("paranoid-block-proposal"); + /* * Builder fallback configs. */ diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index d72dbcd23d..4f35c4c072 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(11); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(12); // All the keys that get stored under the `BeaconMeta` column. // diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index de7ced1331..c31e373b48 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -23,6 +23,7 @@ validator client or the slasher**. | v2.4.0 | Jul 2022 | v9 | yes (pre Bellatrix) | | v2.5.0 | Aug 2022 | v11 | yes | | v3.0.0 | Aug 2022 | v11 | yes | +| v3.1.0 | Sep 2022 | v12 | yes | > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release > (e.g. v2.3.0). 
diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index c7ed4b308d..46ac2bae57 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -14,6 +14,7 @@ bls = { path = "../../crypto/bls" } integer-sqrt = "0.1.5" itertools = "0.10.0" eth2_ssz = "0.4.1" +eth2_ssz_derive = "0.3.0" eth2_ssz_types = "0.2.2" merkle_proof = { path = "../merkle_proof" } safe_arith = { path = "../safe_arith" } @@ -26,6 +27,7 @@ smallvec = "1.6.1" arbitrary = { version = "1.0", features = ["derive"], optional = true } lighthouse_metrics = { path = "../../common/lighthouse_metrics", optional = true } lazy_static = { version = "1.4.0", optional = true } +derivative = "2.1.1" [features] default = ["legacy-arith", "metrics"] diff --git a/consensus/state_processing/src/common/get_attesting_indices.rs b/consensus/state_processing/src/common/get_attesting_indices.rs index fb636f861e..d7d02c3601 100644 --- a/consensus/state_processing/src/common/get_attesting_indices.rs +++ b/consensus/state_processing/src/common/get_attesting_indices.rs @@ -1,12 +1,10 @@ use types::*; /// Returns validator indices which participated in the attestation, sorted by increasing index. -/// -/// Spec v0.12.1 pub fn get_attesting_indices<T: EthSpec>( committee: &[usize], bitlist: &BitList<T::MaxValidatorsPerCommittee>, -) -> Result<Vec<usize>, BeaconStateError> { +) -> Result<Vec<u64>, BeaconStateError> { if bitlist.len() != committee.len() { return Err(BeaconStateError::InvalidBitfield); } @@ -15,7 +13,7 @@ pub fn get_attesting_indices<T: EthSpec>( for (i, validator_index) in committee.iter().enumerate() { if let Ok(true) = bitlist.get(i) { - indices.push(*validator_index) + indices.push(*validator_index as u64) } } @@ -23,3 +21,12 @@ pub fn get_attesting_indices<T: EthSpec>( Ok(indices) } + +/// Shortcut for getting the attesting indices while fetching the committee from the state's cache. 
+pub fn get_attesting_indices_from_state<T: EthSpec>( + state: &BeaconState<T>, + att: &Attestation<T>, +) -> Result<Vec<u64>, BeaconStateError> { + let committee = state.get_beacon_committee(att.data.slot, att.data.index)?; + get_attesting_indices::<T>(committee.committee, &att.aggregation_bits) +} diff --git a/consensus/state_processing/src/common/get_indexed_attestation.rs b/consensus/state_processing/src/common/get_indexed_attestation.rs index daa1c09307..63f63698e4 100644 --- a/consensus/state_processing/src/common/get_indexed_attestation.rs +++ b/consensus/state_processing/src/common/get_indexed_attestation.rs @@ -14,9 +14,7 @@ pub fn get_indexed_attestation<T: EthSpec>( let attesting_indices = get_attesting_indices::<T>(committee, &attestation.aggregation_bits)?; Ok(IndexedAttestation { - attesting_indices: VariableList::new( - attesting_indices.into_iter().map(|x| x as u64).collect(), - )?, + attesting_indices: VariableList::new(attesting_indices)?, data: attestation.data.clone(), signature: attestation.signature.clone(), }) diff --git a/consensus/state_processing/src/common/mod.rs b/consensus/state_processing/src/common/mod.rs index 334a293ed5..8a2e2439bb 100644 --- a/consensus/state_processing/src/common/mod.rs +++ b/consensus/state_processing/src/common/mod.rs @@ -10,7 +10,7 @@ pub mod base; pub use deposit_data_tree::DepositDataTree; pub use get_attestation_participation::get_attestation_participation_flag_indices; -pub use get_attesting_indices::get_attesting_indices; +pub use get_attesting_indices::{get_attesting_indices, get_attesting_indices_from_state}; pub use get_indexed_attestation::get_indexed_attestation; pub use initiate_validator_exit::initiate_validator_exit; pub use slash_validator::slash_validator; diff --git a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs index b40f91ce5a..26d2536e5f 100644 --- 
a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs @@ -278,8 +278,8 @@ impl ValidatorStatuses { // Loop through the participating validator indices and update the status vec. for validator_index in attesting_indices { self.statuses - .get_mut(validator_index) - .ok_or(BeaconStateError::UnknownValidator(validator_index))? + .get_mut(validator_index as usize) + .ok_or(BeaconStateError::UnknownValidator(validator_index as usize))? .update(&status); } } diff --git a/consensus/state_processing/src/upgrade/altair.rs b/consensus/state_processing/src/upgrade/altair.rs index 5e4fcbcf55..176f1af15c 100644 --- a/consensus/state_processing/src/upgrade/altair.rs +++ b/consensus/state_processing/src/upgrade/altair.rs @@ -32,8 +32,8 @@ pub fn translate_participation<E: EthSpec>( for index in attesting_indices { for flag_index in &participation_flag_indices { epoch_participation - .get_mut(index) - .ok_or(Error::UnknownValidator(index))? + .get_mut(index as usize) + .ok_or(Error::UnknownValidator(index as usize))? 
.add_flag(*flag_index)?; } } diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index 25c2839edd..80dee28f62 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -5,36 +5,120 @@ use crate::per_block_processing::{ verify_attester_slashing, verify_exit, verify_proposer_slashing, }; use crate::VerifySignatures; +use derivative::Derivative; +use smallvec::{smallvec, SmallVec}; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use std::marker::PhantomData; use types::{ - AttesterSlashing, BeaconState, ChainSpec, EthSpec, ProposerSlashing, SignedVoluntaryExit, + AttesterSlashing, BeaconState, ChainSpec, Epoch, EthSpec, Fork, ForkVersion, ProposerSlashing, + SignedVoluntaryExit, }; +const MAX_FORKS_VERIFIED_AGAINST: usize = 2; + /// Wrapper around an operation type that acts as proof that its signature has been checked. /// -/// The inner field is private, meaning instances of this type can only be constructed +/// The inner `op` field is private, meaning instances of this type can only be constructed /// by calling `validate`. -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct SigVerifiedOp<T>(T); +#[derive(Derivative, Debug, Clone, Encode, Decode)] +#[derivative( + PartialEq, + Eq, + Hash(bound = "T: Encode + Decode + std::hash::Hash, E: EthSpec") +)] +pub struct SigVerifiedOp<T: Encode + Decode, E: EthSpec> { + op: T, + verified_against: VerifiedAgainst, + #[ssz(skip_serializing, skip_deserializing)] + _phantom: PhantomData<E>, +} + +/// Information about the fork versions that this message was verified against. +/// +/// In general it is not safe to assume that a `SigVerifiedOp` constructed at some point in the past +/// will continue to be valid in the presence of a changing `state.fork()`. The reason for this +/// is that the fork versions that the message's epochs map to might change. 
+/// +/// For example a proposer slashing at a phase0 slot verified against an Altair state will use +/// the phase0 fork version, but will become invalid once the Bellatrix fork occurs because that +/// slot will start to map to the Altair fork version. This is because `Fork::get_fork_version` only +/// remembers the most recent two forks. +/// +/// In the other direction, a proposer slashing at a Bellatrix slot verified against an Altair state +/// will use the Altair fork version, but will become invalid once the Bellatrix fork occurs because +/// that slot will start to map to the Bellatrix fork version. +/// +/// We need to store multiple `ForkVersion`s because attester slashings contain two indexed +/// attestations which may be signed using different versions. +#[derive(Debug, PartialEq, Eq, Clone, Hash, Encode, Decode)] +pub struct VerifiedAgainst { + fork_versions: SmallVec<[ForkVersion; MAX_FORKS_VERIFIED_AGAINST]>, +} + +impl<T, E> SigVerifiedOp<T, E> +where + T: VerifyOperation<E>, + E: EthSpec, +{ + /// This function must be private because it assumes that `op` has already been verified. + fn new(op: T, state: &BeaconState<E>) -> Self { + let verified_against = VerifiedAgainst { + fork_versions: op + .verification_epochs() + .into_iter() + .map(|epoch| state.fork().get_fork_version(epoch)) + .collect(), + }; + + SigVerifiedOp { + op, + verified_against, + _phantom: PhantomData, + } + } -impl<T> SigVerifiedOp<T> { pub fn into_inner(self) -> T { - self.0 + self.op } pub fn as_inner(&self) -> &T { - &self.0 + &self.op + } + + pub fn signature_is_still_valid(&self, current_fork: &Fork) -> bool { + self.as_inner() + .verification_epochs() + .into_iter() + .zip(self.verified_against.fork_versions.iter()) + .all(|(epoch, verified_fork_version)| { + current_fork.get_fork_version(epoch) == *verified_fork_version + }) + } + + /// Return one of the fork versions this message was verified against. 
+ /// + /// This is only required for the v12 schema downgrade and can be deleted once all nodes + /// are upgraded to v12. + pub fn first_fork_verified_against(&self) -> Option<ForkVersion> { + self.verified_against.fork_versions.first().copied() } } /// Trait for operations that can be verified and transformed into a `SigVerifiedOp`. -pub trait VerifyOperation<E: EthSpec>: Sized { +pub trait VerifyOperation<E: EthSpec>: Encode + Decode + Sized { type Error; fn validate( self, state: &BeaconState<E>, spec: &ChainSpec, - ) -> Result<SigVerifiedOp<Self>, Self::Error>; + ) -> Result<SigVerifiedOp<Self, E>, Self::Error>; + + /// Return the epochs at which parts of this message were verified. + /// + /// These need to map 1-to-1 to the `SigVerifiedOp::verified_against` for this type. + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]>; } impl<E: EthSpec> VerifyOperation<E> for SignedVoluntaryExit { @@ -44,9 +128,14 @@ impl<E: EthSpec> VerifyOperation<E> for SignedVoluntaryExit { self, state: &BeaconState<E>, spec: &ChainSpec, - ) -> Result<SigVerifiedOp<Self>, Self::Error> { + ) -> Result<SigVerifiedOp<Self, E>, Self::Error> { verify_exit(state, &self, VerifySignatures::True, spec)?; - Ok(SigVerifiedOp(self)) + Ok(SigVerifiedOp::new(self, state)) + } + + #[allow(clippy::integer_arithmetic)] + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + smallvec![self.message.epoch] } } @@ -57,9 +146,17 @@ impl<E: EthSpec> VerifyOperation<E> for AttesterSlashing<E> { self, state: &BeaconState<E>, spec: &ChainSpec, - ) -> Result<SigVerifiedOp<Self>, Self::Error> { + ) -> Result<SigVerifiedOp<Self, E>, Self::Error> { verify_attester_slashing(state, &self, VerifySignatures::True, spec)?; - Ok(SigVerifiedOp(self)) + Ok(SigVerifiedOp::new(self, state)) + } + + #[allow(clippy::integer_arithmetic)] + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + smallvec![ + 
self.attestation_1.data.target.epoch, + self.attestation_2.data.target.epoch + ] } } @@ -70,8 +167,18 @@ impl<E: EthSpec> VerifyOperation<E> for ProposerSlashing { self, state: &BeaconState<E>, spec: &ChainSpec, - ) -> Result<SigVerifiedOp<Self>, Self::Error> { + ) -> Result<SigVerifiedOp<Self, E>, Self::Error> { verify_proposer_slashing(&self, state, VerifySignatures::True, spec)?; - Ok(SigVerifiedOp(self)) + Ok(SigVerifiedOp::new(self, state)) + } + + #[allow(clippy::integer_arithmetic)] + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + // Only need a single epoch because the slots of the two headers must be equal. + smallvec![self + .signed_header_1 + .message + .slot + .epoch(E::slots_per_epoch())] } } diff --git a/consensus/types/src/proposer_slashing.rs b/consensus/types/src/proposer_slashing.rs index ff12b0611a..ca048b149a 100644 --- a/consensus/types/src/proposer_slashing.rs +++ b/consensus/types/src/proposer_slashing.rs @@ -18,6 +18,13 @@ pub struct ProposerSlashing { pub signed_header_2: SignedBeaconBlockHeader, } +impl ProposerSlashing { + /// Get proposer index, assuming slashing validity has already been checked. 
+ pub fn proposer_index(&self) -> u64 { + self.signed_header_1.message.proposer_index + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 7fd4ad91cf..14934a5669 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -132,6 +132,21 @@ fn fork_choice_before_proposal_timeout_zero() { .with_config(|config| assert_eq!(config.chain.fork_choice_before_proposal_timeout_ms, 0)); } +#[test] +fn paranoid_block_proposal_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(!config.chain.paranoid_block_proposal)); +} + +#[test] +fn paranoid_block_proposal_on() { + CommandLineTest::new() + .flag("paranoid-block-proposal", None) + .run_with_zero_port() + .with_config(|config| assert!(config.chain.paranoid_block_proposal)); +} + #[test] fn count_unrealized_default() { CommandLineTest::new() From 2ce86a08308af4bd0543dda8982a202454738ae4 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Mon, 29 Aug 2022 11:35:59 +0000 Subject: [PATCH 157/184] Validator registration request failures do not cause us to mark BNs offline (#3488) ## Issue Addressed Relates to https://github.com/sigp/lighthouse/issues/3416 ## Proposed Changes - Add an `OfflineOnFailure` enum to the `first_success` method for querying beacon nodes so that a val registration request failure from the BN -> builder does not result in the BN being marked offline. This seems important because these failures could be coming directly from a connected relay and actually have no bearing on BN health. 
Other messages that are sent to a relay have a local fallback so they shouldn't result in errors - Downgrade the following log to a `WARN` ``` ERRO Unable to publish validator registrations to the builder network, error: All endpoints failed https://BN_B => RequestFailed(ServerMessage(ErrorMessage { code: 500, message: "UNHANDLED_ERROR: BuilderMissing", stacktraces: [] })), https://XXXX/ => Unavailable(Offline), [omitted] ``` ## Additional Info I think this change at least improves the UX of having a VC connected to some builder and some non-builder beacon nodes. I think we need to balance potentially alerting users that there is a BN <> VC misconfiguration and also allowing this type of fallback to work. If we want to fully support this type of configuration we may want to consider adding a flag `--builder-beacon-nodes` and track whether a VC should be making builder queries on a per-beacon node basis. But I think the changes in this PR are independent of that type of extension. PS: Sorry for the big diff here, it's mostly formatting changes after I added a new arg to a bunch of method calls. 
Co-authored-by: realbigsean <sean@sigmaprime.io> --- validator_client/src/attestation_service.rs | 107 ++++++---- validator_client/src/beacon_node_fallback.rs | 12 +- validator_client/src/block_service.rs | 195 +++++++++--------- validator_client/src/doppelganger_service.rs | 37 ++-- validator_client/src/duties_service.rs | 96 +++++---- validator_client/src/duties_service/sync.rs | 15 +- validator_client/src/lib.rs | 19 +- validator_client/src/preparation_service.rs | 29 ++- .../src/sync_committee_service.rs | 70 ++++--- 9 files changed, 339 insertions(+), 241 deletions(-) diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index cdc9b88f68..a7118aa945 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -3,6 +3,7 @@ use crate::{ duties_service::{DutiesService, DutyAndProof}, http_metrics::metrics, validator_store::ValidatorStore, + OfflineOnFailure, }; use environment::RuntimeContext; use futures::future::join_all; @@ -337,17 +338,21 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { let attestation_data = self .beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::ATTESTATIONS_HTTP_GET], - ); - beacon_node - .get_validator_attestation_data(slot, committee_index) - .await - .map_err(|e| format!("Failed to produce attestation data: {:?}", e)) - .map(|result| result.data) - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::ATTESTATION_SERVICE_TIMES, + &[metrics::ATTESTATIONS_HTTP_GET], + ); + beacon_node + .get_validator_attestation_data(slot, committee_index) + .await + .map_err(|e| format!("Failed to produce attestation data: {:?}", e)) + .map(|result| result.data) + }, + ) .await .map_err(|e| e.to_string())?; @@ -414,15 +419,19 
@@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { // Post the attestations to the BN. match self .beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::ATTESTATIONS_HTTP_POST], - ); - beacon_node - .post_beacon_pool_attestations(attestations) - .await - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::ATTESTATION_SERVICE_TIMES, + &[metrics::ATTESTATIONS_HTTP_POST], + ); + beacon_node + .post_beacon_pool_attestations(attestations) + .await + }, + ) .await { Ok(()) => info!( @@ -470,21 +479,27 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { let aggregated_attestation = &self .beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::AGGREGATES_HTTP_GET], - ); - beacon_node - .get_validator_aggregate_attestation( - attestation_data.slot, - attestation_data.tree_hash_root(), - ) - .await - .map_err(|e| format!("Failed to produce an aggregate attestation: {:?}", e))? - .ok_or_else(|| format!("No aggregate available for {:?}", attestation_data)) - .map(|result| result.data) - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::ATTESTATION_SERVICE_TIMES, + &[metrics::AGGREGATES_HTTP_GET], + ); + beacon_node + .get_validator_aggregate_attestation( + attestation_data.slot, + attestation_data.tree_hash_root(), + ) + .await + .map_err(|e| { + format!("Failed to produce an aggregate attestation: {:?}", e) + })? 
+ .ok_or_else(|| format!("No aggregate available for {:?}", attestation_data)) + .map(|result| result.data) + }, + ) .await .map_err(|e| e.to_string())?; @@ -535,15 +550,19 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { let signed_aggregate_and_proofs_slice = signed_aggregate_and_proofs.as_slice(); match self .beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::AGGREGATES_HTTP_POST], - ); - beacon_node - .post_validator_aggregate_and_proof(signed_aggregate_and_proofs_slice) - .await - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::ATTESTATION_SERVICE_TIMES, + &[metrics::AGGREGATES_HTTP_POST], + ); + beacon_node + .post_validator_aggregate_and_proof(signed_aggregate_and_proofs_slice) + .await + }, + ) .await { Ok(()) => { diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index 0b808e71bb..df6c949aef 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -70,6 +70,13 @@ pub enum RequireSynced { No, } +/// Indicates if a beacon node should be set to `Offline` if a request fails. +#[derive(PartialEq, Clone, Copy)] +pub enum OfflineOnFailure { + Yes, + No, +} + impl PartialEq<bool> for RequireSynced { fn eq(&self, other: &bool) -> bool { if *other { @@ -387,6 +394,7 @@ impl<T: SlotClock, E: EthSpec> BeaconNodeFallback<T, E> { pub async fn first_success<'a, F, O, Err, R>( &'a self, require_synced: RequireSynced, + offline_on_failure: OfflineOnFailure, func: F, ) -> Result<O, AllErrored<Err>> where @@ -415,7 +423,9 @@ impl<T: SlotClock, E: EthSpec> BeaconNodeFallback<T, E> { // There exists a race condition where the candidate may have been marked // as ready between the `func` call and now. 
We deem this an acceptable // inefficiency. - $candidate.set_offline().await; + if matches!(offline_on_failure, OfflineOnFailure::Yes) { + $candidate.set_offline().await; + } errors.push(($candidate.beacon_node.to_string(), Error::RequestFailed(e))); inc_counter_vec(&ENDPOINT_ERRORS, &[$candidate.beacon_node.as_ref()]); } diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index d47546eb0d..2a8d164225 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -2,6 +2,7 @@ use crate::beacon_node_fallback::{AllErrored, Error as FallbackError}; use crate::{ beacon_node_fallback::{BeaconNodeFallback, RequireSynced}, graffiti_file::GraffitiFile, + OfflineOnFailure, }; use crate::{http_metrics::metrics, validator_store::ValidatorStore}; use environment::RuntimeContext; @@ -329,70 +330,74 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { // Request block from first responsive beacon node. let block = self .beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - let block = match Payload::block_type() { - BlockType::Full => { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_GET], - ); - beacon_node - .get_validator_blocks::<E, Payload>( - slot, - randao_reveal_ref, - graffiti.as_ref(), - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })? - .data - } - BlockType::Blinded => { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], - ); - beacon_node - .get_validator_blinded_blocks::<E, Payload>( - slot, - randao_reveal_ref, - graffiti.as_ref(), - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })? 
- .data - } - }; + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + let block = match Payload::block_type() { + BlockType::Full => { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_GET], + ); + beacon_node + .get_validator_blocks::<E, Payload>( + slot, + randao_reveal_ref, + graffiti.as_ref(), + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })? + .data + } + BlockType::Blinded => { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], + ); + beacon_node + .get_validator_blinded_blocks::<E, Payload>( + slot, + randao_reveal_ref, + graffiti.as_ref(), + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })? + .data + } + }; - // Ensure the correctness of the execution payload's fee recipient. - if strict_fee_recipient { - if let Ok(execution_payload) = block.body().execution_payload() { - if Some(execution_payload.fee_recipient()) != fee_recipient { - return Err(BlockError::Recoverable( - "Incorrect fee recipient used by builder".to_string(), - )); + // Ensure the correctness of the execution payload's fee recipient. + if strict_fee_recipient { + if let Ok(execution_payload) = block.body().execution_payload() { + if Some(execution_payload.fee_recipient()) != fee_recipient { + return Err(BlockError::Recoverable( + "Incorrect fee recipient used by builder".to_string(), + )); + } } } - } - if proposer_index != Some(block.proposer_index()) { - return Err(BlockError::Recoverable( - "Proposer index does not match block proposer. Beacon chain re-orged" - .to_string(), - )); - } + if proposer_index != Some(block.proposer_index()) { + return Err(BlockError::Recoverable( + "Proposer index does not match block proposer. 
Beacon chain re-orged" + .to_string(), + )); + } - Ok::<_, BlockError>(block) - }) + Ok::<_, BlockError>(block) + }, + ) .await?; let signed_block = self_ref @@ -403,41 +408,45 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { // Publish block with first available beacon node. self.beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async { - match Payload::block_type() { - BlockType::Full => { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_POST], - ); - beacon_node - .post_beacon_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })? + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async { + match Payload::block_type() { + BlockType::Full => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {:?}", + e + )) + })? + } + BlockType::Blinded => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blinded_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {:?}", + e + )) + })? + } } - BlockType::Blinded => { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], - ); - beacon_node - .post_beacon_blinded_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })? 
- } - } - Ok::<_, BlockError>(()) - }) + Ok::<_, BlockError>(()) + }, + ) .await?; info!( diff --git a/validator_client/src/doppelganger_service.rs b/validator_client/src/doppelganger_service.rs index 9e134f94da..e6934ed48b 100644 --- a/validator_client/src/doppelganger_service.rs +++ b/validator_client/src/doppelganger_service.rs @@ -31,6 +31,7 @@ use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; use crate::validator_store::ValidatorStore; +use crate::OfflineOnFailure; use environment::RuntimeContext; use eth2::types::LivenessResponseData; use parking_lot::RwLock; @@ -176,13 +177,17 @@ async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( } else { // Request the previous epoch liveness state from the beacon node. beacon_nodes - .first_success(RequireSynced::Yes, |beacon_node| async move { - beacon_node - .post_lighthouse_liveness(validator_indices, previous_epoch) - .await - .map_err(|e| format!("Failed query for validator liveness: {:?}", e)) - .map(|result| result.data) - }) + .first_success( + RequireSynced::Yes, + OfflineOnFailure::Yes, + |beacon_node| async move { + beacon_node + .post_lighthouse_liveness(validator_indices, previous_epoch) + .await + .map_err(|e| format!("Failed query for validator liveness: {:?}", e)) + .map(|result| result.data) + }, + ) .await .unwrap_or_else(|e| { crit!( @@ -199,13 +204,17 @@ async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( // Request the current epoch liveness state from the beacon node. 
let current_epoch_responses = beacon_nodes - .first_success(RequireSynced::Yes, |beacon_node| async move { - beacon_node - .post_lighthouse_liveness(validator_indices, current_epoch) - .await - .map_err(|e| format!("Failed query for validator liveness: {:?}", e)) - .map(|result| result.data) - }) + .first_success( + RequireSynced::Yes, + OfflineOnFailure::Yes, + |beacon_node| async move { + beacon_node + .post_lighthouse_liveness(validator_indices, current_epoch) + .await + .map_err(|e| format!("Failed query for validator liveness: {:?}", e)) + .map(|result| result.data) + }, + ) .await .unwrap_or_else(|e| { crit!( diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index f8ca5a3d44..3e15b39ab6 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -8,7 +8,7 @@ mod sync; -use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; +use crate::beacon_node_fallback::{BeaconNodeFallback, OfflineOnFailure, RequireSynced}; use crate::{ block_service::BlockServiceNotification, http_metrics::metrics, @@ -382,18 +382,22 @@ async fn poll_validator_indices<T: SlotClock + 'static, E: EthSpec>( // Query the remote BN to resolve a pubkey to a validator index. 
let download_result = duties_service .beacon_nodes - .first_success(duties_service.require_synced, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::VALIDATOR_ID_HTTP_GET], - ); - beacon_node - .get_beacon_states_validator_id( - StateId::Head, - &ValidatorId::PublicKey(pubkey), - ) - .await - }) + .first_success( + duties_service.require_synced, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::VALIDATOR_ID_HTTP_GET], + ); + beacon_node + .get_beacon_states_validator_id( + StateId::Head, + &ValidatorId::PublicKey(pubkey), + ) + .await + }, + ) .await; match download_result { @@ -559,15 +563,19 @@ async fn poll_beacon_attesters<T: SlotClock + 'static, E: EthSpec>( let subscriptions_ref = &subscriptions; if let Err(e) = duties_service .beacon_nodes - .first_success(duties_service.require_synced, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::SUBSCRIPTIONS_HTTP_POST], - ); - beacon_node - .post_validator_beacon_committee_subscriptions(subscriptions_ref) - .await - }) + .first_success( + duties_service.require_synced, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::SUBSCRIPTIONS_HTTP_POST], + ); + beacon_node + .post_validator_beacon_committee_subscriptions(subscriptions_ref) + .await + }, + ) .await { error!( @@ -619,15 +627,19 @@ async fn poll_beacon_attesters_for_epoch<T: SlotClock + 'static, E: EthSpec>( let response = duties_service .beacon_nodes - .first_success(duties_service.require_synced, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::ATTESTER_DUTIES_HTTP_POST], - ); - beacon_node - .post_validator_duties_attester(epoch, local_indices) - .await - }) + .first_success( + 
duties_service.require_synced, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::ATTESTER_DUTIES_HTTP_POST], + ); + beacon_node + .post_validator_duties_attester(epoch, local_indices) + .await + }, + ) .await .map_err(|e| Error::FailedToDownloadAttesters(e.to_string()))?; @@ -779,15 +791,19 @@ async fn poll_beacon_proposers<T: SlotClock + 'static, E: EthSpec>( if !local_pubkeys.is_empty() { let download_result = duties_service .beacon_nodes - .first_success(duties_service.require_synced, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::PROPOSER_DUTIES_HTTP_GET], - ); - beacon_node - .get_validator_duties_proposer(current_epoch) - .await - }) + .first_success( + duties_service.require_synced, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::PROPOSER_DUTIES_HTTP_GET], + ); + beacon_node + .get_validator_duties_proposer(current_epoch) + .await + }, + ) .await; match download_result { diff --git a/validator_client/src/duties_service/sync.rs b/validator_client/src/duties_service/sync.rs index 9857561c48..b9d4d70306 100644 --- a/validator_client/src/duties_service/sync.rs +++ b/validator_client/src/duties_service/sync.rs @@ -1,3 +1,4 @@ +use crate::beacon_node_fallback::OfflineOnFailure; use crate::{ doppelganger_service::DoppelgangerStatus, duties_service::{DutiesService, Error}, @@ -420,11 +421,15 @@ pub async fn poll_sync_committee_duties_for_period<T: SlotClock + 'static, E: Et let duties_response = duties_service .beacon_nodes - .first_success(duties_service.require_synced, |beacon_node| async move { - beacon_node - .post_validator_duties_sync(period_start_epoch, local_indices) - .await - }) + .first_success( + duties_service.require_synced, + OfflineOnFailure::Yes, + |beacon_node| async move { + beacon_node + 
.post_validator_duties_sync(period_start_epoch, local_indices) + .await + }, + ) .await; let duties = match duties_response { diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index bb7b296d23..ac6969f4c5 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -26,7 +26,8 @@ use monitoring_api::{MonitoringHttpClient, ProcessType}; pub use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use crate::beacon_node_fallback::{ - start_fallback_updater_service, BeaconNodeFallback, CandidateBeaconNode, RequireSynced, + start_fallback_updater_service, BeaconNodeFallback, CandidateBeaconNode, OfflineOnFailure, + RequireSynced, }; use crate::doppelganger_service::DoppelgangerService; use account_utils::validator_definitions::ValidatorDefinitions; @@ -570,9 +571,11 @@ async fn init_from_beacon_node<E: EthSpec>( let genesis = loop { match beacon_nodes - .first_success(RequireSynced::No, |node| async move { - node.get_beacon_genesis().await - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |node| async move { node.get_beacon_genesis().await }, + ) .await { Ok(genesis) => break genesis.data, @@ -659,9 +662,11 @@ async fn poll_whilst_waiting_for_genesis<E: EthSpec>( ) -> Result<(), String> { loop { match beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - beacon_node.get_lighthouse_staking().await - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { beacon_node.get_lighthouse_staking().await }, + ) .await { Ok(is_staking) => { diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index 6dc8e7d56e..d4178f2c48 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -1,9 +1,10 @@ use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; use crate::validator_store::{DoppelgangerStatus, ValidatorStore}; +use 
crate::OfflineOnFailure; use bls::PublicKeyBytes; use environment::RuntimeContext; use parking_lot::RwLock; -use slog::{debug, error, info}; +use slog::{debug, error, info, warn}; use slot_clock::SlotClock; use std::collections::HashMap; use std::hash::Hash; @@ -330,11 +331,15 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { let preparation_entries = preparation_data.as_slice(); match self .beacon_nodes - .first_success(RequireSynced::Yes, |beacon_node| async move { - beacon_node - .post_validator_prepare_beacon_proposer(preparation_entries) - .await - }) + .first_success( + RequireSynced::Yes, + OfflineOnFailure::Yes, + |beacon_node| async move { + beacon_node + .post_validator_prepare_beacon_proposer(preparation_entries) + .await + }, + ) .await { Ok(()) => debug!( @@ -445,9 +450,13 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { for batch in signed.chunks(VALIDATOR_REGISTRATION_BATCH_SIZE) { match self .beacon_nodes - .first_success(RequireSynced::Yes, |beacon_node| async move { - beacon_node.post_validator_register_validator(batch).await - }) + .first_success( + RequireSynced::Yes, + OfflineOnFailure::No, + |beacon_node| async move { + beacon_node.post_validator_register_validator(batch).await + }, + ) .await { Ok(()) => info!( @@ -455,7 +464,7 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { "Published validator registrations to the builder network"; "count" => registration_data_len, ), - Err(e) => error!( + Err(e) => warn!( log, "Unable to publish validator registrations to the builder network"; "error" => %e, diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs index 73d0066f20..1e6ff7a5b5 100644 --- a/validator_client/src/sync_committee_service.rs +++ b/validator_client/src/sync_committee_service.rs @@ -1,5 +1,5 @@ use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; -use crate::{duties_service::DutiesService, 
validator_store::ValidatorStore}; +use crate::{duties_service::DutiesService, validator_store::ValidatorStore, OfflineOnFailure}; use environment::RuntimeContext; use eth2::types::BlockId; use futures::future::join_all; @@ -177,7 +177,7 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> { // Fetch `block_root` and `execution_optimistic` for `SyncCommitteeContribution`. let response = self .beacon_nodes - .first_success(RequireSynced::Yes, |beacon_node| async move { + .first_success(RequireSynced::Yes, OfflineOnFailure::Yes,|beacon_node| async move { beacon_node.get_beacon_blocks_root(BlockId::Head).await }) .await @@ -284,11 +284,15 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> { .collect::<Vec<_>>(); self.beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - beacon_node - .post_beacon_pool_sync_committee_signatures(committee_signatures) - .await - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + beacon_node + .post_beacon_pool_sync_committee_signatures(committee_signatures) + .await + }, + ) .await .map_err(|e| { error!( @@ -351,17 +355,21 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> { let contribution = &self .beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - let sync_contribution_data = SyncContributionData { - slot, - beacon_block_root, - subcommittee_index: subnet_id.into(), - }; + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + let sync_contribution_data = SyncContributionData { + slot, + beacon_block_root, + subcommittee_index: subnet_id.into(), + }; - beacon_node - .get_validator_sync_committee_contribution::<E>(&sync_contribution_data) - .await - }) + beacon_node + .get_validator_sync_committee_contribution::<E>(&sync_contribution_data) + .await + }, + ) .await .map_err(|e| { crit!( @@ -418,11 +426,15 @@ impl<T: SlotClock + 'static, E: 
EthSpec> SyncCommitteeService<T, E> { // Publish to the beacon node. self.beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - beacon_node - .post_validator_contribution_and_proofs(signed_contributions) - .await - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + beacon_node + .post_validator_contribution_and_proofs(signed_contributions) + .await + }, + ) .await .map_err(|e| { error!( @@ -556,11 +568,15 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> { if let Err(e) = self .beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - beacon_node - .post_validator_sync_committee_subscriptions(subscriptions_slice) - .await - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + beacon_node + .post_validator_sync_committee_subscriptions(subscriptions_slice) + .await + }, + ) .await { error!( From 8609cced0e4fe5c83a7485b3e7d11bbaaff975ae Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Mon, 29 Aug 2022 14:34:41 +0000 Subject: [PATCH 158/184] Reset payload statuses when resuming fork choice (#3498) ## Issue Addressed NA ## Proposed Changes This PR is motivated by a recent consensus failure in Geth where it returned `INVALID` for an `VALID` block. Without this PR, the only way to recover is by re-syncing Lighthouse. Whilst ELs "shouldn't have consensus failures", in reality it's something that we can expect from time to time due to the complex nature of Ethereum. Being able to recover easily will help the network recover and EL devs to troubleshoot. The risk introduced with this PR is that genuinely INVALID payloads get a "second chance" at being imported. I believe the DoS risk here is negligible since LH needs to be restarted in order to re-process the payload. Furthermore, there's no reason to think that a well-performing EL will accept a truly invalid payload the second-time-around. 
## Additional Info This implementation has the following intricacies: 1. Instead of just resetting *invalid* payloads to optimistic, we'll also reset *valid* payloads. This is an artifact of our existing implementation. 1. We will only reset payload statuses when we detect an invalid payload present in `proto_array` - This helps save us from forgetting that all our blocks are valid in the "best case scenario" where there are no invalid blocks. 1. If we fail to revert the payload statuses we'll log a `CRIT` and just continue with a `proto_array` that *does not* have reverted payload statuses. - The code to revert statuses needs to deal with balances and proposer-boost, so it's a failure point. This is a defensive measure to avoid introducing new show-stopping bugs to LH. --- Cargo.lock | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 19 ++-- beacon_node/beacon_chain/src/builder.rs | 6 +- .../beacon_chain/src/canonical_head.rs | 11 ++- beacon_node/beacon_chain/src/chain_config.rs | 6 ++ beacon_node/src/cli.rs | 8 ++ beacon_node/src/config.rs | 3 + consensus/fork_choice/Cargo.toml | 1 + consensus/fork_choice/src/fork_choice.rs | 89 ++++++++++++++++++- .../fork_choice/src/fork_choice_store.rs | 3 +- consensus/fork_choice/src/lib.rs | 2 +- .../src/proto_array_fork_choice.rs | 11 +++ lighthouse/tests/beacon_node.rs | 15 ++++ 13 files changed, 161 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 46cd2d96f0..faad708f41 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2235,6 +2235,7 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "proto_array", + "slog", "state_processing", "store", "tokio", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index fdcd3eed88..e1d7a5cfc2 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -58,7 +58,7 @@ use execution_layer::{ }; use fork_choice::{ AttestationFromBlock, ExecutionStatus, ForkChoice, 
ForkchoiceUpdateParameters, - InvalidationOperation, PayloadVerificationStatus, + InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses, }; use futures::channel::mpsc::Sender; use itertools::process_results; @@ -432,7 +432,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// Load fork choice from disk, returning `None` if it isn't found. pub fn load_fork_choice( store: BeaconStore<T>, + reset_payload_statuses: ResetPayloadStatuses, spec: &ChainSpec, + log: &Logger, ) -> Result<Option<BeaconForkChoice<T>>, Error> { let persisted_fork_choice = match store.get_item::<PersistedForkChoice>(&FORK_CHOICE_DB_KEY)? { @@ -445,8 +447,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> { Ok(Some(ForkChoice::from_persisted( persisted_fork_choice.fork_choice, + reset_payload_statuses, fc_store, spec, + log, )?)) } @@ -2925,10 +2929,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> { // Since the write failed, try to revert the canonical head back to what was stored // in the database. This attempts to prevent inconsistency between the database and // fork choice. 
- if let Err(e) = - self.canonical_head - .restore_from_store(fork_choice, &self.store, &self.spec) - { + if let Err(e) = self.canonical_head.restore_from_store( + fork_choice, + ResetPayloadStatuses::always_reset_conditionally( + self.config.always_reset_payload_statuses, + ), + &self.store, + &self.spec, + &self.log, + ) { crit!( self.log, "No stored fork choice found to restore from"; diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index cba9a56982..2704690442 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -17,7 +17,7 @@ use crate::{ }; use eth1::Config as Eth1Config; use execution_layer::ExecutionLayer; -use fork_choice::ForkChoice; +use fork_choice::{ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; @@ -245,7 +245,11 @@ where let fork_choice = BeaconChain::<Witness<TSlotClock, TEth1Backend, _, _, _>>::load_fork_choice( store.clone(), + ResetPayloadStatuses::always_reset_conditionally( + self.chain_config.always_reset_payload_statuses, + ), &self.spec, + log, ) .map_err(|e| format!("Unable to load fork choice from disk: {:?}", e))? 
.ok_or("Fork choice not found in store")?; diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 6559487980..166ba85720 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -43,7 +43,9 @@ use crate::{ BeaconChain, BeaconChainError as Error, BeaconChainTypes, BeaconSnapshot, }; use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead}; -use fork_choice::{ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock}; +use fork_choice::{ + ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, ResetPayloadStatuses, +}; use itertools::process_results; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; use slog::{crit, debug, error, warn, Logger}; @@ -249,11 +251,14 @@ impl<T: BeaconChainTypes> CanonicalHead<T> { // and it needs to be dropped to prevent a dead-lock. Requiring it to be passed here is // defensive programming. mut fork_choice_write_lock: RwLockWriteGuard<BeaconForkChoice<T>>, + reset_payload_statuses: ResetPayloadStatuses, store: &BeaconStore<T>, spec: &ChainSpec, + log: &Logger, ) -> Result<(), Error> { - let fork_choice = <BeaconChain<T>>::load_fork_choice(store.clone(), spec)? - .ok_or(Error::MissingPersistedForkChoice)?; + let fork_choice = + <BeaconChain<T>>::load_fork_choice(store.clone(), reset_payload_statuses, spec, log)? 
+ .ok_or(Error::MissingPersistedForkChoice)?; let fork_choice_view = fork_choice.cached_fork_choice_view(); let beacon_block_root = fork_choice_view.head_block_root; let beacon_block = store diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index ba3a0b628c..ad2b7abe5a 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -34,7 +34,12 @@ pub struct ChainConfig { pub builder_fallback_epochs_since_finalization: usize, /// Whether any chain health checks should be considered when deciding whether to use the builder API. pub builder_fallback_disable_checks: bool, + /// When set to `true`, weigh the "unrealized" FFG progression when choosing a head in fork + /// choice. pub count_unrealized: bool, + /// When set to `true`, forget any valid/invalid/optimistic statuses in fork choice during start + /// up. + pub always_reset_payload_statuses: bool, /// Whether to apply paranoid checks to blocks proposed by this beacon node. pub paranoid_block_proposal: bool, } @@ -54,6 +59,7 @@ impl Default for ChainConfig { builder_fallback_epochs_since_finalization: 3, builder_fallback_disable_checks: false, count_unrealized: true, + always_reset_payload_statuses: false, paranoid_block_proposal: false, } } diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 3df95a0a5d..fe2afb0213 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -786,4 +786,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("true") ) + .arg( + Arg::with_name("reset-payload-statuses") + .long("reset-payload-statuses") + .help("When present, Lighthouse will forget the payload statuses of any \ + already-imported blocks. 
This can assist in the recovery from a consensus \ + failure caused by the execution layer.") + .takes_value(false) + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index f08981b103..caa10f555d 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -644,6 +644,9 @@ pub fn get_config<E: EthSpec>( client_config.chain.count_unrealized = clap_utils::parse_required(cli_args, "count-unrealized")?; + client_config.chain.always_reset_payload_statuses = + cli_args.is_present("reset-payload-statuses"); + client_config.chain.paranoid_block_proposal = cli_args.is_present("paranoid-block-proposal"); /* diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index b2570092e6..52a738351e 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -12,6 +12,7 @@ state_processing = { path = "../state_processing" } proto_array = { path = "../proto_array" } eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" +slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } [dev-dependencies] beacon_chain = { path = "../../beacon_node/beacon_chain" } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 3341fc5c22..f55a283ed1 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,5 +1,6 @@ use crate::{ForkChoiceStore, InvalidationOperation}; use proto_array::{Block as ProtoBlock, ExecutionStatus, ProtoArrayForkChoice}; +use slog::{crit, debug, warn, Logger}; use ssz_derive::{Decode, Encode}; use state_processing::{ per_block_processing::errors::AttesterSlashingValidationError, per_epoch_processing, @@ -79,6 +80,26 @@ impl<T> From<state_processing::EpochProcessingError> for Error<T> { } } +#[derive(Debug, Clone, Copy)] +/// Controls how fork choice should behave when restoring from a persisted fork choice. 
+pub enum ResetPayloadStatuses { + /// Reset all payload statuses back to "optimistic". + Always, + /// Only reset all payload statuses back to "optimistic" when an "invalid" block is present. + OnlyWithInvalidPayload, +} + +impl ResetPayloadStatuses { + /// When `should_always_reset == True`, return `ResetPayloadStatuses::Always`. + pub fn always_reset_conditionally(should_always_reset: bool) -> Self { + if should_always_reset { + ResetPayloadStatuses::Always + } else { + ResetPayloadStatuses::OnlyWithInvalidPayload + } + } +} + #[derive(Debug)] pub enum InvalidBlock { UnknownParent(Hash256), @@ -1425,15 +1446,68 @@ where .map_err(Into::into) } + /// Instantiate `Self` from some `PersistedForkChoice` generated by a earlier call to + /// `Self::to_persisted`. + pub fn proto_array_from_persisted( + persisted: &PersistedForkChoice, + reset_payload_statuses: ResetPayloadStatuses, + spec: &ChainSpec, + log: &Logger, + ) -> Result<ProtoArrayForkChoice, Error<T::Error>> { + let mut proto_array = ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes) + .map_err(Error::InvalidProtoArrayBytes)?; + let contains_invalid_payloads = proto_array.contains_invalid_payloads(); + + debug!( + log, + "Restoring fork choice from persisted"; + "reset_payload_statuses" => ?reset_payload_statuses, + "contains_invalid_payloads" => contains_invalid_payloads, + ); + + // Exit early if there are no "invalid" payloads, if requested. + if matches!( + reset_payload_statuses, + ResetPayloadStatuses::OnlyWithInvalidPayload + ) && !contains_invalid_payloads + { + return Ok(proto_array); + } + + // Reset all blocks back to being "optimistic". This helps recover from an EL consensus + // fault where an invalid payload becomes valid. + if let Err(e) = proto_array.set_all_blocks_to_optimistic::<E>(spec) { + // If there is an error resetting the optimistic status then log loudly and revert + // back to a proto-array which does not have the reset applied. 
This indicates a + // significant error in Lighthouse and warrants detailed investigation. + crit!( + log, + "Failed to reset payload statuses"; + "error" => e, + "info" => "please report this error", + ); + ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes) + .map_err(Error::InvalidProtoArrayBytes) + } else { + debug!( + log, + "Successfully reset all payload statuses"; + ); + Ok(proto_array) + } + } + /// Instantiate `Self` from some `PersistedForkChoice` generated by a earlier call to /// `Self::to_persisted`. pub fn from_persisted( persisted: PersistedForkChoice, + reset_payload_statuses: ResetPayloadStatuses, fc_store: T, spec: &ChainSpec, + log: &Logger, ) -> Result<Self, Error<T::Error>> { - let proto_array = ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes) - .map_err(Error::InvalidProtoArrayBytes)?; + let proto_array = + Self::proto_array_from_persisted(&persisted, reset_payload_statuses, spec, log)?; let current_slot = fc_store.get_current_slot(); @@ -1456,7 +1530,16 @@ where // If a call to `get_head` fails, the only known cause is because the only head with viable // FFG properties is has an invalid payload. In this scenario, set all the payloads back to // an optimistic status so that we can have a head to start from. - if fork_choice.get_head(current_slot, spec).is_err() { + if let Err(e) = fork_choice.get_head(current_slot, spec) { + warn!( + log, + "Could not find head on persisted FC"; + "info" => "resetting all payload statuses and retrying", + "error" => ?e + ); + // Although we may have already made this call whilst loading `proto_array`, try it + // again since we may have mutated the `proto_array` during `get_head` and therefore may + // get a different result. 
fork_choice .proto_array .set_all_blocks_to_optimistic::<E>(spec)?; diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs index 6a4616e9f3..9604e25475 100644 --- a/consensus/fork_choice/src/fork_choice_store.rs +++ b/consensus/fork_choice/src/fork_choice_store.rs @@ -1,4 +1,5 @@ use std::collections::BTreeSet; +use std::fmt::Debug; use types::{BeaconBlockRef, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot}; /// Approximates the `Store` in "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice": @@ -18,7 +19,7 @@ use types::{BeaconBlockRef, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash2 /// concrete struct is to allow this crate to be free from "impure" on-disk database logic, /// hopefully making auditing easier. pub trait ForkChoiceStore<T: EthSpec>: Sized { - type Error; + type Error: Debug; /// Returns the last value passed to `Self::set_current_slot`. fn get_current_slot(&self) -> Slot; diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 6cb2010f1a..397a2ff893 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -4,7 +4,7 @@ mod fork_choice_store; pub use crate::fork_choice::{ AttestationFromBlock, CountUnrealized, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, - PersistedForkChoice, QueuedAttestation, + PersistedForkChoice, QueuedAttestation, ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation}; diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 9902ccb1cc..cc3f92d46e 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -317,6 +317,17 @@ impl ProtoArrayForkChoice { .map_err(|e| format!("find_head 
failed: {:?}", e)) } + /// Returns `true` if there are any blocks in `self` with an `INVALID` execution payload status. + /// + /// This will operate on *all* blocks, even those that do not descend from the finalized + /// ancestor. + pub fn contains_invalid_payloads(&mut self) -> bool { + self.proto_array + .nodes + .iter() + .any(|node| node.execution_status.is_invalid()) + } + /// For all nodes, regardless of their relationship to the finalized block, set their execution /// status to be optimistic. /// diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 14934a5669..4e110b85a1 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -178,6 +178,21 @@ fn count_unrealized_true() { .with_config(|config| assert!(config.chain.count_unrealized)); } +#[test] +fn reset_payload_statuses_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(!config.chain.always_reset_payload_statuses)); +} + +#[test] +fn reset_payload_statuses_present() { + CommandLineTest::new() + .flag("reset-payload-statuses", None) + .run_with_zero_port() + .with_config(|config| assert!(config.chain.always_reset_payload_statuses)); +} + #[test] fn freezer_dir_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); From 1a833ecc17420afab3b60c12a10c63e3ed792e8c Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Mon, 29 Aug 2022 14:34:42 +0000 Subject: [PATCH 159/184] Add more logging for invalid payloads (#3515) ## Issue Addressed NA ## Proposed Changes Adds more `debug` logging to help troubleshoot invalid execution payload blocks. I was doing some of this recently and found it to be challenging. With this PR we should be able to grep `Invalid execution payload` and get one-liners that will show the block, slot and details about the proposer. 
I also changed the log in `process_invalid_execution_payload` since it was a little misleading; the `block_root` wasn't necessary the block which had an invalid payload. ## Additional Info NA --- beacon_node/beacon_chain/src/beacon_chain.rs | 29 +++++++++++++--- .../beacon_chain/src/execution_payload.rs | 33 +++++++++++++++++-- consensus/proto_array/src/proto_array.rs | 2 +- 3 files changed, 56 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index e1d7a5cfc2..6637b8fd53 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3678,9 +3678,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> { ) -> Result<(), Error> { debug!( self.log, - "Invalid execution payload in block"; - "latest_valid_ancestor" => ?op.latest_valid_ancestor(), - "block_root" => ?op.block_root(), + "Processing payload invalidation"; + "op" => ?op, ); // Update the execution status in fork choice. @@ -4160,8 +4159,18 @@ impl<T: BeaconChainTypes> BeaconChain<T> { Ok(()) } PayloadStatus::Invalid { - latest_valid_hash, .. + latest_valid_hash, + ref validation_error, } => { + debug!( + self.log, + "Invalid execution payload"; + "validation_error" => ?validation_error, + "latest_valid_hash" => ?latest_valid_hash, + "head_hash" => ?head_hash, + "head_block_root" => ?head_block_root, + "method" => "fcU", + ); warn!( self.log, "Fork choice update invalidated payload"; @@ -4192,7 +4201,17 @@ impl<T: BeaconChainTypes> BeaconChain<T> { Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } - PayloadStatus::InvalidBlockHash { .. 
} => { + PayloadStatus::InvalidBlockHash { + ref validation_error, + } => { + debug!( + self.log, + "Invalid execution payload block hash"; + "validation_error" => ?validation_error, + "head_hash" => ?head_hash, + "head_block_root" => ?head_block_root, + "method" => "fcU", + ); warn!( self.log, "Fork choice update invalidated payload"; diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 7af171b794..2221d1fc7c 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -23,6 +23,7 @@ use state_processing::per_block_processing::{ }; use std::sync::Arc; use tokio::task::JoinHandle; +use tree_hash::TreeHash; use types::*; pub type PreparePayloadResult<Payload> = Result<Payload, BlockProductionError>; @@ -112,8 +113,22 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( Ok(PayloadVerificationStatus::Optimistic) } PayloadStatus::Invalid { - latest_valid_hash, .. + latest_valid_hash, + ref validation_error, } => { + debug!( + chain.log, + "Invalid execution payload"; + "validation_error" => ?validation_error, + "latest_valid_hash" => ?latest_valid_hash, + "execution_block_hash" => ?execution_payload.execution_payload.block_hash, + "root" => ?block.tree_hash_root(), + "graffiti" => block.body().graffiti().as_utf8_lossy(), + "proposer_index" => block.proposer_index(), + "slot" => block.slot(), + "method" => "new_payload", + ); + // latest_valid_hash == 0 implies that this was the terminal block // Hence, we don't need to run `BeaconChain::process_invalid_execution_payload`. if latest_valid_hash == ExecutionBlockHash::zero() { @@ -132,7 +147,21 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()) } - PayloadStatus::InvalidBlockHash { .. 
} => { + PayloadStatus::InvalidBlockHash { + ref validation_error, + } => { + debug!( + chain.log, + "Invalid execution payload block hash"; + "validation_error" => ?validation_error, + "execution_block_hash" => ?execution_payload.execution_payload.block_hash, + "root" => ?block.tree_hash_root(), + "graffiti" => block.body().graffiti().as_utf8_lossy(), + "proposer_index" => block.proposer_index(), + "slot" => block.slot(), + "method" => "new_payload", + ); + // Returning an error here should be sufficient to invalidate the block. We have no // information to indicate its parent is invalid, so no need to run // `BeaconChain::process_invalid_execution_payload`. diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 390eb902a7..9486f0bfc1 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -16,7 +16,7 @@ four_byte_option_impl!(four_byte_option_usize, usize); four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); /// Defines an operation which may invalidate the `execution_status` of some nodes. -#[derive(Clone)] +#[derive(Clone, Debug)] pub enum InvalidationOperation { /// Invalidate only `block_root` and it's descendants. Don't invalidate any ancestors. InvalidateOne { block_root: Hash256 }, From 7a50684741abd707b06a0a726439da2ec737edf6 Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Mon, 29 Aug 2022 14:34:43 +0000 Subject: [PATCH 160/184] Harden slot notifier against clock drift (#3519) ## Issue Addressed Partly resolves #3518 ## Proposed Changes Change the slot notifier to use `duration_to_next_slot` rather than an interval timer. This makes it robust against underlying clock changes. 
--- beacon_node/client/src/notifier.rs | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 11f0f6e2a2..1da7a79707 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -33,20 +33,9 @@ pub fn spawn_notifier<T: BeaconChainTypes>( seconds_per_slot: u64, ) -> Result<(), String> { let slot_duration = Duration::from_secs(seconds_per_slot); - let duration_to_next_slot = beacon_chain - .slot_clock - .duration_to_next_slot() - .ok_or("slot_notifier unable to determine time to next slot")?; - - // Run this half way through each slot. - let start_instant = tokio::time::Instant::now() + duration_to_next_slot + (slot_duration / 2); - - // Run this each slot. - let interval_duration = slot_duration; let speedo = Mutex::new(Speedo::default()); let log = executor.log().clone(); - let mut interval = tokio::time::interval_at(start_instant, interval_duration); // Keep track of sync state and reset the speedo on specific sync state changes. // Specifically, if we switch between a sync and a backfill sync, reset the speedo. @@ -82,7 +71,20 @@ pub fn spawn_notifier<T: BeaconChainTypes>( let mut last_backfill_log_slot = None; loop { - interval.tick().await; + // Run the notifier half way through each slot. + // + // Keep remeasuring the offset rather than using an interval, so that we can correct + // for system time clock adjustments. 
+ let wait = match beacon_chain.slot_clock.duration_to_next_slot() { + Some(duration) => duration + slot_duration / 2, + None => { + warn!(log, "Unable to read current slot"); + sleep(slot_duration).await; + continue; + } + }; + sleep(wait).await; + let connected_peer_count = network.connected_peers(); let sync_state = network.sync_state(); From ebd0e0e2d95259c10deb251016ff3130770bc6c3 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Mon, 29 Aug 2022 18:31:27 +0000 Subject: [PATCH 161/184] Docker builds in GitHub actions (#3523) ## Issue Addressed I think the antithesis is failing due to an OOM which may be resolved by updating the ubuntu image it runs on. The lcli build looks like it's failing because the image lacks the `libclang` dependency Co-authored-by: realbigsean <sean@sigmaprime.io> --- .github/workflows/docker-antithesis.yml | 2 +- lcli/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-antithesis.yml b/.github/workflows/docker-antithesis.yml index b7b35d1207..40de0bd0a5 100644 --- a/.github/workflows/docker-antithesis.yml +++ b/.github/workflows/docker-antithesis.yml @@ -15,7 +15,7 @@ env: jobs: build-docker: - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v2 - name: Update Rust diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 2a0e5a9d47..8fd3567cdc 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -2,7 +2,7 @@ # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` FROM rust:1.62.1-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . 
lighthouse ARG PORTABLE ENV PORTABLE $PORTABLE From 661307dce198200b067dd9935ec95d256a154c87 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Tue, 30 Aug 2022 05:47:31 +0000 Subject: [PATCH 162/184] Separate committee subscriptions queue (#3508) ## Issue Addressed NA ## Proposed Changes As we've seen on Prater, there seems to be a correlation between these messages ``` WARN Not enough time for a discovery search subnet_id: ExactSubnet { subnet_id: SubnetId(19), slot: Slot(3742336) }, service: attestation_service ``` ... and nodes falling 20-30 slots behind the head for short periods. These nodes are running ~20k Prater validators. After running some metrics, I can see that the `network_recv` channel is processing ~250k `AttestationSubscribe` messages per minute. It occurred to me that perhaps the `AttestationSubscribe` messages are "washing out" the `SendRequest` and `SendResponse` messages. In this PR I separate the `AttestationSubscribe` and `SyncCommitteeSubscribe` messages into their own queue so the `tokio::select!` in the `NetworkService` can still process the other messages in the `network_recv` channel without necessarily having to clear all the subscription messages first. 
~~I've also added filter to the HTTP API to prevent duplicate subscriptions going to the network service.~~ ## Additional Info - Currently being tested on Prater --- beacon_node/client/src/builder.rs | 24 ++--- beacon_node/http_api/src/lib.rs | 105 ++++++++++++++------- beacon_node/http_api/tests/common.rs | 14 +-- beacon_node/http_api/tests/tests.rs | 36 +++---- beacon_node/network/src/lib.rs | 4 +- beacon_node/network/src/metrics.rs | 14 +++ beacon_node/network/src/service.rs | 134 ++++++++++++++++++++------- common/eth2/src/types.rs | 2 +- 8 files changed, 231 insertions(+), 102 deletions(-) diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index d4c41244d2..752ba3b7bc 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -22,7 +22,7 @@ use execution_layer::ExecutionLayer; use genesis::{interop_genesis_state, Eth1GenesisService, DEFAULT_ETH1_BLOCK_HASH}; use lighthouse_network::{prometheus_client::registry::Registry, NetworkGlobals}; use monitoring_api::{MonitoringHttpClient, ProcessType}; -use network::{NetworkConfig, NetworkMessage, NetworkService}; +use network::{NetworkConfig, NetworkSenders, NetworkService}; use slasher::Slasher; use slasher_service::SlasherService; use slog::{debug, info, warn, Logger}; @@ -31,7 +31,7 @@ use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use timer::spawn_timer; -use tokio::sync::{mpsc::UnboundedSender, oneshot}; +use tokio::sync::oneshot; use types::{ test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, SignedBeaconBlock, @@ -66,7 +66,7 @@ pub struct ClientBuilder<T: BeaconChainTypes> { beacon_chain: Option<Arc<BeaconChain<T>>>, eth1_service: Option<Eth1Service>, network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>, - network_send: Option<UnboundedSender<NetworkMessage<T::EthSpec>>>, + network_senders: Option<NetworkSenders<T::EthSpec>>, gossipsub_registry: 
Option<Registry>, db_path: Option<PathBuf>, freezer_db_path: Option<PathBuf>, @@ -98,7 +98,7 @@ where beacon_chain: None, eth1_service: None, network_globals: None, - network_send: None, + network_senders: None, gossipsub_registry: None, db_path: None, freezer_db_path: None, @@ -397,7 +397,7 @@ where > = Arc::new(http_api::Context { config: self.http_api_config.clone(), chain: None, - network_tx: None, + network_senders: None, network_globals: None, eth1_service: Some(genesis_service.eth1_service.clone()), log: context.log().clone(), @@ -481,7 +481,7 @@ where None }; - let (network_globals, network_send) = NetworkService::start( + let (network_globals, network_senders) = NetworkService::start( beacon_chain, config, context.executor, @@ -493,7 +493,7 @@ where .map_err(|e| format!("Failed to start network: {:?}", e))?; self.network_globals = Some(network_globals); - self.network_send = Some(network_send); + self.network_senders = Some(network_senders); self.gossipsub_registry = gossipsub_registry; Ok(self) @@ -537,16 +537,16 @@ where .beacon_chain .clone() .ok_or("slasher service requires a beacon chain")?; - let network_send = self - .network_send + let network_senders = self + .network_senders .clone() - .ok_or("slasher service requires a network sender")?; + .ok_or("slasher service requires network senders")?; let context = self .runtime_context .as_ref() .ok_or("slasher requires a runtime_context")? 
.service_context("slasher_service_ctxt".into()); - SlasherService::new(beacon_chain, network_send).run(&context.executor) + SlasherService::new(beacon_chain, network_senders.network_send()).run(&context.executor) } /// Start the explorer client which periodically sends beacon @@ -616,7 +616,7 @@ where let ctx = Arc::new(http_api::Context { config: self.http_api_config.clone(), chain: self.beacon_chain.clone(), - network_tx: self.network_send.clone(), + network_senders: self.network_senders.clone(), network_globals: self.network_globals.clone(), eth1_service: self.eth1_service.clone(), log: log.clone(), diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 48178f4f0d..a21b674175 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -25,11 +25,10 @@ use beacon_chain::{ BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped, }; pub use block_id::BlockId; -use eth2::types::ValidatorStatus; -use eth2::types::{self as api_types, EndpointVersion, ValidatorId}; +use eth2::types::{self as api_types, EndpointVersion, ValidatorId, ValidatorStatus}; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; -use network::NetworkMessage; +use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; @@ -42,7 +41,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; use std::pin::Pin; use std::sync::Arc; -use tokio::sync::mpsc::UnboundedSender; +use tokio::sync::mpsc::{Sender, UnboundedSender}; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ Attestation, AttestationData, AttesterSlashing, BeaconStateError, BlindedPayload, @@ -93,7 +92,7 @@ pub struct TlsConfig { pub struct Context<T: BeaconChainTypes> { pub config: Config, pub chain: Option<Arc<BeaconChain<T>>>, 
- pub network_tx: Option<UnboundedSender<NetworkMessage<T::EthSpec>>>, + pub network_senders: Option<NetworkSenders<T::EthSpec>>, pub network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>, pub eth1_service: Option<eth1::Service>, pub log: Logger, @@ -337,14 +336,35 @@ pub fn serve<T: BeaconChainTypes>( }); // Create a `warp` filter that provides access to the network sender channel. - let inner_ctx = ctx.clone(); - let network_tx_filter = warp::any() - .map(move || inner_ctx.network_tx.clone()) - .and_then(|network_tx| async move { - match network_tx { - Some(network_tx) => Ok(network_tx), + let network_tx = ctx + .network_senders + .as_ref() + .map(|senders| senders.network_send()); + let network_tx_filter = + warp::any() + .map(move || network_tx.clone()) + .and_then(|network_tx| async move { + match network_tx { + Some(network_tx) => Ok(network_tx), + None => Err(warp_utils::reject::custom_not_found( + "The networking stack has not yet started (network_tx).".to_string(), + )), + } + }); + + // Create a `warp` filter that provides access to the network attestation subscription channel. + let validator_subscriptions_tx = ctx + .network_senders + .as_ref() + .map(|senders| senders.validator_subscription_send()); + let validator_subscription_tx_filter = warp::any() + .map(move || validator_subscriptions_tx.clone()) + .and_then(|validator_subscriptions_tx| async move { + match validator_subscriptions_tx { + Some(validator_subscriptions_tx) => Ok(validator_subscriptions_tx), None => Err(warp_utils::reject::custom_not_found( - "The networking stack has not yet started.".to_string(), + "The networking stack has not yet started (validator_subscription_tx)." + .to_string(), )), } }); @@ -2083,7 +2103,7 @@ pub fn serve<T: BeaconChainTypes>( .to_ref() .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; - // Pose as a V2 endpoint so we return the fork `version`. + // Pose as a V2 endpoint so we return the fork `version`. 
fork_versioned_response(V2, fork_name, block) .map(|response| warp::reply::json(&response)) }, @@ -2345,7 +2365,7 @@ pub fn serve<T: BeaconChainTypes>( .and(not_while_syncing_filter.clone()) .and(chain_filter.clone()) .and(warp::body::json()) - .and(network_tx_filter.clone()) + .and(network_tx_filter) .and(log_filter.clone()) .and_then( |chain: Arc<BeaconChain<T>>, @@ -2370,12 +2390,14 @@ pub fn serve<T: BeaconChainTypes>( .and(warp::path("beacon_committee_subscriptions")) .and(warp::path::end()) .and(warp::body::json()) - .and(network_tx_filter.clone()) + .and(validator_subscription_tx_filter.clone()) .and(chain_filter.clone()) + .and(log_filter.clone()) .and_then( |subscriptions: Vec<api_types::BeaconCommitteeSubscription>, - network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>, - chain: Arc<BeaconChain<T>>| { + validator_subscription_tx: Sender<ValidatorSubscriptionMessage>, + chain: Arc<BeaconChain<T>>, + log: Logger| { blocking_json_task(move || { for subscription in &subscriptions { chain @@ -2383,7 +2405,7 @@ pub fn serve<T: BeaconChainTypes>( .write() .auto_register_local_validator(subscription.validator_index); - let subscription = api_types::ValidatorSubscription { + let validator_subscription = api_types::ValidatorSubscription { validator_index: subscription.validator_index, attestation_committee_index: subscription.committee_index, slot: subscription.slot, @@ -2391,12 +2413,20 @@ pub fn serve<T: BeaconChainTypes>( is_aggregator: subscription.is_aggregator, }; - publish_network_message( - &network_tx, - NetworkMessage::AttestationSubscribe { - subscriptions: vec![subscription], - }, - )?; + let message = ValidatorSubscriptionMessage::AttestationSubscribe { + subscriptions: vec![validator_subscription], + }; + if let Err(e) = validator_subscription_tx.try_send(message) { + warn!( + log, + "Unable to process committee subscriptions"; + "info" => "the host may be overloaded or resource-constrained", + "error" => ?e, + ); + return 
Err(warp_utils::reject::custom_server_error( + "unable to queue subscription, host may be overloaded or shutting down".to_string(), + )); + } } Ok(()) @@ -2581,12 +2611,15 @@ pub fn serve<T: BeaconChainTypes>( .and(warp::path("sync_committee_subscriptions")) .and(warp::path::end()) .and(warp::body::json()) - .and(network_tx_filter) + .and(validator_subscription_tx_filter) .and(chain_filter.clone()) + .and(log_filter.clone()) .and_then( |subscriptions: Vec<types::SyncCommitteeSubscription>, - network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>, - chain: Arc<BeaconChain<T>>| { + validator_subscription_tx: Sender<ValidatorSubscriptionMessage>, + chain: Arc<BeaconChain<T>>, + log: Logger + | { blocking_json_task(move || { for subscription in subscriptions { chain @@ -2594,12 +2627,20 @@ pub fn serve<T: BeaconChainTypes>( .write() .auto_register_local_validator(subscription.validator_index); - publish_network_message( - &network_tx, - NetworkMessage::SyncCommitteeSubscribe { + let message = ValidatorSubscriptionMessage::SyncCommitteeSubscribe { subscriptions: vec![subscription], - }, - )?; + }; + if let Err(e) = validator_subscription_tx.try_send(message) { + warn!( + log, + "Unable to process sync subscriptions"; + "info" => "the host may be overloaded or resource-constrained", + "error" => ?e + ); + return Err(warp_utils::reject::custom_server_error( + "unable to queue subscription, host may be overloaded or shutting down".to_string(), + )); + } } Ok(()) diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index 1dd7aea923..032e1346fb 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -11,14 +11,14 @@ use lighthouse_network::{ types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, ConnectedPoint, Enr, NetworkGlobals, PeerId, PeerManager, }; -use network::NetworkMessage; +use network::{NetworkReceivers, NetworkSenders}; use sensitive_url::SensitiveUrl; use slog::Logger; 
use std::future::Future; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use std::time::Duration; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::oneshot; use types::{ChainSpec, EthSpec}; pub const TCP_PORT: u16 = 42; @@ -30,7 +30,7 @@ pub const EXTERNAL_ADDR: &str = "/ip4/0.0.0.0/tcp/9000"; pub struct InteractiveTester<E: EthSpec> { pub harness: BeaconChainHarness<EphemeralHarnessType<E>>, pub client: BeaconNodeHttpClient, - pub network_rx: mpsc::UnboundedReceiver<NetworkMessage<E>>, + pub network_rx: NetworkReceivers<E>, _server_shutdown: oneshot::Sender<()>, } @@ -41,7 +41,7 @@ pub struct ApiServer<E: EthSpec, SFut: Future<Output = ()>> { pub server: SFut, pub listening_socket: SocketAddr, pub shutdown_tx: oneshot::Sender<()>, - pub network_rx: tokio::sync::mpsc::UnboundedReceiver<NetworkMessage<E>>, + pub network_rx: NetworkReceivers<E>, pub local_enr: Enr, pub external_peer_id: PeerId, } @@ -97,7 +97,7 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>( log: Logger, port: u16, ) -> ApiServer<T::EthSpec, impl Future<Output = ()>> { - let (network_tx, network_rx) = mpsc::unbounded_channel(); + let (network_senders, network_receivers) = NetworkSenders::new(); // Default metadata let meta_data = MetaData::V2(MetaDataV2 { @@ -146,7 +146,7 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>( spec_fork_name: None, }, chain: Some(chain.clone()), - network_tx: Some(network_tx), + network_senders: Some(network_senders), network_globals: Some(network_globals), eth1_service: Some(eth1_service), log, @@ -163,7 +163,7 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>( server, listening_socket, shutdown_tx, - network_rx, + network_rx: network_receivers, local_enr: enr, external_peer_id: peer_id, } diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 3144060f10..c8e647be82 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -17,14 +17,14 
@@ use futures::stream::{Stream, StreamExt}; use futures::FutureExt; use http_api::{BlockId, StateId}; use lighthouse_network::{Enr, EnrExt, PeerId}; -use network::NetworkMessage; +use network::NetworkReceivers; use proto_array::ExecutionStatus; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; use state_processing::per_slot_processing; use std::convert::TryInto; use std::sync::Arc; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::oneshot; use tokio::time::Duration; use tree_hash::TreeHash; use types::application_domain::ApplicationDomain; @@ -65,7 +65,7 @@ struct ApiTester { proposer_slashing: ProposerSlashing, voluntary_exit: SignedVoluntaryExit, _server_shutdown: oneshot::Sender<()>, - network_rx: mpsc::UnboundedReceiver<NetworkMessage<E>>, + network_rx: NetworkReceivers<E>, local_enr: Enr, external_peer_id: PeerId, mock_builder: Option<Arc<TestingBuilder<E>>>, @@ -899,7 +899,7 @@ impl ApiTester { self.client.post_beacon_blocks(next_block).await.unwrap(); assert!( - self.network_rx.recv().await.is_some(), + self.network_rx.network_recv.recv().await.is_some(), "valid blocks should be sent to network" ); @@ -913,7 +913,7 @@ impl ApiTester { assert!(self.client.post_beacon_blocks(&next_block).await.is_err()); assert!( - self.network_rx.recv().await.is_some(), + self.network_rx.network_recv.recv().await.is_some(), "invalid blocks should be sent to network" ); @@ -1041,7 +1041,7 @@ impl ApiTester { .unwrap(); assert!( - self.network_rx.recv().await.is_some(), + self.network_rx.network_recv.recv().await.is_some(), "valid attestation should be sent to network" ); @@ -1078,7 +1078,7 @@ impl ApiTester { } assert!( - self.network_rx.recv().await.is_some(), + self.network_rx.network_recv.recv().await.is_some(), "if some attestations are valid, we should send them to the network" ); @@ -1108,7 +1108,7 @@ impl ApiTester { .unwrap(); assert!( - self.network_rx.recv().await.is_some(), + self.network_rx.network_recv.recv().await.is_some(), "valid attester slashing 
should be sent to network" ); @@ -1125,7 +1125,7 @@ impl ApiTester { .unwrap_err(); assert!( - self.network_rx.recv().now_or_never().is_none(), + self.network_rx.network_recv.recv().now_or_never().is_none(), "invalid attester slashing should not be sent to network" ); @@ -1154,7 +1154,7 @@ impl ApiTester { .unwrap(); assert!( - self.network_rx.recv().await.is_some(), + self.network_rx.network_recv.recv().await.is_some(), "valid proposer slashing should be sent to network" ); @@ -1171,7 +1171,7 @@ impl ApiTester { .unwrap_err(); assert!( - self.network_rx.recv().now_or_never().is_none(), + self.network_rx.network_recv.recv().now_or_never().is_none(), "invalid proposer slashing should not be sent to network" ); @@ -1200,7 +1200,7 @@ impl ApiTester { .unwrap(); assert!( - self.network_rx.recv().await.is_some(), + self.network_rx.network_recv.recv().await.is_some(), "valid exit should be sent to network" ); @@ -1217,7 +1217,7 @@ impl ApiTester { .unwrap_err(); assert!( - self.network_rx.recv().now_or_never().is_none(), + self.network_rx.network_recv.recv().now_or_never().is_none(), "invalid exit should not be sent to network" ); @@ -2351,7 +2351,7 @@ impl ApiTester { .await .unwrap(); - assert!(self.network_rx.recv().await.is_some()); + assert!(self.network_rx.network_recv.recv().await.is_some()); self } @@ -2366,7 +2366,7 @@ impl ApiTester { .await .unwrap_err(); - assert!(self.network_rx.recv().now_or_never().is_none()); + assert!(self.network_rx.network_recv.recv().now_or_never().is_none()); self } @@ -2385,7 +2385,11 @@ impl ApiTester { .await .unwrap(); - self.network_rx.recv().now_or_never().unwrap(); + self.network_rx + .validator_subscription_recv + .recv() + .now_or_never() + .unwrap(); self } diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 283d8dfb9e..648c636acc 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -18,4 +18,6 @@ mod subnet_service; mod sync; pub use 
lighthouse_network::NetworkConfig; -pub use service::{NetworkMessage, NetworkService}; +pub use service::{ + NetworkMessage, NetworkReceivers, NetworkSenders, NetworkService, ValidatorSubscriptionMessage, +}; diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 3605b94acf..defb9c6000 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -252,6 +252,20 @@ lazy_static! { "Gossipsub sync_committee errors per error type", &["type"] ); + + /* + * Network queue metrics + */ + pub static ref NETWORK_RECEIVE_EVENTS: Result<IntCounterVec> = try_create_int_counter_vec( + "network_receive_events", + "Count of events received by the channel to the network service", + &["type"] + ); + pub static ref NETWORK_RECEIVE_TIMES: Result<HistogramVec> = try_create_histogram_vec( + "network_receive_times", + "Time taken for network to handle an event sent to the network service.", + &["type"] + ); } lazy_static! { diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 9e3302af24..f5e32dcff0 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -26,6 +26,7 @@ use lighthouse_network::{ use slog::{crit, debug, error, info, o, trace, warn}; use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; use store::HotColdDB; +use strum::IntoStaticStr; use task_executor::ShutdownReason; use tokio::sync::mpsc; use tokio::time::Sleep; @@ -42,6 +43,9 @@ const METRIC_UPDATE_INTERVAL: u64 = 5; const SUBSCRIBE_DELAY_SLOTS: u64 = 2; /// Delay after a fork where we unsubscribe from pre-fork topics. const UNSUBSCRIBE_DELAY_EPOCHS: u64 = 2; +/// Size of the queue for validator subnet subscriptions. The number is chosen so that we may be +/// able to run tens of thousands of validators on one BN. +const VALIDATOR_SUBSCRIPTION_MESSAGE_QUEUE_SIZE: usize = 65_536; /// Application level requests sent to the network. 
#[derive(Debug, Clone, Copy)] @@ -51,15 +55,9 @@ pub enum RequestId { } /// Types of messages that the network service can receive. -#[derive(Debug)] +#[derive(Debug, IntoStaticStr)] +#[strum(serialize_all = "snake_case")] pub enum NetworkMessage<T: EthSpec> { - /// Subscribes a list of validators to specific slots for attestation duties. - AttestationSubscribe { - subscriptions: Vec<ValidatorSubscription>, - }, - SyncCommitteeSubscribe { - subscriptions: Vec<SyncCommitteeSubscription>, - }, /// Subscribes the beacon node to the core gossipsub topics. We do this when we are either /// synced or close to the head slot. SubscribeCoreTopics, @@ -115,6 +113,59 @@ pub enum NetworkMessage<T: EthSpec> { }, } +/// Messages triggered by validators that may trigger a subscription to a subnet. +/// +/// These messages can be very numerous with large validator counts (hundreds of thousands per +/// minute). Therefore we separate them from the separated from the `NetworkMessage` to provide +/// fairness regarding message processing. +#[derive(Debug, IntoStaticStr)] +#[strum(serialize_all = "snake_case")] +pub enum ValidatorSubscriptionMessage { + /// Subscribes a list of validators to specific slots for attestation duties. 
+ AttestationSubscribe { + subscriptions: Vec<ValidatorSubscription>, + }, + SyncCommitteeSubscribe { + subscriptions: Vec<SyncCommitteeSubscription>, + }, +} + +#[derive(Clone)] +pub struct NetworkSenders<E: EthSpec> { + network_send: mpsc::UnboundedSender<NetworkMessage<E>>, + validator_subscription_send: mpsc::Sender<ValidatorSubscriptionMessage>, +} + +pub struct NetworkReceivers<E: EthSpec> { + pub network_recv: mpsc::UnboundedReceiver<NetworkMessage<E>>, + pub validator_subscription_recv: mpsc::Receiver<ValidatorSubscriptionMessage>, +} + +impl<E: EthSpec> NetworkSenders<E> { + pub fn new() -> (Self, NetworkReceivers<E>) { + let (network_send, network_recv) = mpsc::unbounded_channel::<NetworkMessage<E>>(); + let (validator_subscription_send, validator_subscription_recv) = + mpsc::channel(VALIDATOR_SUBSCRIPTION_MESSAGE_QUEUE_SIZE); + let senders = Self { + network_send, + validator_subscription_send, + }; + let receivers = NetworkReceivers { + network_recv, + validator_subscription_recv, + }; + (senders, receivers) + } + + pub fn network_send(&self) -> mpsc::UnboundedSender<NetworkMessage<E>> { + self.network_send.clone() + } + + pub fn validator_subscription_send(&self) -> mpsc::Sender<ValidatorSubscriptionMessage> { + self.validator_subscription_send.clone() + } +} + /// Service that handles communication between internal services and the `lighthouse_network` network service. pub struct NetworkService<T: BeaconChainTypes> { /// A reference to the underlying beacon chain. @@ -127,6 +178,8 @@ pub struct NetworkService<T: BeaconChainTypes> { sync_committee_service: SyncCommitteeService<T>, /// The receiver channel for lighthouse to communicate with the network service. network_recv: mpsc::UnboundedReceiver<NetworkMessage<T::EthSpec>>, + /// The receiver channel for lighthouse to send validator subscription requests. 
+ validator_subscription_recv: mpsc::Receiver<ValidatorSubscriptionMessage>, /// The sending channel for the network service to send messages to be routed throughout /// lighthouse. router_send: mpsc::UnboundedSender<RouterMessage<T::EthSpec>>, @@ -168,18 +221,15 @@ impl<T: BeaconChainTypes> NetworkService<T> { config: &NetworkConfig, executor: task_executor::TaskExecutor, gossipsub_registry: Option<&'_ mut Registry>, - ) -> error::Result<( - Arc<NetworkGlobals<T::EthSpec>>, - mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>, - )> { + ) -> error::Result<(Arc<NetworkGlobals<T::EthSpec>>, NetworkSenders<T::EthSpec>)> { let network_log = executor.log().clone(); - // build the network channel - let (network_send, network_recv) = mpsc::unbounded_channel::<NetworkMessage<T::EthSpec>>(); + // build the channels for external comms + let (network_senders, network_recievers) = NetworkSenders::new(); // try and construct UPnP port mappings if required. let upnp_config = crate::nat::UPnPConfig::from(config); let upnp_log = network_log.new(o!("service" => "UPnP")); - let upnp_network_send = network_send.clone(); + let upnp_network_send = network_senders.network_send(); if config.upnp_enabled { executor.spawn_blocking( move || { @@ -244,7 +294,7 @@ impl<T: BeaconChainTypes> NetworkService<T> { let router_send = Router::spawn( beacon_chain.clone(), network_globals.clone(), - network_send.clone(), + network_senders.network_send(), executor.clone(), network_log.clone(), )?; @@ -263,6 +313,11 @@ impl<T: BeaconChainTypes> NetworkService<T> { // create a timer for updating gossipsub parameters let gossipsub_parameter_update = tokio::time::interval(Duration::from_secs(60)); + let NetworkReceivers { + network_recv, + validator_subscription_recv, + } = network_recievers; + // create the network service and spawn the task let network_log = network_log.new(o!("service" => "network")); let network_service = NetworkService { @@ -271,6 +326,7 @@ impl<T: BeaconChainTypes> NetworkService<T> { 
attestation_service, sync_committee_service, network_recv, + validator_subscription_recv, router_send, store, network_globals: network_globals.clone(), @@ -290,7 +346,7 @@ impl<T: BeaconChainTypes> NetworkService<T> { network_service.spawn_service(executor); - Ok((network_globals, network_send)) + Ok((network_globals, network_senders)) } /// Returns the required fork digests that gossipsub needs to subscribe to based on the current slot. @@ -358,6 +414,9 @@ impl<T: BeaconChainTypes> NetworkService<T> { // handle a message sent to the network Some(msg) = self.network_recv.recv() => self.on_network_msg(msg, &mut shutdown_sender).await, + // handle a message from a validator requesting a subscription to a subnet + Some(msg) = self.validator_subscription_recv.recv() => self.on_validator_subscription_msg(msg).await, + // process any attestation service events Some(msg) = self.attestation_service.next() => self.on_attestation_service_msg(msg), @@ -505,6 +564,9 @@ impl<T: BeaconChainTypes> NetworkService<T> { msg: NetworkMessage<T::EthSpec>, shutdown_sender: &mut Sender<ShutdownReason>, ) { + metrics::inc_counter_vec(&metrics::NETWORK_RECEIVE_EVENTS, &[(&msg).into()]); + let _timer = metrics::start_timer_vec(&metrics::NETWORK_RECEIVE_TIMES, &[(&msg).into()]); + match msg { NetworkMessage::SendRequest { peer_id, @@ -606,22 +668,6 @@ impl<T: BeaconChainTypes> NetworkService<T> { reason, source, } => self.libp2p.goodbye_peer(&peer_id, reason, source), - NetworkMessage::AttestationSubscribe { subscriptions } => { - if let Err(e) = self - .attestation_service - .validator_subscriptions(subscriptions) - { - warn!(self.log, "Attestation validator subscription failed"; "error" => e); - } - } - NetworkMessage::SyncCommitteeSubscribe { subscriptions } => { - if let Err(e) = self - .sync_committee_service - .validator_subscriptions(subscriptions) - { - warn!(self.log, "Sync committee calidator subscription failed"; "error" => e); - } - } NetworkMessage::SubscribeCoreTopics => { if 
self.shutdown_after_sync { if let Err(e) = shutdown_sender @@ -704,6 +750,28 @@ impl<T: BeaconChainTypes> NetworkService<T> { } } + /// Handle a message sent to the network service. + async fn on_validator_subscription_msg(&mut self, msg: ValidatorSubscriptionMessage) { + match msg { + ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions } => { + if let Err(e) = self + .attestation_service + .validator_subscriptions(subscriptions) + { + warn!(self.log, "Attestation validator subscription failed"; "error" => e); + } + } + ValidatorSubscriptionMessage::SyncCommitteeSubscribe { subscriptions } => { + if let Err(e) = self + .sync_committee_service + .validator_subscriptions(subscriptions) + { + warn!(self.log, "Sync committee calidator subscription failed"; "error" => e); + } + } + } + } + fn update_gossipsub_parameters(&mut self) { if let Ok(slot) = self.beacon_chain.slot() { let active_validators_opt = self diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 340d38b85a..0f8ec51233 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -682,7 +682,7 @@ pub struct ValidatorAggregateAttestationQuery { pub slot: Slot, } -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] pub struct BeaconCommitteeSubscription { #[serde(with = "eth2_serde_utils::quoted_u64")] pub validator_index: u64, From c5785887a97e5f61d83f2bbe1f5ff8886dad4adb Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay <pawandhananjay@gmail.com> Date: Tue, 30 Aug 2022 05:47:32 +0000 Subject: [PATCH 163/184] Log fee recipients in VC (#3526) ## Issue Addressed Resolves #3524 ## Proposed Changes Log fee recipient in the `Validator exists in beacon chain` log. 
Logging in the BN already happens here https://github.com/sigp/lighthouse/blob/18c61a5e8be3e54226a86a69b96f8f4f7fd790e4/beacon_node/beacon_chain/src/beacon_chain.rs#L3858-L3865 I also think it's good practice to encourage users to set the fee recipient in the VC rather than the BN because of issues mentioned here https://github.com/sigp/lighthouse/issues/3432 Some example logs from prater: ``` Aug 30 03:47:09.922 INFO Validator exists in beacon chain fee_recipient: 0xab97_ad88, validator_index: 213615, pubkey: 0xb542b69ba14ddbaf717ca1762ece63a4804c08d38a1aadf156ae718d1545942e86763a1604f5065d4faa550b7259d651, service: duties Aug 30 03:48:05.505 INFO Validator exists in beacon chain fee_recipient: Fee recipient for validator not set in validator_definitions.yml or provided with the `--suggested-fee-recipient flag`, validator_index: 210710, pubkey: 0xad5d67cc7f990590c7b3fa41d593c4cf12d9ead894be2311fbb3e5c733d8c1b909e9d47af60ea3480fb6b37946c35390, service: duties ``` Co-authored-by: Paul Hauner <paul@paulhauner.com> --- validator_client/src/duties_service.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 3e15b39ab6..60b617e6c8 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -400,13 +400,23 @@ async fn poll_validator_indices<T: SlotClock + 'static, E: EthSpec>( ) .await; + let fee_recipient = duties_service + .validator_store + .get_fee_recipient(&pubkey) + .map(|fr| fr.to_string()) + .unwrap_or_else(|| { + "Fee recipient for validator not set in validator_definitions.yml \ + or provided with the `--suggested-fee-recipient` flag" + .to_string() + }); match download_result { Ok(Some(response)) => { info!( log, "Validator exists in beacon chain"; "pubkey" => ?pubkey, - "validator_index" => response.data.index + "validator_index" => response.data.index, + "fee_recipient" => fee_recipient ); 
duties_service .validator_store @@ -420,7 +430,8 @@ async fn poll_validator_indices<T: SlotClock + 'static, E: EthSpec>( debug!( log, "Validator without index"; - "pubkey" => ?pubkey + "pubkey" => ?pubkey, + "fee_recipient" => fee_recipient ) } // Don't exit early on an error, keep attempting to resolve other indices. @@ -430,6 +441,7 @@ async fn poll_validator_indices<T: SlotClock + 'static, E: EthSpec>( "Failed to resolve pubkey to index"; "error" => %e, "pubkey" => ?pubkey, + "fee_recipient" => fee_recipient ) } } From aa022f46855df2a1420a6a80a788c73dc2779aa7 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Wed, 31 Aug 2022 22:21:55 +0000 Subject: [PATCH 164/184] v3.1.0 (#3525) ## Issue Addressed NA ## Proposed Changes - Bump versions ## Additional Info - ~~Blocked on #3508~~ - ~~Blocked on #3526~~ - ~~Requires additional testing.~~ - Expected release date is 2022-09-01 --- Cargo.lock | 16 ++++++++-------- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index faad708f41..ec7315c6ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -436,7 +436,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "3.0.0" +version = "3.1.0" dependencies = [ "beacon_chain", "clap", @@ -594,7 +594,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "3.0.0" +version = "3.1.0" dependencies = [ "beacon_node", "clap", @@ -3106,7 +3106,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "3.0.0" +version = "3.1.0" dependencies = [ "account_utils", "beacon_chain", @@ -3606,7 +3606,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "3.0.0" +version = "3.1.0" dependencies = [ "account_manager", "account_utils", @@ -4585,9 +4585,9 @@ checksum = 
"d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pest" -version = "2.2.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69486e2b8c2d2aeb9762db7b4e00b0331156393555cff467f4163ff06821eef8" +checksum = "4b0560d531d1febc25a3c9398a62a71256c0178f2e3443baedd9ad4bb8c9deb4" dependencies = [ "thiserror", "ucd-trie", @@ -4706,9 +4706,9 @@ checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "plotters" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9428003b84df1496fb9d6eeee9c5f8145cb41ca375eb0dad204328888832811f" +checksum = "716b4eeb6c4a1d3ecc956f75b43ec2e8e8ba80026413e70a3f41fd3313d3492b" dependencies = [ "num-traits", "plotters-backend", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index e580a7e968..7245258bb2 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "3.0.0" +version = "3.1.0" authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"] edition = "2021" diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index b6aa9b4f34..4c44eaa602 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "3.0.0" +version = "3.1.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 4963f98cd5..85baa47fbb 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v3.0.0-", - fallback = "Lighthouse/v3.0.0" + prefix = "Lighthouse/v3.1.0-", + fallback = "Lighthouse/v3.1.0" ); /// 
Returns `VERSION`, but with platform information appended to the end. diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 8c76b8f39b..a39abb3f78 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "3.0.0" +version = "3.1.0" authors = ["Paul Hauner <paul@paulhauner.com>"] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 9250ef82a8..b74e1516f4 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "3.0.0" +version = "3.1.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" autotests = false From 473abc14cacbe70c2b4f32f546265c860e91f4c1 Mon Sep 17 00:00:00 2001 From: Divma <divma@protonmail.com> Date: Mon, 5 Sep 2022 00:22:48 +0000 Subject: [PATCH 165/184] Subscribe to subnets only when needed (#3419) ## Issue Addressed We currently subscribe to attestation subnets as soon as the subscription arrives (one epoch in advance), this makes it so that subscriptions for future slots are scheduled instead of done immediately. ## Proposed Changes - Schedule subscriptions to subnets for future slots. - Finish removing hashmap_delay, in favor of [delay_map](https://github.com/AgeManning/delay_map). This was the only remaining service to do this. - Subscriptions for past slots are rejected, before we would subscribe for one slot. - Add a new test for subscriptions that are not consecutive. 
## Additional Info This is also an effort in making the code easier to understand --- Cargo.lock | 23 +- Cargo.toml | 1 - beacon_node/lighthouse_network/Cargo.toml | 2 +- .../src/peer_manager/mod.rs | 2 +- beacon_node/network/Cargo.toml | 2 +- .../src/subnet_service/attestation_subnets.rs | 694 ++++++++++-------- .../src/subnet_service/sync_subnets.rs | 2 +- .../network/src/subnet_service/tests/mod.rs | 207 +++++- common/hashset_delay/Cargo.toml | 12 - common/hashset_delay/src/hashset_delay.rs | 197 ----- common/hashset_delay/src/lib.rs | 12 - 11 files changed, 587 insertions(+), 567 deletions(-) delete mode 100644 common/hashset_delay/Cargo.toml delete mode 100644 common/hashset_delay/src/hashset_delay.rs delete mode 100644 common/hashset_delay/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index ec7315c6ca..d742276687 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1250,6 +1250,16 @@ version = "0.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b72465f46d518f6015d9cf07f7f3013a95dd6b9c2747c3d65ae0cce43929d14f" +[[package]] +name = "delay_map" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6716ce9729be9628979ae1ff63e8bc8b7ad53b5472a2633bf079607a55328d36" +dependencies = [ + "futures", + "tokio-util 0.6.10", +] + [[package]] name = "deposit_contract" version = "0.2.0" @@ -2554,15 +2564,6 @@ dependencies = [ "hashbrown 0.11.2", ] -[[package]] -name = "hashset_delay" -version = "0.2.0" -dependencies = [ - "futures", - "tokio", - "tokio-util 0.6.10", -] - [[package]] name = "headers" version = "0.3.7" @@ -3656,6 +3657,7 @@ dependencies = [ name = "lighthouse_network" version = "0.2.0" dependencies = [ + "delay_map", "directory", "dirs", "discv5", @@ -3666,7 +3668,6 @@ dependencies = [ "exit-future", "fnv", "futures", - "hashset_delay", "hex", "lazy_static", "libp2p", @@ -4146,6 +4147,7 @@ name = "network" version = "0.2.0" dependencies = [ "beacon_chain", + "delay_map", "derivative", 
"environment", "error-chain", @@ -4155,7 +4157,6 @@ dependencies = [ "fnv", "futures", "genesis", - "hashset_delay", "hex", "if-addrs 0.6.7", "igd", diff --git a/Cargo.toml b/Cargo.toml index 819f92d99e..a71a97a959 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,6 @@ members = [ "common/eth2_interop_keypairs", "common/eth2_network_config", "common/eth2_wallet_manager", - "common/hashset_delay", "common/lighthouse_metrics", "common/lighthouse_version", "common/lockfile", diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index bbef8a301b..c6ba530508 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -8,7 +8,6 @@ edition = "2021" discv5 = { version = "0.1.0-beta.13", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } -hashset_delay = { path = "../../common/hashset_delay" } eth2_ssz_types = "0.2.2" serde = { version = "1.0.116", features = ["derive"] } serde_derive = "1.0.116" @@ -40,6 +39,7 @@ strum = { version = "0.24.0", features = ["derive"] } superstruct = "0.5.0" prometheus-client = "0.16.0" unused_port = { path = "../../common/unused_port" } +delay_map = "0.1.1" [dependencies.libp2p] version = "0.45.1" diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 55b3884454..63d0816604 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -5,8 +5,8 @@ use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCo use crate::{error, metrics, Gossipsub}; use crate::{NetworkGlobals, PeerId}; use crate::{Subnet, SubnetDiscovery}; +use delay_map::HashSetDelay; use discv5::Enr; -use hashset_delay::HashSetDelay; use libp2p::identify::IdentifyInfo; use peerdb::{client::ClientKind, BanOperation, BanResult, 
ScoreUpdateResult}; use rand::seq::SliceRandom; diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 5aae8652e7..87c7650fb5 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -17,7 +17,6 @@ environment = { path = "../../lighthouse/environment" } beacon_chain = { path = "../beacon_chain" } store = { path = "../store" } lighthouse_network = { path = "../lighthouse_network" } -hashset_delay = { path = "../../common/hashset_delay" } types = { path = "../../consensus/types" } slot_clock = { path = "../../common/slot_clock" } slog = { version = "2.5.2", features = ["max_level_trace"] } @@ -44,3 +43,4 @@ if-addrs = "0.6.4" strum = "0.24.0" tokio-util = { version = "0.6.3", features = ["time"] } derivative = "2.2.0" +delay_map = "0.1.1" diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index 475bd7f17d..ecca3c9682 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -3,19 +3,20 @@ //! determines whether attestations should be aggregated and/or passed to the beacon node. 
use super::SubnetServiceMessage; -use std::collections::{HashMap, HashSet, VecDeque}; +#[cfg(test)] +use std::collections::HashSet; +use std::collections::{HashMap, VecDeque}; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; -use std::time::{Duration, Instant}; - -use futures::prelude::*; -use rand::seq::SliceRandom; -use slog::{debug, error, o, trace, warn}; +use std::time::Duration; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use hashset_delay::HashSetDelay; +use delay_map::{HashMapDelay, HashSetDelay}; +use futures::prelude::*; use lighthouse_network::{NetworkConfig, Subnet, SubnetDiscovery}; +use rand::seq::SliceRandom; +use slog::{debug, error, o, trace, warn}; use slot_clock::SlotClock; use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription}; @@ -24,20 +25,29 @@ use crate::metrics; /// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the /// slot is less than this number, skip the peer discovery process. /// Subnet discovery query takes at most 30 secs, 2 slots take 24s. -const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; -/// The time (in slots) before a last seen validator is considered absent and we unsubscribe from the random -/// gossip topics that we subscribed to due to the validator connection. -const LAST_SEEN_VALIDATOR_TIMEOUT: u32 = 150; +pub(crate) const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; +/// The time (in slots) before a last seen validator is considered absent and we unsubscribe from +/// the random gossip topics that we subscribed to due to the validator connection. +const LAST_SEEN_VALIDATOR_TIMEOUT_SLOTS: u32 = 150; /// The fraction of a slot that we subscribe to a subnet before the required slot. /// -/// Note: The time is calculated as `time = seconds_per_slot / ADVANCE_SUBSCRIPTION_TIME`. -const ADVANCE_SUBSCRIBE_TIME: u32 = 3; -/// The default number of slots before items in hash delay sets used by this class should expire. 
-/// 36s at 12s slot time -const DEFAULT_EXPIRATION_TIMEOUT: u32 = 3; +/// Currently a whole slot ahead. +const ADVANCE_SUBSCRIBE_SLOT_FRACTION: u32 = 1; + +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] +pub(crate) enum SubscriptionKind { + /// Long lived subscriptions. + /// + /// These have a longer duration and are advertised in our ENR. + LongLived, + /// Short lived subscriptions. + /// + /// Subscribing to these subnets has a short duration and we don't advertise it in our ENR. + ShortLived, +} /// A particular subnet at a given slot. -#[derive(PartialEq, Eq, Hash, Clone, Debug)] +#[derive(PartialEq, Eq, Hash, Clone, Debug, Copy)] pub struct ExactSubnet { /// The `SubnetId` associated with this subnet. pub subnet_id: SubnetId, @@ -52,17 +62,22 @@ pub struct AttestationService<T: BeaconChainTypes> { /// A reference to the beacon chain to process received attestations. pub(crate) beacon_chain: Arc<BeaconChain<T>>, - /// The collection of currently subscribed random subnets mapped to their expiry deadline. - pub(crate) random_subnets: HashSetDelay<SubnetId>, + /// Subnets we are currently subscribed to as short lived subscriptions. + /// + /// Once they expire, we unsubscribe from these. + short_lived_subscriptions: HashMapDelay<SubnetId, Slot>, - /// The collection of all currently subscribed subnets (long-lived **and** short-lived). - subscriptions: HashSet<SubnetId>, + /// Subnets we are currently subscribed to as long lived subscriptions. + /// + /// We advertise these in our ENR. When these expire, the subnet is removed from our ENR. + long_lived_subscriptions: HashMapDelay<SubnetId, Slot>, - /// A collection of timeouts for when to unsubscribe from a shard subnet. - unsubscriptions: HashSetDelay<ExactSubnet>, + /// Short lived subscriptions that need to be done in the future. + scheduled_short_lived_subscriptions: HashSetDelay<ExactSubnet>, - /// A collection timeouts to track the existence of aggregate validator subscriptions at an `ExactSubnet`. 
- aggregate_validators_on_subnet: HashSetDelay<ExactSubnet>, + /// A collection timeouts to track the existence of aggregate validator subscriptions at an + /// `ExactSubnet`. + aggregate_validators_on_subnet: Option<HashSetDelay<ExactSubnet>>, /// A collection of seen validators. These dictate how many random subnets we should be /// subscribed to. As these time out, we unsubscribe for the required random subnets and update @@ -79,8 +94,8 @@ pub struct AttestationService<T: BeaconChainTypes> { /// We are always subscribed to all subnets. subscribe_all_subnets: bool, - /// We process and aggregate all attestations on subscribed subnets. - import_all_attestations: bool, + /// For how many slots we subscribe to long lived subnets. + long_lived_subnet_subscription_slots: u64, /// The logger for the attestation service. log: slog::Logger, @@ -96,34 +111,36 @@ impl<T: BeaconChainTypes> AttestationService<T> { ) -> Self { let log = log.new(o!("service" => "attestation_service")); - // calculate the random subnet duration from the spec constants + // Calculate the random subnet duration from the spec constants. let spec = &beacon_chain.spec; let slot_duration = beacon_chain.slot_clock.slot_duration(); - let random_subnet_duration_millis = spec + let long_lived_subnet_subscription_slots = spec .epochs_per_random_subnet_subscription - .saturating_mul(T::EthSpec::slots_per_epoch()) - .saturating_mul(slot_duration.as_millis() as u64); + .saturating_mul(T::EthSpec::slots_per_epoch()); + let long_lived_subscription_duration = Duration::from_millis( + slot_duration.as_millis() as u64 * long_lived_subnet_subscription_slots, + ); - // Panics on overflow. Ensure LAST_SEEN_VALIDATOR_TIMEOUT is not too large. + // Panics on overflow. Ensure LAST_SEEN_VALIDATOR_TIMEOUT_SLOTS is not too large. 
let last_seen_val_timeout = slot_duration - .checked_mul(LAST_SEEN_VALIDATOR_TIMEOUT) + .checked_mul(LAST_SEEN_VALIDATOR_TIMEOUT_SLOTS) .expect("LAST_SEEN_VALIDATOR_TIMEOUT must not be ridiculously large"); - let default_timeout = slot_duration - .checked_mul(DEFAULT_EXPIRATION_TIMEOUT) - .expect("DEFAULT_EXPIRATION_TIMEOUT must not be ridiculoustly large"); + let track_validators = !config.import_all_attestations; + let aggregate_validators_on_subnet = + track_validators.then(|| HashSetDelay::new(slot_duration)); AttestationService { events: VecDeque::with_capacity(10), beacon_chain, - random_subnets: HashSetDelay::new(Duration::from_millis(random_subnet_duration_millis)), - subscriptions: HashSet::new(), - unsubscriptions: HashSetDelay::new(default_timeout), - aggregate_validators_on_subnet: HashSetDelay::new(default_timeout), + short_lived_subscriptions: HashMapDelay::new(slot_duration), + long_lived_subscriptions: HashMapDelay::new(long_lived_subscription_duration), + scheduled_short_lived_subscriptions: HashSetDelay::default(), + aggregate_validators_on_subnet, known_validators: HashSetDelay::new(last_seen_val_timeout), waker: None, - subscribe_all_subnets: config.subscribe_all_subnets, - import_all_attestations: config.import_all_attestations, discovery_disabled: config.disable_discovery, + subscribe_all_subnets: config.subscribe_all_subnets, + long_lived_subnet_subscription_slots, log, } } @@ -134,10 +151,25 @@ impl<T: BeaconChainTypes> AttestationService<T> { if self.subscribe_all_subnets { self.beacon_chain.spec.attestation_subnet_count as usize } else { - self.subscriptions.len() + self.short_lived_subscriptions + .keys() + .chain(self.long_lived_subscriptions.keys()) + .collect::<HashSet<_>>() + .len() } } + /// Give access to the current subscriptions for testing purposes. 
+ #[cfg(test)] + pub(crate) fn subscriptions( + &self, + subscription_kind: SubscriptionKind, + ) -> &HashMapDelay<SubnetId, Slot> { + match subscription_kind { + SubscriptionKind::LongLived => &self.long_lived_subscriptions, + SubscriptionKind::ShortLived => &self.short_lived_subscriptions, + } + } /// Processes a list of validator subscriptions. /// /// This will: @@ -158,7 +190,6 @@ impl<T: BeaconChainTypes> AttestationService<T> { let mut subnets_to_discover: HashMap<SubnetId, Slot> = HashMap::new(); for subscription in subscriptions { metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_REQUESTS); - //NOTE: We assume all subscriptions have been verified before reaching this service // Registers the validator with the attestation service. // This will subscribe to long-lived random subnets if required. @@ -205,8 +236,7 @@ impl<T: BeaconChainTypes> AttestationService<T> { if subscription.is_aggregator { metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS); - // set the subscription timer to subscribe to the next subnet if required - if let Err(e) = self.subscribe_to_subnet(exact_subnet.clone()) { + if let Err(e) = self.subscribe_to_subnet(exact_subnet) { warn!(self.log, "Subscription to subnet error"; "error" => e, @@ -234,10 +264,6 @@ impl<T: BeaconChainTypes> AttestationService<T> { }; } - // pre-emptively wake the thread to check for new events - if let Some(waker) = &self.waker { - waker.wake_by_ref(); - } Ok(()) } @@ -248,19 +274,27 @@ impl<T: BeaconChainTypes> AttestationService<T> { subnet: SubnetId, attestation: &Attestation<T::EthSpec>, ) -> bool { - if self.import_all_attestations { - return true; - } - - let exact_subnet = ExactSubnet { - subnet_id: subnet, - slot: attestation.data.slot, - }; - self.aggregate_validators_on_subnet.contains(&exact_subnet) + self.aggregate_validators_on_subnet + .as_ref() + .map(|tracked_vals| { + tracked_vals.contains_key(&ExactSubnet { + subnet_id: subnet, + slot: attestation.data.slot, + }) + }) + 
.unwrap_or(true) } /* Internal private functions */ + /// Adds an event to the event queue and notifies that this service is ready to be polled + /// again. + fn queue_event(&mut self, ev: SubnetServiceMessage) { + self.events.push_back(ev); + if let Some(waker) = &self.waker { + waker.wake_by_ref() + } + } /// Checks if there are currently queued discovery requests and the time required to make the /// request. /// @@ -277,12 +311,13 @@ impl<T: BeaconChainTypes> AttestationService<T> { let discovery_subnets: Vec<SubnetDiscovery> = exact_subnets .filter_map(|exact_subnet| { - // check if there is enough time to perform a discovery lookup + // Check if there is enough time to perform a discovery lookup. if exact_subnet.slot >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD) { - // if the slot is more than epoch away, add an event to start looking for peers - // add one slot to ensure we keep the peer for the subscription slot + // Send out an event to start looking for peers. + // Require the peer for an additional slot to ensure we keep the peer for the + // duration of the subscription. let min_ttl = self .beacon_chain .slot_clock @@ -305,244 +340,279 @@ impl<T: BeaconChainTypes> AttestationService<T> { .collect(); if !discovery_subnets.is_empty() { - self.events - .push_back(SubnetServiceMessage::DiscoverPeers(discovery_subnets)); + self.queue_event(SubnetServiceMessage::DiscoverPeers(discovery_subnets)); } Ok(()) } - /// Checks the current random subnets and subscriptions to determine if a new subscription for this - /// subnet is required for the given slot. - /// - /// If required, adds a subscription event and an associated unsubscription event. 
- fn subscribe_to_subnet(&mut self, exact_subnet: ExactSubnet) -> Result<(), &'static str> { - // initialise timing variables - let current_slot = self - .beacon_chain - .slot_clock - .now() - .ok_or("Could not get the current slot")?; + // Subscribes to the subnet if it should be done immediately, or schedules it if required. + fn subscribe_to_subnet( + &mut self, + ExactSubnet { subnet_id, slot }: ExactSubnet, + ) -> Result<(), &'static str> { + let slot_duration = self.beacon_chain.slot_clock.slot_duration(); - // Calculate the duration to the unsubscription event. - // There are two main cases. Attempting to subscribe to the current slot and all others. - let expected_end_subscription_duration = if current_slot >= exact_subnet.slot { - self.beacon_chain + // Calculate how long before we need to subscribe to the subnet. + let time_to_subscription_start = { + // The short time we schedule the subscription before it's actually required. This + // ensures we are subscribed on time, and allows consecutive subscriptions to the same + // subnet to overlap, reducing subnet churn. + let advance_subscription_duration = slot_duration / ADVANCE_SUBSCRIBE_SLOT_FRACTION; + // The time to the required slot. + let time_to_subscription_slot = self + .beacon_chain .slot_clock - .duration_to_next_slot() - .ok_or("Unable to determine duration to next slot")? - } else { - let slot_duration = self.beacon_chain.slot_clock.slot_duration(); - - // the duration until we no longer need this subscription. We assume a single slot is - // sufficient. - self.beacon_chain - .slot_clock - .duration_to_slot(exact_subnet.slot) - .ok_or("Unable to determine duration to subscription slot")? - + slot_duration + .duration_to_slot(slot) + .unwrap_or_default(); // If this is a past slot we will just get a 0 duration. 
+ time_to_subscription_slot.saturating_sub(advance_subscription_duration) }; - // Regardless of whether or not we have already subscribed to a subnet, track the expiration - // of aggregate validator subscriptions to exact subnets so we know whether or not to drop - // attestations for a given subnet + slot - self.aggregate_validators_on_subnet - .insert_at(exact_subnet.clone(), expected_end_subscription_duration); - - // Checks on current subscriptions - // Note: We may be connected to a long-lived random subnet. In this case we still add the - // subscription timeout and check this case when the timeout fires. This is because a - // long-lived random subnet can be unsubscribed at any time when a validator becomes - // in-active. This case is checked on the subscription event (see `handle_subscriptions`). - - // Return if we already have a subscription for this subnet_id and slot - if self.unsubscriptions.contains(&exact_subnet) || self.subscribe_all_subnets { - return Ok(()); + if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { + tracked_vals.insert(ExactSubnet { subnet_id, slot }); } - // We are not currently subscribed and have no waiting subscription, create one - self.handle_subscriptions(exact_subnet.clone()); + // If the subscription should be done in the future, schedule it. Otherwise subscribe + // immediately. + if time_to_subscription_start.is_zero() { + // This is a current or past slot, we subscribe immediately. + self.subscribe_to_subnet_immediately( + subnet_id, + SubscriptionKind::ShortLived, + slot + 1, + )?; + } else { + // This is a future slot, schedule subscribing. 
+ trace!(self.log, "Scheduling subnet subscription"; "subnet" => ?subnet_id, "time_to_subscription_start" => ?time_to_subscription_start); + self.scheduled_short_lived_subscriptions + .insert_at(ExactSubnet { subnet_id, slot }, time_to_subscription_start); + } - // if there is an unsubscription event for the slot prior, we remove it to prevent - // unsubscriptions immediately after the subscription. We also want to minimize - // subscription churn and maintain a consecutive subnet subscriptions. - self.unsubscriptions.retain(|subnet| { - !(subnet.subnet_id == exact_subnet.subnet_id && subnet.slot <= exact_subnet.slot) - }); - // add an unsubscription event to remove ourselves from the subnet once completed - self.unsubscriptions - .insert_at(exact_subnet, expected_end_subscription_duration); Ok(()) } - /// Updates the `known_validators` mapping and subscribes to a set of random subnets if required. - /// - /// This also updates the ENR to indicate our long-lived subscription to the subnet + /// Updates the `known_validators` mapping and subscribes to long lived subnets if required. fn add_known_validator(&mut self, validator_index: u64) { - if self.known_validators.get(&validator_index).is_none() && !self.subscribe_all_subnets { - // New validator has subscribed - // Subscribe to random topics and update the ENR if needed. - - let spec = &self.beacon_chain.spec; - - if self.random_subnets.len() < spec.attestation_subnet_count as usize { - // Still room for subscriptions - self.subscribe_to_random_subnets( - self.beacon_chain.spec.random_subnets_per_validator as usize, - ); - } - } - // add the new validator or update the current timeout for a known validator + let previously_known = self.known_validators.contains_key(&validator_index); + // Add the new validator or update the current timeout for a known validator. self.known_validators.insert(validator_index); + if !previously_known { + // New validator has subscribed. 
+ // Subscribe to random topics and update the ENR if needed. + self.subscribe_to_random_subnets(); + } } /// Subscribe to long-lived random subnets and update the local ENR bitfield. - fn subscribe_to_random_subnets(&mut self, no_subnets_to_subscribe: usize) { - let subnet_count = self.beacon_chain.spec.attestation_subnet_count; + /// The number of subnets to subscribe depends on the number of active validators and number of + /// current subscriptions. + fn subscribe_to_random_subnets(&mut self) { + if self.subscribe_all_subnets { + // This case is not handled by this service. + return; + } - // Build a list of random subnets that we are not currently subscribed to. - let available_subnets = (0..subnet_count) + let max_subnets = self.beacon_chain.spec.attestation_subnet_count; + // Calculate how many subnets we need, + let required_long_lived_subnets = { + let subnets_for_validators = self + .known_validators + .len() + .saturating_mul(self.beacon_chain.spec.random_subnets_per_validator as usize); + subnets_for_validators // How many subnets we need + .min(max_subnets as usize) // Capped by the max + .saturating_sub(self.long_lived_subscriptions.len()) // Minus those we have + }; + + if required_long_lived_subnets == 0 { + // Nothing to do. + return; + } + + // Build a list of the subnets that we are not currently advertising. 
+ let available_subnets = (0..max_subnets) .map(SubnetId::new) - .filter(|subnet_id| self.random_subnets.get(subnet_id).is_none()) + .filter(|subnet_id| !self.long_lived_subscriptions.contains_key(subnet_id)) .collect::<Vec<_>>(); - let to_subscribe_subnets = { - if available_subnets.len() < no_subnets_to_subscribe { - debug!(self.log, "Reached maximum random subnet subscriptions"); - available_subnets - } else { - // select a random sample of available subnets - available_subnets - .choose_multiple(&mut rand::thread_rng(), no_subnets_to_subscribe) - .cloned() - .collect::<Vec<_>>() + let subnets_to_subscribe: Vec<_> = available_subnets + .choose_multiple(&mut rand::thread_rng(), required_long_lived_subnets) + .cloned() + .collect(); + + // Calculate in which slot does this subscription end. + let end_slot = match self.beacon_chain.slot_clock.now() { + Some(slot) => slot + self.long_lived_subnet_subscription_slots, + None => { + return debug!( + self.log, + "Failed to calculate end slot of long lived subnet subscriptions." + ) } }; - for subnet_id in to_subscribe_subnets { - // remove this subnet from any immediate un-subscription events - self.unsubscriptions - .retain(|exact_subnet| exact_subnet.subnet_id != subnet_id); - - // insert a new random subnet - self.random_subnets.insert(subnet_id); - - // send discovery request - // Note: it's wasteful to send a DiscoverPeers request if we already have peers for this subnet. - // However, subscribing to random subnets ideally shouldn't happen very often (once in ~27 hours) and - // this makes it easier to deterministically test the attestations service. 
- self.events - .push_back(SubnetServiceMessage::DiscoverPeers(vec![SubnetDiscovery { - subnet: Subnet::Attestation(subnet_id), - min_ttl: None, - }])); - - // if we are not already subscribed, then subscribe - if !self.subscriptions.contains(&subnet_id) { - self.subscriptions.insert(subnet_id); - debug!(self.log, "Subscribing to random subnet"; "subnet_id" => ?subnet_id); - self.events - .push_back(SubnetServiceMessage::Subscribe(Subnet::Attestation( - subnet_id, - ))); + for subnet_id in &subnets_to_subscribe { + if let Err(e) = self.subscribe_to_subnet_immediately( + *subnet_id, + SubscriptionKind::LongLived, + end_slot, + ) { + debug!(self.log, "Failed to subscribe to long lived subnet"; "subnet" => ?subnet_id, "err" => e); } - - // add the subnet to the ENR bitfield - self.events - .push_back(SubnetServiceMessage::EnrAdd(Subnet::Attestation(subnet_id))); } } /* A collection of functions that handle the various timeouts */ - /// A queued subscription is ready. + /// Registers a subnet as subscribed. /// - /// We add subscriptions events even if we are already subscribed to a random subnet (as these - /// can be unsubscribed at any time by inactive validators). If we are - /// still subscribed at the time the event fires, we don't re-subscribe. 
- fn handle_subscriptions(&mut self, exact_subnet: ExactSubnet) { - // Check if the subnet currently exists as a long-lasting random subnet - if let Some(expiry) = self.random_subnets.get(&exact_subnet.subnet_id) { - // we are subscribed via a random subnet, if this is to expire during the time we need - // to be subscribed, just extend the expiry - let slot_duration = self.beacon_chain.slot_clock.slot_duration(); - let advance_subscription_duration = slot_duration - .checked_div(ADVANCE_SUBSCRIBE_TIME) - .expect("ADVANCE_SUBSCRIPTION_TIME cannot be too large"); - // we require the subnet subscription for at least a slot on top of the initial - // subscription time - let expected_end_subscription_duration = slot_duration + advance_subscription_duration; + /// Checks that the time in which the subscription would end is not in the past. If we are + /// already subscribed, extends the timeout if necessary. If this is a new subscription, we send + /// out the appropriate events. + fn subscribe_to_subnet_immediately( + &mut self, + subnet_id: SubnetId, + subscription_kind: SubscriptionKind, + end_slot: Slot, + ) -> Result<(), &'static str> { + if self.subscribe_all_subnets { + // Case not handled by this service. + return Ok(()); + } - if expiry < &(Instant::now() + expected_end_subscription_duration) { - self.random_subnets - .update_timeout(&exact_subnet.subnet_id, expected_end_subscription_duration); + let time_to_subscription_end = self + .beacon_chain + .slot_clock + .duration_to_slot(end_slot) + .unwrap_or_default(); + + // First check this is worth doing. + if time_to_subscription_end.is_zero() { + return Err("Time when subscription would end has already passed."); + } + + // We need to check and add a subscription for the right kind, regardless of the presence + // of the subnet as a subscription of the other kind. This is mainly since long lived + // subscriptions can be removed at any time when a validator goes offline. 
+ let (subscriptions, already_subscribed_as_other_kind) = match subscription_kind { + SubscriptionKind::ShortLived => ( + &mut self.short_lived_subscriptions, + self.long_lived_subscriptions.contains_key(&subnet_id), + ), + SubscriptionKind::LongLived => ( + &mut self.long_lived_subscriptions, + self.short_lived_subscriptions.contains_key(&subnet_id), + ), + }; + + match subscriptions.get(&subnet_id) { + Some(current_end_slot) => { + // We are already subscribed. Check if we need to extend the subscription. + if &end_slot > current_end_slot { + trace!(self.log, "Extending subscription to subnet"; + "subnet" => ?subnet_id, + "prev_end_slot" => current_end_slot, + "new_end_slot" => end_slot, + "subscription_kind" => ?subscription_kind, + ); + subscriptions.insert_at(subnet_id, end_slot, time_to_subscription_end); + } } - } else { - // we are also not un-subscribing from a subnet if the next slot requires us to be - // subscribed. Therefore there could be the case that we are already still subscribed - // to the required subnet. In which case we do not issue another subscription request. - if !self.subscriptions.contains(&exact_subnet.subnet_id) { - // we are not already subscribed - debug!(self.log, "Subscribing to subnet"; "subnet" => *exact_subnet.subnet_id, "target_slot" => exact_subnet.slot.as_u64()); - self.subscriptions.insert(exact_subnet.subnet_id); - self.events - .push_back(SubnetServiceMessage::Subscribe(Subnet::Attestation( - exact_subnet.subnet_id, + None => { + // This is a new subscription. Add with the corresponding timeout and send the + // notification. + subscriptions.insert_at(subnet_id, end_slot, time_to_subscription_end); + + // Inform of the subscription. 
+ if !already_subscribed_as_other_kind { + debug!(self.log, "Subscribing to subnet"; + "subnet" => ?subnet_id, + "end_slot" => end_slot, + "subscription_kind" => ?subscription_kind, + ); + self.queue_event(SubnetServiceMessage::Subscribe(Subnet::Attestation( + subnet_id, ))); + } + + // If this is a new long lived subscription, send out the appropriate events. + if SubscriptionKind::LongLived == subscription_kind { + let subnet = Subnet::Attestation(subnet_id); + // Advertise this subnet in our ENR. + self.long_lived_subscriptions.insert_at( + subnet_id, + end_slot, + time_to_subscription_end, + ); + self.queue_event(SubnetServiceMessage::EnrAdd(subnet)); + + if !self.discovery_disabled { + self.queue_event(SubnetServiceMessage::DiscoverPeers(vec![ + SubnetDiscovery { + subnet, + min_ttl: None, + }, + ])) + } + } } } - } - /// A queued unsubscription is ready. - /// - /// Unsubscription events are added, even if we are subscribed to long-lived random subnets. If - /// a random subnet is present, we do not unsubscribe from it. - fn handle_unsubscriptions(&mut self, exact_subnet: ExactSubnet) { - // Check if the subnet currently exists as a long-lasting random subnet - if self.random_subnets.contains(&exact_subnet.subnet_id) { - return; - } - - debug!(self.log, "Unsubscribing from subnet"; "subnet" => *exact_subnet.subnet_id, "processed_slot" => exact_subnet.slot.as_u64()); - - self.subscriptions.remove(&exact_subnet.subnet_id); - self.events - .push_back(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( - exact_subnet.subnet_id, - ))); + Ok(()) } /// A random subnet has expired. /// /// This function selects a new subnet to join, or extends the expiry if there are no more /// available subnets to choose from. 
- fn handle_random_subnet_expiry(&mut self, subnet_id: SubnetId) { + fn handle_random_subnet_expiry(&mut self, subnet_id: SubnetId, end_slot: Slot) { let subnet_count = self.beacon_chain.spec.attestation_subnet_count; - if self.random_subnets.len() == (subnet_count - 1) as usize { - // We are at capacity, simply increase the timeout of the current subnet - self.random_subnets.insert(subnet_id); - return; - } - // If there are no unsubscription events for `subnet_id`, we unsubscribe immediately. - if !self - .unsubscriptions - .keys() - .any(|s| s.subnet_id == subnet_id) - { - // we are not at capacity, unsubscribe from the current subnet. - debug!(self.log, "Unsubscribing from random subnet"; "subnet_id" => *subnet_id); - self.events - .push_back(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( + if self.long_lived_subscriptions.len() == (subnet_count - 1) as usize { + let end_slot = end_slot + self.long_lived_subnet_subscription_slots; + // This is just an extra accuracy precaution, we could use the default timeout if + // needed. + if let Some(time_to_subscription_end) = + self.beacon_chain.slot_clock.duration_to_slot(end_slot) + { + // We are at capacity, simply increase the timeout of the current subnet. + self.long_lived_subscriptions.insert_at( subnet_id, - ))); + end_slot + 1, + time_to_subscription_end, + ); + } else { + self.long_lived_subscriptions.insert(subnet_id, end_slot); + } + return; } // Remove the ENR bitfield bit and choose a new random on from the available subnets - self.events - .push_back(SubnetServiceMessage::EnrRemove(Subnet::Attestation( + // Subscribe to a new random subnet. + self.subscribe_to_random_subnets(); + } + + // Unsubscribes from a subnet that was removed if it does not continue to exist as a + // subscription of the other kind. For long lived subscriptions, it also removes the + // advertisement from our ENR. 
+ fn handle_removed_subnet(&mut self, subnet_id: SubnetId, subscription_kind: SubscriptionKind) { + let other_subscriptions = match subscription_kind { + SubscriptionKind::LongLived => &self.short_lived_subscriptions, + SubscriptionKind::ShortLived => &self.long_lived_subscriptions, + }; + + if !other_subscriptions.contains_key(&subnet_id) { + // Subscription no longer exists as short lived or long lived. + debug!(self.log, "Unsubscribing from subnet"; "subnet" => ?subnet_id, "subscription_kind" => ?subscription_kind); + self.queue_event(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( subnet_id, ))); - // Subscribe to a new random subnet - self.subscribe_to_random_subnets(1); + } + + if subscription_kind == SubscriptionKind::LongLived { + // Remove from our ENR even if we remain subscribed in other way. + self.queue_event(SubnetServiceMessage::EnrRemove(Subnet::Attestation( + subnet_id, + ))); + } } /// A known validator has not sent a subscription in a while. They are considered offline and the @@ -552,39 +622,37 @@ impl<T: BeaconChainTypes> AttestationService<T> { /// validators to random subnets. So when a validator goes offline, we can simply remove the /// allocated amount of random subnets. fn handle_known_validator_expiry(&mut self) { - let spec = &self.beacon_chain.spec; - let subnet_count = spec.attestation_subnet_count; - let random_subnets_per_validator = spec.random_subnets_per_validator; - if self.known_validators.len() as u64 * random_subnets_per_validator >= subnet_count { - // have too many validators, ignore + // Calculate how many subnets should we remove. 
+ let extra_subnet_count = { + let max_subnets = self.beacon_chain.spec.attestation_subnet_count; + let subnets_for_validators = self + .known_validators + .len() + .saturating_mul(self.beacon_chain.spec.random_subnets_per_validator as usize) + .min(max_subnets as usize); + + self.long_lived_subscriptions + .len() + .saturating_sub(subnets_for_validators) + }; + + if extra_subnet_count == 0 { + // Nothing to do return; } - let subscribed_subnets = self.random_subnets.keys().cloned().collect::<Vec<_>>(); - let to_remove_subnets = subscribed_subnets.choose_multiple( - &mut rand::thread_rng(), - random_subnets_per_validator as usize, - ); + let advertised_subnets = self + .long_lived_subscriptions + .keys() + .cloned() + .collect::<Vec<_>>(); + let to_remove_subnets = advertised_subnets + .choose_multiple(&mut rand::thread_rng(), extra_subnet_count) + .cloned(); for subnet_id in to_remove_subnets { - // If there are no unsubscription events for `subnet_id`, we unsubscribe immediately. - if !self - .unsubscriptions - .keys() - .any(|s| s.subnet_id == *subnet_id) - { - self.events - .push_back(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( - *subnet_id, - ))); - } - // as the long lasting subnet subscription is being removed, remove the subnet_id from - // the ENR bitfield - self.events - .push_back(SubnetServiceMessage::EnrRemove(Subnet::Attestation( - *subnet_id, - ))); - self.random_subnets.remove(subnet_id); + self.long_lived_subscriptions.remove(&subnet_id); + self.handle_removed_subnet(subnet_id, SubscriptionKind::LongLived); } } } @@ -593,7 +661,7 @@ impl<T: BeaconChainTypes> Stream for AttestationService<T> { type Item = SubnetServiceMessage; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { - // update the waker if needed + // Update the waker if needed. 
if let Some(waker) = &self.waker { if waker.will_wake(cx.waker()) { self.waker = Some(cx.waker().clone()); @@ -602,25 +670,13 @@ impl<T: BeaconChainTypes> Stream for AttestationService<T> { self.waker = Some(cx.waker().clone()); } - // process any un-subscription events - match self.unsubscriptions.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(exact_subnet))) => self.handle_unsubscriptions(exact_subnet), - Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> e); - } - Poll::Ready(None) | Poll::Pending => {} + // Send out any generated events. + if let Some(event) = self.events.pop_front() { + return Poll::Ready(Some(event)); } - // process any random subnet expiries - match self.random_subnets.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(subnet))) => self.handle_random_subnet_expiry(subnet), - Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for random subnet cycles"; "error"=> e); - } - Poll::Ready(None) | Poll::Pending => {} - } - - // process any known validator expiries + // Process first any known validator expiries, since these affect how many long lived + // subnets we need. match self.known_validators.poll_next_unpin(cx) { Poll::Ready(Some(Ok(_validator_index))) => { self.handle_known_validator_expiry(); @@ -630,14 +686,52 @@ impl<T: BeaconChainTypes> Stream for AttestationService<T> { } Poll::Ready(None) | Poll::Pending => {} } - // poll to remove entries on expiration, no need to act on expiration events - if let Poll::Ready(Some(Err(e))) = self.aggregate_validators_on_subnet.poll_next_unpin(cx) { - error!(self.log, "Failed to check for aggregate validator on subnet expirations"; "error"=> e); + + // Process scheduled subscriptions that might be ready, since those can extend a soon to + // expire subscription. 
+ match self.scheduled_short_lived_subscriptions.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(ExactSubnet { subnet_id, slot }))) => { + if let Err(e) = self.subscribe_to_subnet_immediately( + subnet_id, + SubscriptionKind::ShortLived, + slot + 1, + ) { + debug!(self.log, "Failed to subscribe to short lived subnet"; "subnet" => ?subnet_id, "err" => e); + } + } + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for scheduled subnet subscriptions"; "error"=> e); + } + Poll::Ready(None) | Poll::Pending => {} } - // process any generated events - if let Some(event) = self.events.pop_front() { - return Poll::Ready(Some(event)); + // Finally process any expired subscriptions. + match self.short_lived_subscriptions.poll_next_unpin(cx) { + Poll::Ready(Some(Ok((subnet_id, _end_slot)))) => { + self.handle_removed_subnet(subnet_id, SubscriptionKind::ShortLived); + } + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> e); + } + Poll::Ready(None) | Poll::Pending => {} + } + + // Process any random subnet expiries. + match self.long_lived_subscriptions.poll_next_unpin(cx) { + Poll::Ready(Some(Ok((subnet_id, end_slot)))) => { + self.handle_random_subnet_expiry(subnet_id, end_slot) + } + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for random subnet cycles"; "error"=> e); + } + Poll::Ready(None) | Poll::Pending => {} + } + + // Poll to remove entries on expiration, no need to act on expiration events. 
+ if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { + if let Poll::Ready(Some(Err(e))) = tracked_vals.poll_next_unpin(cx) { + error!(self.log, "Failed to check for aggregate validator on subnet expirations"; "error"=> e); + } } Poll::Pending diff --git a/beacon_node/network/src/subnet_service/sync_subnets.rs b/beacon_node/network/src/subnet_service/sync_subnets.rs index 9e92f62250..0b27ff527f 100644 --- a/beacon_node/network/src/subnet_service/sync_subnets.rs +++ b/beacon_node/network/src/subnet_service/sync_subnets.rs @@ -12,7 +12,7 @@ use slog::{debug, error, o, trace, warn}; use super::SubnetServiceMessage; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use hashset_delay::HashSetDelay; +use delay_map::HashSetDelay; use lighthouse_network::{NetworkConfig, Subnet, SubnetDiscovery}; use slot_clock::SlotClock; use types::{Epoch, EthSpec, SyncCommitteeSubscription, SyncSubnetId}; diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 778eb63263..65ca9f2194 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -8,7 +8,7 @@ use futures::prelude::*; use genesis::{generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use lazy_static::lazy_static; use lighthouse_network::NetworkConfig; -use slog::Logger; +use slog::{o, Drain, Logger}; use sloggers::{null::NullLoggerBuilder, Build}; use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::sync::Arc; @@ -42,7 +42,7 @@ impl TestBeaconChain { let keypairs = generate_deterministic_keypairs(1); - let log = get_logger(); + let log = get_logger(None); let store = HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone(), log.clone()).unwrap(); @@ -93,16 +93,32 @@ pub fn recent_genesis_time() -> u64 { .as_secs() } -fn get_logger() -> Logger { - NullLoggerBuilder.build().expect("logger should build") +fn get_logger(log_level: 
Option<slog::Level>) -> Logger { + if let Some(level) = log_level { + let drain = { + let decorator = slog_term::TermDecorator::new().build(); + let decorator = + logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).chan_size(2048).build(); + drain.filter_level(level) + }; + + Logger::root(drain.fuse(), o!()) + } else { + let builder = NullLoggerBuilder; + builder.build().expect("should build logger") + } } lazy_static! { static ref CHAIN: TestBeaconChain = TestBeaconChain::new_with_system_clock(); } -fn get_attestation_service() -> AttestationService<TestBeaconChainType> { - let log = get_logger(); +fn get_attestation_service( + log_level: Option<slog::Level>, +) -> AttestationService<TestBeaconChainType> { + let log = get_logger(log_level); let config = NetworkConfig::default(); let beacon_chain = CHAIN.chain.clone(); @@ -111,7 +127,7 @@ fn get_attestation_service() -> AttestationService<TestBeaconChainType> { } fn get_sync_committee_service() -> SyncCommitteeService<TestBeaconChainType> { - let log = get_logger(); + let log = get_logger(None); let config = NetworkConfig::default(); let beacon_chain = CHAIN.chain.clone(); @@ -128,28 +144,34 @@ async fn get_events<S: Stream<Item = SubnetServiceMessage> + Unpin>( ) -> Vec<SubnetServiceMessage> { let mut events = Vec::new(); - let collect_stream_fut = async { - loop { - if let Some(result) = stream.next().await { - events.push(result); + let timeout = + tokio::time::sleep(Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout); + futures::pin_mut!(timeout); + + loop { + tokio::select! { + Some(event) = stream.next() => { + events.push(event); if let Some(num) = num_events { if events.len() == num { - return; + break; } } } - } - }; + _ = timeout.as_mut() => { + break; + } - tokio::select! 
{ - _ = collect_stream_fut => events, - _ = tokio::time::sleep( - Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout, - ) => events + } } + + events } mod attestation_service { + + use crate::subnet_service::attestation_subnets::MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD; + use super::*; fn get_subscription( @@ -195,7 +217,7 @@ mod attestation_service { let committee_count = 1; // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); + let mut attestation_service = get_attestation_service(None); let current_slot = attestation_service .beacon_chain .slot_clock @@ -237,15 +259,18 @@ mod attestation_service { matches::assert_matches!( events[..3], [ - SubnetServiceMessage::DiscoverPeers(_), SubnetServiceMessage::Subscribe(_any1), - SubnetServiceMessage::EnrAdd(_any3) + SubnetServiceMessage::EnrAdd(_any3), + SubnetServiceMessage::DiscoverPeers(_), ] ); // If the long lived and short lived subnets are the same, there should be no more events // as we don't resubscribe already subscribed subnets. - if !attestation_service.random_subnets.contains(&subnet_id) { + if !attestation_service + .subscriptions(attestation_subnets::SubscriptionKind::LongLived) + .contains_key(&subnet_id) + { assert_eq!(expected[..], events[3..]); } // Should be subscribed to only 1 long lived subnet after unsubscription. 
@@ -267,7 +292,7 @@ mod attestation_service { let com2 = 0; // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); + let mut attestation_service = get_attestation_service(None); let current_slot = attestation_service .beacon_chain .slot_clock @@ -319,16 +344,19 @@ mod attestation_service { matches::assert_matches!( events[..3], [ - SubnetServiceMessage::DiscoverPeers(_), SubnetServiceMessage::Subscribe(_any1), - SubnetServiceMessage::EnrAdd(_any3) + SubnetServiceMessage::EnrAdd(_any3), + SubnetServiceMessage::DiscoverPeers(_), ] ); let expected = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1)); // Should be still subscribed to 1 long lived and 1 short lived subnet if both are different. - if !attestation_service.random_subnets.contains(&subnet_id1) { + if !attestation_service + .subscriptions(attestation_subnets::SubscriptionKind::LongLived) + .contains_key(&subnet_id1) + { assert_eq!(expected, events[3]); assert_eq!(attestation_service.subscription_count(), 2); } else { @@ -339,7 +367,10 @@ mod attestation_service { let unsubscribe_event = get_events(&mut attestation_service, None, 1).await; // If the long lived and short lived subnets are different, we should get an unsubscription event. 
- if !attestation_service.random_subnets.contains(&subnet_id1) { + if !attestation_service + .subscriptions(attestation_subnets::SubscriptionKind::LongLived) + .contains_key(&subnet_id1) + { assert_eq!( [SubnetServiceMessage::Unsubscribe(Subnet::Attestation( subnet_id1 @@ -360,7 +391,7 @@ mod attestation_service { let committee_count = 1; // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); + let mut attestation_service = get_attestation_service(None); let current_slot = attestation_service .beacon_chain .slot_clock @@ -418,7 +449,7 @@ mod attestation_service { let committee_count = 1; // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); + let mut attestation_service = get_attestation_service(None); let current_slot = attestation_service .beacon_chain .slot_clock @@ -465,6 +496,122 @@ mod attestation_service { assert_eq!(enr_add_count, 64); assert_eq!(unexpected_msg_count, 0); } + + #[tokio::test] + async fn test_subscribe_same_subnet_several_slots_apart() { + // subscription config + let validator_index = 1; + let committee_count = 1; + + // Makes 2 validator subscriptions to the same subnet but at different slots. + // There should be just 1 unsubscription event for the later slot subscription (subscription_slot2). 
+ let subscription_slot1 = 0; + let subscription_slot2 = MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD + 4; + let com1 = MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD + 4; + let com2 = 0; + + // create the attestation service and subscriptions + let mut attestation_service = get_attestation_service(None); + let current_slot = attestation_service + .beacon_chain + .slot_clock + .now() + .expect("Could not get current slot"); + + let sub1 = get_subscription( + validator_index, + com1, + current_slot + Slot::new(subscription_slot1), + committee_count, + ); + + let sub2 = get_subscription( + validator_index, + com2, + current_slot + Slot::new(subscription_slot2), + committee_count, + ); + + let subnet_id1 = SubnetId::compute_subnet::<MainnetEthSpec>( + current_slot + Slot::new(subscription_slot1), + com1, + committee_count, + &attestation_service.beacon_chain.spec, + ) + .unwrap(); + + let subnet_id2 = SubnetId::compute_subnet::<MainnetEthSpec>( + current_slot + Slot::new(subscription_slot2), + com2, + committee_count, + &attestation_service.beacon_chain.spec, + ) + .unwrap(); + + // Assert that subscriptions are different but their subnet is the same + assert_ne!(sub1, sub2); + assert_eq!(subnet_id1, subnet_id2); + + // submit the subscriptions + attestation_service + .validator_subscriptions(vec![sub1, sub2]) + .unwrap(); + + // Unsubscription event should happen at the end of the slot. 
+ let events = get_events(&mut attestation_service, None, 1).await; + matches::assert_matches!( + events[..3], + [ + SubnetServiceMessage::Subscribe(_any1), + SubnetServiceMessage::EnrAdd(_any3), + SubnetServiceMessage::DiscoverPeers(_), + ] + ); + + let expected_subscription = + SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1)); + let expected_unsubscription = + SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id1)); + + if !attestation_service + .subscriptions(attestation_subnets::SubscriptionKind::LongLived) + .contains_key(&subnet_id1) + { + assert_eq!(expected_subscription, events[3]); + // fourth is a discovery event + assert_eq!(expected_unsubscription, events[5]); + } + assert_eq!(attestation_service.subscription_count(), 1); + + println!("{events:?}"); + let subscription_slot = current_slot + subscription_slot2 - 1; // one less do to the + // advance subscription time + let wait_slots = attestation_service + .beacon_chain + .slot_clock + .duration_to_slot(subscription_slot) + .unwrap() + .as_millis() as u64 + / SLOT_DURATION_MILLIS; + + let no_events = dbg!(get_events(&mut attestation_service, None, wait_slots as u32).await); + + assert_eq!(no_events, []); + + let second_subscribe_event = get_events(&mut attestation_service, None, 2).await; + // If the long lived and short lived subnets are different, we should get an unsubscription event. + if !attestation_service + .subscriptions(attestation_subnets::SubscriptionKind::LongLived) + .contains_key(&subnet_id1) + { + assert_eq!( + [SubnetServiceMessage::Subscribe(Subnet::Attestation( + subnet_id1 + ))], + second_subscribe_event[..] 
+ ); + } + } } mod sync_committee_service { diff --git a/common/hashset_delay/Cargo.toml b/common/hashset_delay/Cargo.toml deleted file mode 100644 index 1aa525a115..0000000000 --- a/common/hashset_delay/Cargo.toml +++ /dev/null @@ -1,12 +0,0 @@ -[package] -name = "hashset_delay" -version = "0.2.0" -authors = ["Sigma Prime <contact@sigmaprime.io>"] -edition = "2021" - -[dependencies] -futures = "0.3.7" -tokio-util = { version = "0.6.2", features = ["time"] } - -[dev-dependencies] -tokio = { version = "1.14.0", features = ["time", "rt-multi-thread", "macros"] } diff --git a/common/hashset_delay/src/hashset_delay.rs b/common/hashset_delay/src/hashset_delay.rs deleted file mode 100644 index 052d71fe3b..0000000000 --- a/common/hashset_delay/src/hashset_delay.rs +++ /dev/null @@ -1,197 +0,0 @@ -//NOTE: This is just a specific case of a HashMapDelay. -// The code has been copied to make unique `insert` and `insert_at` functions. - -/// The default delay for entries, in seconds. This is only used when `insert()` is used to add -/// entries. -const DEFAULT_DELAY: u64 = 30; - -use futures::prelude::*; -use std::{ - collections::HashMap, - pin::Pin, - task::{Context, Poll}, - time::{Duration, Instant}, -}; -use tokio_util::time::delay_queue::{self, DelayQueue}; - -pub struct HashSetDelay<K> -where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin, -{ - /// The given entries. - entries: HashMap<K, MapEntry>, - /// A queue holding the timeouts of each entry. - expirations: DelayQueue<K>, - /// The default expiration timeout of an entry. - default_entry_timeout: Duration, -} - -/// A wrapping around entries that adds the link to the entry's expiration, via a `delay_queue` key. -struct MapEntry { - /// The expiration key for the entry. - key: delay_queue::Key, - /// The actual entry. 
- value: Instant, -} - -impl<K> Default for HashSetDelay<K> -where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin, -{ - fn default() -> Self { - HashSetDelay::new(Duration::from_secs(DEFAULT_DELAY)) - } -} - -impl<K> HashSetDelay<K> -where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin, -{ - /// Creates a new instance of `HashSetDelay`. - pub fn new(default_entry_timeout: Duration) -> Self { - HashSetDelay { - entries: HashMap::new(), - expirations: DelayQueue::new(), - default_entry_timeout, - } - } - - /// Insert an entry into the mapping. Entries will expire after the `default_entry_timeout`. - pub fn insert(&mut self, key: K) { - self.insert_at(key, self.default_entry_timeout); - } - - /// Inserts an entry that will expire at a given instant. If the entry already exists, the - /// timeout is updated. - pub fn insert_at(&mut self, key: K, entry_duration: Duration) { - if self.contains(&key) { - // update the timeout - self.update_timeout(&key, entry_duration); - } else { - let delay_key = self.expirations.insert(key.clone(), entry_duration); - let entry = MapEntry { - key: delay_key, - value: Instant::now() + entry_duration, - }; - self.entries.insert(key, entry); - } - } - - /// Gets a reference to an entry if it exists. - /// - /// Returns None if the entry does not exist. - pub fn get(&self, key: &K) -> Option<&Instant> { - self.entries.get(key).map(|entry| &entry.value) - } - - /// Returns true if the key exists, false otherwise. - pub fn contains(&self, key: &K) -> bool { - self.entries.contains_key(key) - } - - /// Returns the length of the mapping. - pub fn len(&self) -> usize { - self.entries.len() - } - - /// Checks if the mapping is empty. - pub fn is_empty(&self) -> bool { - self.entries.is_empty() - } - - /// Updates the timeout for a given key. Returns true if the key existed, false otherwise. - /// - /// Panics if the duration is too far in the future. 
- pub fn update_timeout(&mut self, key: &K, timeout: Duration) -> bool { - if let Some(entry) = self.entries.get(key) { - self.expirations.reset(&entry.key, timeout); - true - } else { - false - } - } - - /// Removes a key from the map returning the value associated with the key that was in the map. - /// - /// Return false if the key was not in the map. - pub fn remove(&mut self, key: &K) -> bool { - if let Some(entry) = self.entries.remove(key) { - self.expirations.remove(&entry.key); - return true; - } - false - } - - /// Retains only the elements specified by the predicate. - /// - /// In other words, remove all pairs `(k, v)` such that `f(&k,&mut v)` returns false. - pub fn retain<F: FnMut(&K) -> bool>(&mut self, mut f: F) { - let expiration = &mut self.expirations; - self.entries.retain(|key, entry| { - let result = f(key); - if !result { - expiration.remove(&entry.key); - } - result - }) - } - - /// Removes all entries from the map. - pub fn clear(&mut self) { - self.entries.clear(); - self.expirations.clear(); - } - - /// Returns a vector of referencing all keys in the map. 
- pub fn keys(&self) -> impl Iterator<Item = &K> { - self.entries.keys() - } -} - -impl<K> Stream for HashSetDelay<K> -where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin, -{ - type Item = Result<K, String>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> { - match self.expirations.poll_expired(cx) { - Poll::Ready(Some(Ok(key))) => match self.entries.remove(key.get_ref()) { - Some(_) => Poll::Ready(Some(Ok(key.into_inner()))), - None => Poll::Ready(Some(Err("Value no longer exists in expirations".into()))), - }, - Poll::Ready(Some(Err(e))) => { - Poll::Ready(Some(Err(format!("delay queue error: {:?}", e)))) - } - Poll::Ready(None) => Poll::Ready(None), - Poll::Pending => Poll::Pending, - } - } -} - -#[cfg(test)] - -mod tests { - use super::*; - - #[tokio::test] - async fn should_not_panic() { - let key = 2u8; - - let mut map = HashSetDelay::default(); - - map.insert(key); - map.update_timeout(&key, Duration::from_secs(100)); - - let fut = |cx: &mut Context| { - let _ = map.poll_next_unpin(cx); - let _ = map.poll_next_unpin(cx); - Poll::Ready(()) - }; - - future::poll_fn(fut).await; - - map.insert(key); - map.update_timeout(&key, Duration::from_secs(100)); - } -} diff --git a/common/hashset_delay/src/lib.rs b/common/hashset_delay/src/lib.rs deleted file mode 100644 index 175ad72cfa..0000000000 --- a/common/hashset_delay/src/lib.rs +++ /dev/null @@ -1,12 +0,0 @@ -//! This crate provides a single type (its counter-part HashMapDelay has been removed as it -//! currently is not in use in lighthouse): -//! - `HashSetDelay` -//! -//! # HashSetDelay -//! -//! This is similar to a `HashMapDelay` except the mapping maps to the expiry time. This -//! allows users to add objects and check their expiry deadlines before the `Stream` -//! consumes them. 
- -mod hashset_delay; -pub use crate::hashset_delay::HashSetDelay; From 80359d8ddbfbba0fded8829f9404322d474be167 Mon Sep 17 00:00:00 2001 From: Mac L <mjladson@pm.me> Date: Mon, 5 Sep 2022 04:50:45 +0000 Subject: [PATCH 166/184] Fix attestation performance API `InvalidValidatorIndex` error (#3503) ## Issue Addressed When requesting an index which is not active during `start_epoch`, Lighthouse returns: ``` curl "http://localhost:5052/lighthouse/analysis/attestation_performance/999999999?start_epoch=100000&end_epoch=100000" ``` ```json { "code": 500, "message": "INTERNAL_SERVER_ERROR: ParticipationCache(InvalidValidatorIndex(999999999))", "stacktraces": [] } ``` This error occurs even when the index in question becomes active before `end_epoch` which is undesirable as it can prevent larger queries from completing. ## Proposed Changes In the event the index is out-of-bounds (has not yet been activated), simply return all fields as `false`: ``` -> curl "http://localhost:5052/lighthouse/analysis/attestation_performance/999999999?start_epoch=100000&end_epoch=100000" ``` ```json [ { "index": 999999999, "epochs": { "100000": { "active": false, "head": false, "target": false, "source": false } } } ] ``` By doing this, we cover the case where a validator becomes active sometime between `start_epoch` and `end_epoch`. ## Additional Info Note that this error only occurs for epochs after the Altair hard fork. --- .../http_api/src/attestation_performance.rs | 4 +++ .../epoch_processing_summary.rs | 28 ++++++++++++++++--- 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index 2b4543656d..ca68d4d04c 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -83,6 +83,10 @@ pub fn get_attestation_performance<T: BeaconChainTypes>( } // Either use the global validator set, or the specified index. 
+ // + // Does no further validation of the indices, so in the event an index has not yet been + // activated or does not yet exist (according to the head state), it will return all fields as + // `false`. let index_range = if target.to_lowercase() == "global" { chain .with_head(|head| Ok((0..head.beacon_state.validators().len() as u64).collect())) diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index 5e15aa3e1b..6eb2f97766 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -129,7 +129,12 @@ impl<T: EthSpec> EpochProcessingSummary<T> { EpochProcessingSummary::Altair { participation_cache, .. - } => participation_cache.is_current_epoch_timely_target_attester(val_index), + } => participation_cache + .is_current_epoch_timely_target_attester(val_index) + .or_else(|e| match e { + ParticipationCacheError::InvalidValidatorIndex(_) => Ok(false), + e => Err(e), + }), } } @@ -222,7 +227,12 @@ impl<T: EthSpec> EpochProcessingSummary<T> { EpochProcessingSummary::Altair { participation_cache, .. - } => participation_cache.is_previous_epoch_timely_target_attester(val_index), + } => participation_cache + .is_previous_epoch_timely_target_attester(val_index) + .or_else(|e| match e { + ParticipationCacheError::InvalidValidatorIndex(_) => Ok(false), + e => Err(e), + }), } } @@ -248,7 +258,12 @@ impl<T: EthSpec> EpochProcessingSummary<T> { EpochProcessingSummary::Altair { participation_cache, .. 
- } => participation_cache.is_previous_epoch_timely_head_attester(val_index), + } => participation_cache + .is_previous_epoch_timely_head_attester(val_index) + .or_else(|e| match e { + ParticipationCacheError::InvalidValidatorIndex(_) => Ok(false), + e => Err(e), + }), } } @@ -274,7 +289,12 @@ impl<T: EthSpec> EpochProcessingSummary<T> { EpochProcessingSummary::Altair { participation_cache, .. - } => participation_cache.is_previous_epoch_timely_source_attester(val_index), + } => participation_cache + .is_previous_epoch_timely_source_attester(val_index) + .or_else(|e| match e { + ParticipationCacheError::InvalidValidatorIndex(_) => Ok(false), + e => Err(e), + }), } } From f13dd04f422703803a931d50d5348fbc54f924ff Mon Sep 17 00:00:00 2001 From: MaboroshiChan <bttfus@gmail.com> Date: Mon, 5 Sep 2022 04:50:46 +0000 Subject: [PATCH 167/184] Add timeout for --checkpoint-sync-url (#3521) ## Issue Addressed [Have --checkpoint-sync-url timeout](https://github.com/sigp/lighthouse/issues/3478) ## Proposed Changes I added a parameter for `get_bytes_opt_accept_header<U: IntoUrl>` which accept a timeout duration, and modified the body of `get_beacon_blocks_ssz` and `get_debug_beacon_states_ssz` to pass corresponding timeout durations. 
--- common/eth2/src/lib.rs | 11 ++++++++--- validator_client/src/lib.rs | 6 ++++++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 6317523fee..f096aca97e 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -112,6 +112,8 @@ pub struct Timeouts { pub proposer_duties: Duration, pub sync_committee_contribution: Duration, pub sync_duties: Duration, + pub get_beacon_blocks_ssz: Duration, + pub get_debug_beacon_states: Duration, } impl Timeouts { @@ -124,6 +126,8 @@ impl Timeouts { proposer_duties: timeout, sync_committee_contribution: timeout, sync_duties: timeout, + get_beacon_blocks_ssz: timeout, + get_debug_beacon_states: timeout, } } } @@ -239,9 +243,10 @@ impl BeaconNodeHttpClient { &self, url: U, accept_header: Accept, + timeout: Duration, ) -> Result<Option<Vec<u8>>, Error> { let opt_response = self - .get_response(url, |b| b.accept(accept_header)) + .get_response(url, |b| b.accept(accept_header).timeout(timeout)) .await .optional()?; match opt_response { @@ -701,7 +706,7 @@ impl BeaconNodeHttpClient { ) -> Result<Option<SignedBeaconBlock<T>>, Error> { let path = self.get_beacon_blocks_path(block_id)?; - self.get_bytes_opt_accept_header(path, Accept::Ssz) + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_beacon_blocks_ssz) .await? .map(|bytes| SignedBeaconBlock::from_ssz_bytes(&bytes, spec).map_err(Error::InvalidSsz)) .transpose() @@ -1167,7 +1172,7 @@ impl BeaconNodeHttpClient { ) -> Result<Option<BeaconState<T>>, Error> { let path = self.get_debug_beacon_states_path(state_id)?; - self.get_bytes_opt_accept_header(path, Accept::Ssz) + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_debug_beacon_states) .await? 
.map(|bytes| BeaconState::from_ssz_bytes(&bytes, spec).map_err(Error::InvalidSsz)) .transpose() diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index ac6969f4c5..c05507576e 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -75,6 +75,8 @@ const HTTP_PROPOSAL_TIMEOUT_QUOTIENT: u32 = 2; const HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT: u32 = 4; const DOPPELGANGER_SERVICE_NAME: &str = "doppelganger"; @@ -285,6 +287,10 @@ impl<T: EthSpec> ProductionValidatorClient<T> { sync_committee_contribution: slot_duration / HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT, sync_duties: slot_duration / HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT, + get_beacon_blocks_ssz: slot_duration + / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT, + get_debug_beacon_states: slot_duration + / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, } } else { Timeouts::set_all(slot_duration) From cae40731a221c17145aba08c9db78a40dc407c90 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Mon, 5 Sep 2022 04:50:47 +0000 Subject: [PATCH 168/184] Strict count unrealized (#3522) ## Issue Addressed Add a flag that can increase count unrealized strictness, defaults to false ## Proposed Changes Please list or describe the changes introduced by this PR. ## Additional Info Please provide any additional information. For example, future considerations or information useful for reviewers. 
Co-authored-by: realbigsean <seananderson33@gmail.com> Co-authored-by: sean <seananderson33@gmail.com> --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 ++ beacon_node/beacon_chain/src/builder.rs | 4 ++ .../beacon_chain/src/canonical_head.rs | 15 ++++-- beacon_node/beacon_chain/src/chain_config.rs | 4 ++ beacon_node/beacon_chain/src/fork_revert.rs | 3 ++ beacon_node/beacon_chain/src/lib.rs | 2 +- .../src/schema_change/migration_schema_v7.rs | 8 ++- beacon_node/src/cli.rs | 8 +++ beacon_node/src/config.rs | 2 + consensus/fork_choice/src/fork_choice.rs | 24 ++++++--- consensus/fork_choice/src/lib.rs | 4 +- .../src/fork_choice_test_definition.rs | 6 ++- consensus/proto_array/src/lib.rs | 2 +- consensus/proto_array/src/proto_array.rs | 50 ++++++++++++++++--- .../src/proto_array_fork_choice.rs | 10 +++- consensus/proto_array/src/ssz_container.rs | 7 +-- lighthouse/tests/beacon_node.rs | 41 ++++++++++++++- 17 files changed, 164 insertions(+), 30 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6637b8fd53..077b425c07 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -65,6 +65,7 @@ use itertools::process_results; use itertools::Itertools; use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool}; use parking_lot::{Mutex, RwLock}; +use proto_array::CountUnrealizedFull; use safe_arith::SafeArith; use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; @@ -433,6 +434,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { pub fn load_fork_choice( store: BeaconStore<T>, reset_payload_statuses: ResetPayloadStatuses, + count_unrealized_full: CountUnrealizedFull, spec: &ChainSpec, log: &Logger, ) -> Result<Option<BeaconForkChoice<T>>, Error> { @@ -449,6 +451,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { persisted_fork_choice.fork_choice, reset_payload_statuses, fc_store, + count_unrealized_full, spec, log, 
)?)) @@ -2934,6 +2937,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { ResetPayloadStatuses::always_reset_conditionally( self.config.always_reset_payload_statuses, ), + self.config.count_unrealized_full, &self.store, &self.spec, &self.log, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 2704690442..a578629b69 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -248,6 +248,7 @@ where ResetPayloadStatuses::always_reset_conditionally( self.chain_config.always_reset_payload_statuses, ), + self.chain_config.count_unrealized_full, &self.spec, log, ) @@ -365,6 +366,7 @@ where &genesis.beacon_block, &genesis.beacon_state, current_slot, + self.chain_config.count_unrealized_full, &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -482,6 +484,7 @@ where &snapshot.beacon_block, &snapshot.beacon_state, current_slot, + self.chain_config.count_unrealized_full, &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -658,6 +661,7 @@ where Some(current_slot), &self.spec, self.chain_config.count_unrealized.into(), + self.chain_config.count_unrealized_full, )?; } diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 166ba85720..644364bc60 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -44,7 +44,8 @@ use crate::{ }; use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead}; use fork_choice::{ - ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, ResetPayloadStatuses, + CountUnrealizedFull, ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, + ResetPayloadStatuses, }; use itertools::process_results; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -252,13 +253,19 @@ impl<T: BeaconChainTypes> 
CanonicalHead<T> { // defensive programming. mut fork_choice_write_lock: RwLockWriteGuard<BeaconForkChoice<T>>, reset_payload_statuses: ResetPayloadStatuses, + count_unrealized_full: CountUnrealizedFull, store: &BeaconStore<T>, spec: &ChainSpec, log: &Logger, ) -> Result<(), Error> { - let fork_choice = - <BeaconChain<T>>::load_fork_choice(store.clone(), reset_payload_statuses, spec, log)? - .ok_or(Error::MissingPersistedForkChoice)?; + let fork_choice = <BeaconChain<T>>::load_fork_choice( + store.clone(), + reset_payload_statuses, + count_unrealized_full, + spec, + log, + )? + .ok_or(Error::MissingPersistedForkChoice)?; let fork_choice_view = fork_choice.cached_fork_choice_view(); let beacon_block_root = fork_choice_view.head_block_root; let beacon_block = store diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index ad2b7abe5a..5e16a29cf3 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,3 +1,4 @@ +pub use proto_array::CountUnrealizedFull; use serde_derive::{Deserialize, Serialize}; use types::Checkpoint; @@ -42,6 +43,8 @@ pub struct ChainConfig { pub always_reset_payload_statuses: bool, /// Whether to apply paranoid checks to blocks proposed by this beacon node. pub paranoid_block_proposal: bool, + /// Whether to strictly count unrealized justified votes. 
+ pub count_unrealized_full: CountUnrealizedFull, } impl Default for ChainConfig { @@ -61,6 +64,7 @@ impl Default for ChainConfig { count_unrealized: true, always_reset_payload_statuses: false, paranoid_block_proposal: false, + count_unrealized_full: CountUnrealizedFull::default(), } } } diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index 1d2787d985..654b2713b1 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -1,6 +1,7 @@ use crate::{BeaconForkChoiceStore, BeaconSnapshot}; use fork_choice::{CountUnrealized, ForkChoice, PayloadVerificationStatus}; use itertools::process_results; +use proto_array::CountUnrealizedFull; use slog::{info, warn, Logger}; use state_processing::state_advance::complete_state_advance; use state_processing::{ @@ -100,6 +101,7 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It current_slot: Option<Slot>, spec: &ChainSpec, count_unrealized_config: CountUnrealized, + count_unrealized_full_config: CountUnrealizedFull, ) -> Result<ForkChoice<BeaconForkChoiceStore<E, Hot, Cold>, E>, String> { // Fetch finalized block. 
let finalized_checkpoint = head_state.finalized_checkpoint(); @@ -152,6 +154,7 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It &finalized_snapshot.beacon_block, &finalized_snapshot.beacon_state, current_slot, + count_unrealized_full_config, spec, ) .map_err(|e| format!("Unable to reset fork choice for revert: {:?}", e))?; diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index fc24a34bbb..1e704deba5 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -50,7 +50,7 @@ pub use self::beacon_chain::{ INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; pub use self::beacon_snapshot::BeaconSnapshot; -pub use self::chain_config::ChainConfig; +pub use self::chain_config::{ChainConfig, CountUnrealizedFull}; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs index 81147b8af6..4a9a78db7b 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs @@ -6,7 +6,7 @@ use crate::schema_change::types::{ProtoNodeV6, SszContainerV10, SszContainerV6, use crate::types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; use crate::{BeaconForkChoiceStore, BeaconSnapshot}; use fork_choice::ForkChoice; -use proto_array::{core::ProtoNode, core::SszContainer, ProtoArrayForkChoice}; +use proto_array::{core::ProtoNode, core::SszContainer, CountUnrealizedFull, ProtoArrayForkChoice}; use ssz::four_byte_option_impl; use ssz::{Decode, Encode}; use std::collections::{HashMap, HashSet}; @@ -52,6 +52,8 @@ pub(crate) fn update_with_reinitialized_fork_choice<T: BeaconChainTypes>( // Don't provide the 
current slot here, just use what's in the store. We don't need to know // the head here, plus it's nice to avoid mutating fork choice during this process. None, + // This config will get overwritten on startup. + CountUnrealizedFull::default(), spec, ) .map_err(|e| format!("{:?}", e))?; @@ -88,7 +90,9 @@ pub(crate) fn update_fork_choice<T: BeaconChainTypes>( ssz_container_v6.into_ssz_container_v7(justified_checkpoint, finalized_checkpoint); let ssz_container_v10: SszContainerV10 = ssz_container_v7.into(); let ssz_container: SszContainer = ssz_container_v10.into(); - let mut fork_choice: ProtoArrayForkChoice = ssz_container.into(); + // `CountUnrealizedFull::default()` represents the count-unrealized-full config which will be overwritten on startup. + let mut fork_choice: ProtoArrayForkChoice = + (ssz_container, CountUnrealizedFull::default()).into(); update_checkpoints::<T>(finalized_checkpoint.root, &nodes_v6, &mut fork_choice, db) .map_err(StoreError::SchemaMigrationError)?; diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index fe2afb0213..0f4d8a151c 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -786,6 +786,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("true") ) + .arg( + Arg::with_name("count-unrealized-full") + .long("count-unrealized-full") + .hidden(true) + .help("Stricter version of `count-unrealized`.") + .takes_value(true) + .default_value("false") + ) .arg( Arg::with_name("reset-payload-statuses") .long("reset-payload-statuses") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index caa10f555d..190dbf721e 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -643,6 +643,8 @@ pub fn get_config<E: EthSpec>( client_config.chain.count_unrealized = clap_utils::parse_required(cli_args, "count-unrealized")?; + client_config.chain.count_unrealized_full = + clap_utils::parse_required::<bool>(cli_args, "count-unrealized-full")?.into(); 
client_config.chain.always_reset_payload_statuses = cli_args.is_present("reset-payload-statuses"); diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index f55a283ed1..7f12e1d897 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,5 +1,7 @@ use crate::{ForkChoiceStore, InvalidationOperation}; -use proto_array::{Block as ProtoBlock, ExecutionStatus, ProtoArrayForkChoice}; +use proto_array::{ + Block as ProtoBlock, CountUnrealizedFull, ExecutionStatus, ProtoArrayForkChoice, +}; use slog::{crit, debug, warn, Logger}; use ssz_derive::{Decode, Encode}; use state_processing::{ @@ -374,6 +376,7 @@ where anchor_block: &SignedBeaconBlock<E>, anchor_state: &BeaconState<E>, current_slot: Option<Slot>, + count_unrealized_full_config: CountUnrealizedFull, spec: &ChainSpec, ) -> Result<Self, Error<T::Error>> { // Sanity check: the anchor must lie on an epoch boundary. @@ -420,6 +423,7 @@ where current_epoch_shuffling_id, next_epoch_shuffling_id, execution_status, + count_unrealized_full_config, )?; let mut fork_choice = Self { @@ -1451,11 +1455,13 @@ where pub fn proto_array_from_persisted( persisted: &PersistedForkChoice, reset_payload_statuses: ResetPayloadStatuses, + count_unrealized_full: CountUnrealizedFull, spec: &ChainSpec, log: &Logger, ) -> Result<ProtoArrayForkChoice, Error<T::Error>> { - let mut proto_array = ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes) - .map_err(Error::InvalidProtoArrayBytes)?; + let mut proto_array = + ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes, count_unrealized_full) + .map_err(Error::InvalidProtoArrayBytes)?; let contains_invalid_payloads = proto_array.contains_invalid_payloads(); debug!( @@ -1486,7 +1492,7 @@ where "error" => e, "info" => "please report this error", ); - ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes) + ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes, 
count_unrealized_full) .map_err(Error::InvalidProtoArrayBytes) } else { debug!( @@ -1503,11 +1509,17 @@ where persisted: PersistedForkChoice, reset_payload_statuses: ResetPayloadStatuses, fc_store: T, + count_unrealized_full: CountUnrealizedFull, spec: &ChainSpec, log: &Logger, ) -> Result<Self, Error<T::Error>> { - let proto_array = - Self::proto_array_from_persisted(&persisted, reset_payload_statuses, spec, log)?; + let proto_array = Self::proto_array_from_persisted( + &persisted, + reset_payload_statuses, + count_unrealized_full, + spec, + log, + )?; let current_slot = fc_store.get_current_slot(); diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 397a2ff893..b307c66d88 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -7,4 +7,6 @@ pub use crate::fork_choice::{ PersistedForkChoice, QueuedAttestation, ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; -pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation}; +pub use proto_array::{ + Block as ProtoBlock, CountUnrealizedFull, ExecutionStatus, InvalidationOperation, +}; diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index fcb1b94d6f..ba6f3170dc 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -3,6 +3,7 @@ mod ffg_updates; mod no_votes; mod votes; +use crate::proto_array::CountUnrealizedFull; use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; use crate::InvalidationOperation; use serde_derive::{Deserialize, Serialize}; @@ -87,6 +88,7 @@ impl ForkChoiceTestDefinition { junk_shuffling_id.clone(), junk_shuffling_id, ExecutionStatus::Optimistic(ExecutionBlockHash::zero()), + CountUnrealizedFull::default(), ) .expect("should create fork choice struct"); let equivocating_indices = BTreeSet::new(); @@ -296,8 
+298,8 @@ fn get_checkpoint(i: u64) -> Checkpoint { fn check_bytes_round_trip(original: &ProtoArrayForkChoice) { let bytes = original.as_bytes(); - let decoded = - ProtoArrayForkChoice::from_bytes(&bytes).expect("fork choice should decode from bytes"); + let decoded = ProtoArrayForkChoice::from_bytes(&bytes, CountUnrealizedFull::default()) + .expect("fork choice should decode from bytes"); assert!( *original == decoded, "fork choice should encode and decode without change" diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index d6f614b7c3..e7bd9c0ed5 100644 --- a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -4,7 +4,7 @@ mod proto_array; mod proto_array_fork_choice; mod ssz_container; -pub use crate::proto_array::InvalidationOperation; +pub use crate::proto_array::{CountUnrealizedFull, InvalidationOperation}; pub use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; pub use error::Error; diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 9486f0bfc1..590407d7eb 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -118,6 +118,24 @@ impl Default for ProposerBoost { } } +/// Indicate whether we should strictly count unrealized justification/finalization votes. +#[derive(Default, PartialEq, Eq, Debug, Serialize, Deserialize, Copy, Clone)] +pub enum CountUnrealizedFull { + True, + #[default] + False, +} + +impl From<bool> for CountUnrealizedFull { + fn from(b: bool) -> Self { + if b { + CountUnrealizedFull::True + } else { + CountUnrealizedFull::False + } + } +} + #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct ProtoArray { /// Do not attempt to prune the tree unless it has at least this many nodes. 
Small prunes @@ -128,6 +146,7 @@ pub struct ProtoArray { pub nodes: Vec<ProtoNode>, pub indices: HashMap<Hash256, usize>, pub previous_proposer_boost: ProposerBoost, + pub count_unrealized_full: CountUnrealizedFull, } impl ProtoArray { @@ -878,12 +897,14 @@ impl ProtoArray { return false; } + let genesis_epoch = Epoch::new(0); + let checkpoint_match_predicate = |node_justified_checkpoint: Checkpoint, node_finalized_checkpoint: Checkpoint| { let correct_justified = node_justified_checkpoint == self.justified_checkpoint - || self.justified_checkpoint.epoch == Epoch::new(0); + || self.justified_checkpoint.epoch == genesis_epoch; let correct_finalized = node_finalized_checkpoint == self.finalized_checkpoint - || self.finalized_checkpoint.epoch == Epoch::new(0); + || self.finalized_checkpoint.epoch == genesis_epoch; correct_justified && correct_finalized }; @@ -898,13 +919,26 @@ impl ProtoArray { node.justified_checkpoint, node.finalized_checkpoint, ) { - if node.slot.epoch(E::slots_per_epoch()) < current_slot.epoch(E::slots_per_epoch()) { - checkpoint_match_predicate( - unrealized_justified_checkpoint, - unrealized_finalized_checkpoint, - ) + let current_epoch = current_slot.epoch(E::slots_per_epoch()); + + // If previous epoch is justified, pull up all tips to at least the previous epoch + if CountUnrealizedFull::True == self.count_unrealized_full + && (current_epoch > genesis_epoch + && self.justified_checkpoint.epoch + 1 == current_epoch) + { + unrealized_justified_checkpoint.epoch + 1 >= current_epoch + // If previous epoch is not justified, pull up only tips from past epochs up to the current epoch } else { - checkpoint_match_predicate(justified_checkpoint, finalized_checkpoint) + // If block is from a previous epoch, filter using unrealized justification & finalization information + if node.slot.epoch(E::slots_per_epoch()) < current_epoch { + checkpoint_match_predicate( + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + ) + // If block is 
from the current epoch, filter using the head state's justification & finalization information + } else { + checkpoint_match_predicate(justified_checkpoint, finalized_checkpoint) + } } } else if let (Some(justified_checkpoint), Some(finalized_checkpoint)) = (node.justified_checkpoint, node.finalized_checkpoint) diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index cc3f92d46e..8f5d062ec6 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1,4 +1,5 @@ use crate::error::Error; +use crate::proto_array::CountUnrealizedFull; use crate::proto_array::{ calculate_proposer_boost, InvalidationOperation, Iter, ProposerBoost, ProtoArray, ProtoNode, }; @@ -186,6 +187,7 @@ impl ProtoArrayForkChoice { current_epoch_shuffling_id: AttestationShufflingId, next_epoch_shuffling_id: AttestationShufflingId, execution_status: ExecutionStatus, + count_unrealized_full: CountUnrealizedFull, ) -> Result<Self, String> { let mut proto_array = ProtoArray { prune_threshold: DEFAULT_PRUNE_THRESHOLD, @@ -194,6 +196,7 @@ impl ProtoArrayForkChoice { nodes: Vec::with_capacity(1), indices: HashMap::with_capacity(1), previous_proposer_boost: ProposerBoost::default(), + count_unrealized_full, }; let block = Block { @@ -531,8 +534,12 @@ impl ProtoArrayForkChoice { SszContainer::from(self).as_ssz_bytes() } - pub fn from_bytes(bytes: &[u8]) -> Result<Self, String> { + pub fn from_bytes( + bytes: &[u8], + count_unrealized_full: CountUnrealizedFull, + ) -> Result<Self, String> { SszContainer::from_ssz_bytes(bytes) + .map(|container| (container, count_unrealized_full)) .map(Into::into) .map_err(|e| format!("Failed to decode ProtoArrayForkChoice: {:?}", e)) } @@ -692,6 +699,7 @@ mod test_compute_deltas { junk_shuffling_id.clone(), junk_shuffling_id.clone(), execution_status, + CountUnrealizedFull::default(), ) .unwrap(); diff --git 
a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 7f7ef79fe8..63f75ed0a2 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -1,6 +1,6 @@ use crate::proto_array::ProposerBoost; use crate::{ - proto_array::{ProtoArray, ProtoNode}, + proto_array::{CountUnrealizedFull, ProtoArray, ProtoNode}, proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker}, }; use ssz::{four_byte_option_impl, Encode}; @@ -41,8 +41,8 @@ impl From<&ProtoArrayForkChoice> for SszContainer { } } -impl From<SszContainer> for ProtoArrayForkChoice { - fn from(from: SszContainer) -> Self { +impl From<(SszContainer, CountUnrealizedFull)> for ProtoArrayForkChoice { + fn from((from, count_unrealized_full): (SszContainer, CountUnrealizedFull)) -> Self { let proto_array = ProtoArray { prune_threshold: from.prune_threshold, justified_checkpoint: from.justified_checkpoint, @@ -50,6 +50,7 @@ impl From<SszContainer> for ProtoArrayForkChoice { nodes: from.nodes, indices: from.indices.into_iter().collect::<HashMap<_, _>>(), previous_proposer_boost: from.previous_proposer_boost, + count_unrealized_full, }; Self { diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 4e110b85a1..0988e9e2fd 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1,4 +1,4 @@ -use beacon_node::ClientConfig as Config; +use beacon_node::{beacon_chain::CountUnrealizedFull, ClientConfig as Config}; use crate::exec::{CommandLineTestExec, CompletedTest}; use eth1::Eth1Endpoint; @@ -178,6 +178,45 @@ fn count_unrealized_true() { .with_config(|config| assert!(config.chain.count_unrealized)); } +#[test] +fn count_unrealized_full_no_arg() { + CommandLineTest::new() + .flag("count-unrealized-full", None) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.count_unrealized_full, + CountUnrealizedFull::False + ) + }); +} + +#[test] +fn 
count_unrealized_full_false() { + CommandLineTest::new() + .flag("count-unrealized-full", Some("false")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.count_unrealized_full, + CountUnrealizedFull::False + ) + }); +} + +#[test] +fn count_unrealized_full_true() { + CommandLineTest::new() + .flag("count-unrealized-full", Some("true")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.count_unrealized_full, + CountUnrealizedFull::True + ) + }); +} + #[test] fn reset_payload_statuses_default() { CommandLineTest::new() From 95c56630a6a9c24a525942e7205fb13af3d34891 Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Mon, 5 Sep 2022 04:50:48 +0000 Subject: [PATCH 169/184] Fixing a few typos / documentation (#3531) Fixing a few typos in the documentation --- book/src/advanced-datadir.md | 2 +- book/src/advanced_networking.md | 4 ++-- book/src/faq.md | 10 +++++----- book/src/redundancy.md | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/book/src/advanced-datadir.md b/book/src/advanced-datadir.md index 9f81112bdd..074857346e 100644 --- a/book/src/advanced-datadir.md +++ b/book/src/advanced-datadir.md @@ -55,5 +55,5 @@ In this case, the user could solve this warn by following these steps: 1. Restarting the BN process Although there are no known issues with using backwards compatibility functionality, having split -directories is likely to cause confusion for users. Therefore, we recommend affected users migrate +directories is likely to cause confusion for users. Therefore, we recommend that affected users migrate to a consolidated directory structure. 
diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index 71155a1c23..d6fcb82a6b 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -22,7 +22,7 @@ Having a large peer count means that your node must act as an honest RPC server to all your connected peers. If there are many that are syncing, they will often be requesting a large number of blocks from your node. This means your node must perform a lot of work reading and responding to these peers. If your -node is over-loaded with peers and cannot respond in time, other Lighthouse +node is overloaded with peers and cannot respond in time, other Lighthouse peers will consider you non-performant and disfavour you from their peer stores. Your node will also have to handle and manage the gossip and extra bandwidth that comes from having these extra peers. Having a non-responsive @@ -63,7 +63,7 @@ settings allow you construct your initial ENR. Their primary intention is for setting up boot-like nodes and having a contactable ENR on boot. On normal operation of a Lighthouse node, none of these flags need to be set. Setting these flags incorrectly can lead to your node being incorrectly added to the -global DHT which will degrades the discovery process for all Ethereum consensus peers. +global DHT which will degrade the discovery process for all Ethereum consensus peers. The ENR of a Lighthouse node is initially set to be non-contactable. 
The in-built discovery mechanism can determine if your node is publicly accessible, diff --git a/book/src/faq.md b/book/src/faq.md index 6692d61495..5bfae3fa87 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -1,14 +1,14 @@ # Frequently Asked Questions - [Why does it take so long for a validator to be activated?](#why-does-it-take-so-long-for-a-validator-to-be-activated) -- [Do I need to set up any port mappings](#do-i-need-to-set-up-any-port-mappings) +- [Do I need to set up any port mappings?](#do-i-need-to-set-up-any-port-mappings) - [I have a low peer count and it is not increasing](#i-have-a-low-peer-count-and-it-is-not-increasing) - [What should I do if I lose my slashing protection database?](#what-should-i-do-if-i-lose-my-slashing-protection-database) - [How do I update lighthouse?](#how-do-i-update-lighthouse) - [I can't compile lighthouse](#i-cant-compile-lighthouse) -- [What is "Syncing deposit contract block cache"](#what-is-syncing-deposit-contract-block-cache) +- [What is "Syncing deposit contract block cache"?](#what-is-syncing-deposit-contract-block-cache) - [Can I use redundancy in my staking setup?](#can-i-use-redundancy-in-my-staking-setup) -- [How can I monitor my validators](#how-can-i-monitor-my-validators) +- [How can I monitor my validators?](#how-can-i-monitor-my-validators) ### Why does it take so long for a validator to be activated? @@ -86,7 +86,7 @@ repeats until the queue is cleared. Once a validator has been activated, there's no more waiting! It's time to produce blocks and attestations! -### Do I need to set up any port mappings +### Do I need to set up any port mappings? It is not strictly required to open any ports for Lighthouse to connect and participate in the network. Lighthouse should work out-of-the-box. 
However, if @@ -154,7 +154,7 @@ You will just also need to make sure the code you have checked out is up to date See [here.](./installation-source.md#troubleshooting) -### What is "Syncing deposit contract block cache" +### What is "Syncing deposit contract block cache"? ``` Nov 30 21:04:28.268 WARN Syncing deposit contract block cache est_blocks_remaining: initializing deposits, service: slot_notifier diff --git a/book/src/redundancy.md b/book/src/redundancy.md index d4156832bd..dae7ac51fe 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -42,7 +42,7 @@ There are a few interesting properties about the list of `--beacon-nodes`: - *Synced is preferred*: the validator client prefers a synced beacon node over one that is still syncing. - *Failure is sticky*: if a beacon node fails, it will be flagged as offline - and wont be retried again for the rest of the slot (12 seconds). This helps prevent the impact + and won't be retried again for the rest of the slot (12 seconds). This helps prevent the impact of time-outs and other lengthy errors. > Note: When supplying multiple beacon nodes the `http://localhost:5052` address must be explicitly @@ -51,7 +51,7 @@ There are a few interesting properties about the list of `--beacon-nodes`: ### Configuring a redundant Beacon Node -In our previous example we listed `http://192.168.1.1:5052` as a redundant +In our previous example, we listed `http://192.168.1.1:5052` as a redundant node. Apart from having sufficient resources, the backup node should have the following flags: From 177aef8f1e3c316b962f8b1998d4950b760f7a29 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Mon, 5 Sep 2022 04:50:49 +0000 Subject: [PATCH 170/184] Builder profit threshold flag (#3534) ## Issue Addressed Resolves https://github.com/sigp/lighthouse/issues/3517 ## Proposed Changes Adds a `--builder-profit-threshold <wei value>` flag to the BN. 
If an external payload's value field is less than this value, the local payload will be used. The value of the local payload will not be checked (it can't really be checked until the engine API is updated to support this). Co-authored-by: realbigsean <sean@sigmaprime.io> --- beacon_node/execution_layer/src/lib.rs | 26 +++++++- .../src/test_utils/mock_builder.rs | 8 ++- .../src/test_utils/mock_execution_layer.rs | 4 +- .../execution_layer/src/test_utils/mod.rs | 1 + beacon_node/http_api/tests/tests.rs | 60 ++++++++++++++++++- beacon_node/src/cli.rs | 15 +++++ beacon_node/src/config.rs | 2 + book/src/builders.md | 18 +++++- lighthouse/tests/beacon_node.rs | 32 ++++++++++ 9 files changed, 156 insertions(+), 10 deletions(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 3bdca82ad0..89dc3f68e9 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -136,6 +136,7 @@ struct Inner<E: EthSpec> { proposers: RwLock<HashMap<ProposerKey, Proposer>>, executor: TaskExecutor, payload_cache: PayloadCache<E>, + builder_profit_threshold: Uint256, log: Logger, } @@ -156,6 +157,8 @@ pub struct Config { pub jwt_version: Option<String>, /// Default directory for the jwt secret if not provided through cli. pub default_datadir: PathBuf, + /// The minimum value of an external payload for it to be considered in a proposal. 
+ pub builder_profit_threshold: u128, } /// Provides access to one execution engine and provides a neat interface for consumption by the @@ -176,6 +179,7 @@ impl<T: EthSpec> ExecutionLayer<T> { jwt_id, jwt_version, default_datadir, + builder_profit_threshold, } = config; if urls.len() > 1 { @@ -225,7 +229,14 @@ impl<T: EthSpec> ExecutionLayer<T> { }; let builder = builder_url - .map(|url| BuilderHttpClient::new(url).map_err(Error::Builder)) + .map(|url| { + let builder_client = BuilderHttpClient::new(url.clone()).map_err(Error::Builder); + info!(log, + "Connected to external block builder"; + "builder_url" => ?url, + "builder_profit_threshold" => builder_profit_threshold); + builder_client + }) .transpose()?; let inner = Inner { @@ -238,6 +249,7 @@ impl<T: EthSpec> ExecutionLayer<T> { execution_blocks: Mutex::new(LruCache::new(EXECUTION_BLOCKS_LRU_CACHE_SIZE)), executor, payload_cache: PayloadCache::default(), + builder_profit_threshold: Uint256::from(builder_profit_threshold), log, }; @@ -631,7 +643,17 @@ impl<T: EthSpec> ExecutionLayer<T> { "block_hash" => ?header.block_hash(), ); - if header.parent_hash() != parent_hash { + let relay_value = relay.data.message.value; + let configured_value = self.inner.builder_profit_threshold; + if relay_value < configured_value { + info!( + self.log(), + "The value offered by the connected builder does not meet \ + the configured profit threshold. 
Using local payload."; + "configured_value" => ?configured_value, "relay_value" => ?relay_value + ); + Ok(local) + } else if header.parent_hash() != parent_hash { warn!( self.log(), "Invalid parent hash from connected builder, \ diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 6b565cb3d8..b8f74c1c93 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -33,7 +33,7 @@ use types::{ pub enum Operation { FeeRecipient(Address), GasLimit(usize), - Value(usize), + Value(Uint256), ParentHash(Hash256), PrevRandao(Hash256), BlockNumber(usize), @@ -47,7 +47,7 @@ impl Operation { bid.header.fee_recipient = to_ssz_rs(&fee_recipient)? } Operation::GasLimit(gas_limit) => bid.header.gas_limit = gas_limit as u64, - Operation::Value(value) => bid.value = to_ssz_rs(&Uint256::from(value))?, + Operation::Value(value) => bid.value = to_ssz_rs(&value)?, Operation::ParentHash(parent_hash) => bid.header.parent_hash = to_ssz_rs(&parent_hash)?, Operation::PrevRandao(prev_randao) => bid.header.prev_randao = to_ssz_rs(&prev_randao)?, Operation::BlockNumber(block_number) => bid.header.block_number = block_number as u64, @@ -149,7 +149,9 @@ impl<E: EthSpec> MockBuilder<E> { } pub fn add_operation(&self, op: Operation) { - self.operations.write().push(op); + // Insert operations at the front of the vec to make sure `apply_operations` applies them + // in the order they are added. 
+ self.operations.write().insert(0, op); } pub fn invalid_signatures(&self) { diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index cab2367cd0..065abc9360 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -1,6 +1,7 @@ use crate::{ test_utils::{ - MockServer, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY, + MockServer, DEFAULT_BUILDER_THRESHOLD_WEI, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, + DEFAULT_TERMINAL_DIFFICULTY, }, Config, *, }; @@ -66,6 +67,7 @@ impl<T: EthSpec> MockExecutionLayer<T> { builder_url, secret_files: vec![path], suggested_fee_recipient: Some(Address::repeat_byte(42)), + builder_profit_threshold: DEFAULT_BUILDER_THRESHOLD_WEI, ..Default::default() }; let el = diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 18612bf303..aaeea8aa5a 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -28,6 +28,7 @@ pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32]; +pub const DEFAULT_BUILDER_THRESHOLD_WEI: u128 = 1_000_000_000_000_000_000; mod execution_block_generator; mod handle_rpc; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index c8e647be82..ca240e64d2 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -13,6 +13,7 @@ use eth2::{ }; use execution_layer::test_utils::Operation; use execution_layer::test_utils::TestingBuilder; +use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; use 
http_api::{BlockId, StateId}; @@ -341,10 +342,20 @@ impl ApiTester { } pub async fn new_mev_tester() -> Self { - Self::new_with_hard_forks(true, true) + let tester = Self::new_with_hard_forks(true, true) .await .test_post_validator_register_validator() - .await + .await; + // Make sure bids always meet the minimum threshold. + tester + .mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_BUILDER_THRESHOLD_WEI, + ))); + tester } fn skip_slots(self, count: u64) -> Self { @@ -3187,6 +3198,43 @@ impl ApiTester { self } + pub async fn test_payload_rejects_inadequate_builder_threshold(self) -> Self { + // Mutate value. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_BUILDER_THRESHOLD_WEI - 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + #[cfg(target_os = "linux")] pub async fn test_get_lighthouse_health(self) -> Self { self.client.get_lighthouse_health().await.unwrap(); @@ -4159,6 +4207,14 @@ async fn builder_chain_health_optimistic_head() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_inadequate_builder_threshold() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_inadequate_builder_threshold() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 0f4d8a151c..6473f39076 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -777,6 +777,21 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { conditions.") .takes_value(false) ) + .arg( + Arg::with_name("builder-profit-threshold") + .long("builder-profit-threshold") + .value_name("WEI_VALUE") + .help("The minimum reward in wei provided to the proposer by a block builder for \ + an external payload to be considered for inclusion in a proposal. If this \ + threshold is not met, the local EE's payload will be used. This is currently \ + *NOT* in comparison to the value of the local EE's payload. It simply checks \ + whether the total proposer reward from an external payload is equal to or \ + greater than this value. In the future, a comparison to a local payload is \ + likely to be added. 
Example: Use 250000000000000000 to set the threshold to \ + 0.25 ETH.") + .default_value("0") + .takes_value(true) + ) .arg( Arg::with_name("count-unrealized") .long("count-unrealized") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 190dbf721e..54b81fb620 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -309,6 +309,8 @@ pub fn get_config<E: EthSpec>( el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?; el_config.jwt_version = clap_utils::parse_optional(cli_args, "execution-jwt-version")?; el_config.default_datadir = client_config.data_dir.clone(); + el_config.builder_profit_threshold = + clap_utils::parse_required(cli_args, "builder-profit-threshold")?; // If `--execution-endpoint` is provided, we should ignore any `--eth1-endpoints` values and // use `--execution-endpoint` instead. Also, log a deprecation warning. diff --git a/book/src/builders.md b/book/src/builders.md index 110f2450b0..2c24d31003 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -10,8 +10,8 @@ before the validator has committed to (i.e. signed) the block. A primer on MEV c Using the builder API is not known to introduce additional slashing risks, however a live-ness risk (i.e. the ability for the chain to produce valid blocks) is introduced because your node will be -signing blocks without executing the transactions within the block. Therefore it won't know whether -the transactions are valid and it may sign a block that the network will reject. This would lead to +signing blocks without executing the transactions within the block. Therefore, it won't know whether +the transactions are valid, and it may sign a block that the network will reject. This would lead to a missed proposal and the opportunity cost of lost block rewards. 
## How to connect to a builder @@ -151,6 +151,20 @@ By default, Lighthouse is strict with these conditions, but we encourage users t - `--builder-fallback-disable-checks` - This flag disables all checks related to chain health. This means the builder API will always be used for payload construction, regardless of recent chain conditions. +## Builder Profit Threshold + +If you are generally uneasy with the risks associated with outsourced payload production (liveness/censorship) but would +consider using it for the chance of out-sized rewards, this flag may be useful: + +`--builder-profit-threshold <WEI_VALUE>` + +The number provided indicates the minimum reward that an external payload must provide the proposer for it to be considered +for inclusion in a proposal. For example, if you'd only like to use an external payload for a reward of >= 0.25 ETH, you +would provide your beacon node with `--builder-profit-threshold 250000000000000000`. If it's your turn to propose and the +most valuable payload offered by builders is only 0.1 ETH, the local execution engine's payload will be used. Currently, +this threshold just looks at the value of the external payload. No comparison to the local payload is made, although +this feature will likely be added in the future. 
+ [mev-rs]: https://github.com/ralexstokes/mev-rs [mev-boost]: https://github.com/flashbots/mev-boost [gas-limit-api]: https://ethereum.github.io/keymanager-APIs/#/Gas%20Limit diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 0988e9e2fd..ab7978ca0a 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -571,6 +571,38 @@ fn builder_fallback_flags() { assert_eq!(config.chain.builder_fallback_disable_checks, true); }, ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-profit-threshold"), + Some("1000000000000000000000000"), + |config| { + assert_eq!( + config + .execution_layer + .as_ref() + .unwrap() + .builder_profit_threshold, + 1000000000000000000000000 + ); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + None, + None, + |config| { + assert_eq!( + config + .execution_layer + .as_ref() + .unwrap() + .builder_profit_threshold, + 0 + ); + }, + ); } fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_flag: &str) { From 9a7f7f1c1ea5af5343f9372cc5a9400a13b6927b Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Mon, 5 Sep 2022 08:29:00 +0000 Subject: [PATCH 171/184] Configurable monitoring endpoint frequency (#3530) ## Issue Addressed Closes #3514 ## Proposed Changes - Change default monitoring endpoint frequency to 120 seconds to fit with 30k requests/month limit. - Allow configuration of the monitoring endpoint frequency using `--monitoring-endpoint-frequency N` where `N` is a value in seconds. 
--- beacon_node/src/cli.rs | 9 +++++++ beacon_node/src/config.rs | 4 ++++ book/src/advanced_metrics.md | 36 ++++++++++++++++++++++++++++ book/src/validator-monitoring.md | 3 +++ common/monitoring_api/src/lib.rs | 17 ++++++++++--- lighthouse/tests/beacon_node.rs | 15 +++++++++++- lighthouse/tests/validator_client.rs | 13 ++++++++++ validator_client/src/cli.rs | 9 +++++++ validator_client/src/config.rs | 3 +++ 9 files changed, 105 insertions(+), 4 deletions(-) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 6473f39076..3c421a1a3f 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -320,6 +320,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { and never provide an untrusted URL.") .takes_value(true), ) + .arg( + Arg::with_name("monitoring-endpoint-period") + .long("monitoring-endpoint-period") + .value_name("SECONDS") + .help("Defines how many seconds to wait between each message sent to \ + the monitoring-endpoint. Default: 60s") + .requires("monitoring-endpoint") + .takes_value(true), + ) /* * Standard staking flags diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 54b81fb620..b57ba02687 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -178,9 +178,13 @@ pub fn get_config<E: EthSpec>( * Explorer metrics */ if let Some(monitoring_endpoint) = cli_args.value_of("monitoring-endpoint") { + let update_period_secs = + clap_utils::parse_optional(cli_args, "monitoring-endpoint-period")?; + client_config.monitoring_api = Some(monitoring_api::Config { db_path: None, freezer_db_path: None, + update_period_secs, monitoring_endpoint: monitoring_endpoint.to_string(), }); } diff --git a/book/src/advanced_metrics.md b/book/src/advanced_metrics.md index 0d1aa345bf..3141f336a1 100644 --- a/book/src/advanced_metrics.md +++ b/book/src/advanced_metrics.md @@ -48,3 +48,39 @@ Check to ensure that the metrics are available on the default port: ```bash curl localhost:5064/metrics ``` + +## Remote Monitoring + 
+Increasing the monitoring period can be useful if you are running into rate limits when
+_Note: If you are looking for remote metric monitoring, please see the docs on +[Prometheus Metrics](./advanced_metrics.md)_. + ## Monitoring is in the Beacon Node Lighthouse performs validator monitoring in the Beacon Node (BN) instead of the Validator Client diff --git a/common/monitoring_api/src/lib.rs b/common/monitoring_api/src/lib.rs index 03cdf87c25..9592c50a40 100644 --- a/common/monitoring_api/src/lib.rs +++ b/common/monitoring_api/src/lib.rs @@ -16,7 +16,7 @@ use types::*; pub use types::ProcessType; /// Duration after which we collect and send metrics to remote endpoint. -pub const UPDATE_DURATION: u64 = 60; +pub const DEFAULT_UPDATE_DURATION: u64 = 60; /// Timeout for HTTP requests. pub const TIMEOUT_DURATION: u64 = 5; @@ -55,6 +55,8 @@ pub struct Config { /// Path for the cold database required for fetching beacon db size metrics. /// Note: not relevant for validator and system metrics. pub freezer_db_path: Option<PathBuf>, + /// User-defined update period in seconds. + pub update_period_secs: Option<u64>, } #[derive(Clone)] @@ -64,6 +66,7 @@ pub struct MonitoringHttpClient { db_path: Option<PathBuf>, /// Path to the freezer database. 
freezer_db_path: Option<PathBuf>, + update_period: Duration, monitoring_endpoint: SensitiveUrl, log: slog::Logger, } @@ -74,6 +77,9 @@ impl MonitoringHttpClient { client: reqwest::Client::new(), db_path: config.db_path.clone(), freezer_db_path: config.freezer_db_path.clone(), + update_period: Duration::from_secs( + config.update_period_secs.unwrap_or(DEFAULT_UPDATE_DURATION), + ), monitoring_endpoint: SensitiveUrl::parse(&config.monitoring_endpoint) .map_err(|e| format!("Invalid monitoring endpoint: {:?}", e))?, log, @@ -100,10 +106,15 @@ impl MonitoringHttpClient { let mut interval = interval_at( // Have some initial delay for the metrics to get initialized Instant::now() + Duration::from_secs(25), - Duration::from_secs(UPDATE_DURATION), + self.update_period, ); - info!(self.log, "Starting monitoring api"; "endpoint" => %self.monitoring_endpoint); + info!( + self.log, + "Starting monitoring API"; + "endpoint" => %self.monitoring_endpoint, + "update_period" => format!("{}s", self.update_period.as_secs()), + ); let update_future = async move { loop { diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index ab7978ca0a..b28c1a0c3e 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1416,7 +1416,7 @@ fn slasher_backend_override_to_default() { } #[test] -pub fn malloc_tuning_flag() { +fn malloc_tuning_flag() { CommandLineTest::new() .flag("disable-malloc-tuning", None) .run_with_zero_port() @@ -1439,3 +1439,16 @@ fn ensure_panic_on_failed_launch() { assert_eq!(slasher_config.chunk_size, 10); }); } + +#[test] +fn monitoring_endpoint() { + CommandLineTest::new() + .flag("monitoring-endpoint", Some("http://example:8000")) + .flag("monitoring-endpoint-period", Some("30")) + .run_with_zero_port() + .with_config(|config| { + let api_conf = config.monitoring_api.as_ref().unwrap(); + assert_eq!(api_conf.monitoring_endpoint.as_str(), "http://example:8000"); + assert_eq!(api_conf.update_period_secs, Some(30)); + }); 
+} diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 21dc4d7872..39ea2bfaa1 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -443,3 +443,16 @@ fn no_strict_fee_recipient_flag() { .run() .with_config(|config| assert!(!config.strict_fee_recipient)); } + +#[test] +fn monitoring_endpoint() { + CommandLineTest::new() + .flag("monitoring-endpoint", Some("http://example:8000")) + .flag("monitoring-endpoint-period", Some("30")) + .run() + .with_config(|config| { + let api_conf = config.monitoring_api.as_ref().unwrap(); + assert_eq!(api_conf.monitoring_endpoint.as_str(), "http://example:8000"); + assert_eq!(api_conf.update_period_secs, Some(30)); + }); +} diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index e034bd55ca..c59e1cf5dc 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -236,6 +236,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { and never provide an untrusted URL.") .takes_value(true), ) + .arg( + Arg::with_name("monitoring-endpoint-period") + .long("monitoring-endpoint-period") + .value_name("SECONDS") + .help("Defines how many seconds to wait between each message sent to \ + the monitoring-endpoint. 
Default: 60s") + .requires("monitoring-endpoint") + .takes_value(true), + ) .arg( Arg::with_name("enable-doppelganger-protection") .long("enable-doppelganger-protection") diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 42c91927ca..fe5f7fc00d 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -296,9 +296,12 @@ impl Config { * Explorer metrics */ if let Some(monitoring_endpoint) = cli_args.value_of("monitoring-endpoint") { + let update_period_secs = + clap_utils::parse_optional(cli_args, "monitoring-endpoint-period")?; config.monitoring_api = Some(monitoring_api::Config { db_path: None, freezer_db_path: None, + update_period_secs, monitoring_endpoint: monitoring_endpoint.to_string(), }); } From 528e150e538ca100bb458915320b6838623822ea Mon Sep 17 00:00:00 2001 From: ZZ <zzgigi2003@163.com> Date: Mon, 5 Sep 2022 08:29:02 +0000 Subject: [PATCH 172/184] Update graffiti.md (#3537) fix typo --- book/src/graffiti.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/book/src/graffiti.md b/book/src/graffiti.md index d657c9229c..75c2a86dd5 100644 --- a/book/src/graffiti.md +++ b/book/src/graffiti.md @@ -49,10 +49,12 @@ Below is an example of the validator_definitions.yml with validator specific gra ### 3. Using the "--graffiti" flag on the validator client Users can specify a common graffiti for all their validators using the `--graffiti` flag on the validator client. +Usage: `lighthouse vc --graffiti example` + ### 4. Using the "--graffiti" flag on the beacon node Users can also specify a common graffiti using the `--graffiti` flag on the beacon node as a common graffiti for all validators. -Usage: `lighthouse vc --graffiti fortytwo` +Usage: `lighthouse bn --graffiti fortytwo` > Note: The order of preference for loading the graffiti is as follows: > 1. Read from `--graffiti-file` if provided. 
From 419c53bf246610c2749ae44111cda067b84a31e1 Mon Sep 17 00:00:00 2001 From: Alexander Cyon <alex.cyon@gmail.com> Date: Tue, 6 Sep 2022 05:58:27 +0000 Subject: [PATCH 173/184] Add flag 'log-color' preserving color of log redirected to file. (#3538) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add flag 'log-color' which preserves colors of log when stdout is redirected to a file. This is my first lighthouse PR, please let me know if I'm not following contribution guidelines, I welcome meta-feeback (feedback on git commit messages, git branch naming, and the contents of the description of this PR.) ## Issue Addressed Solves https://github.com/sigp/lighthouse/issues/3527 ## Proposed Changes Adding a flag which enables log color preserving when stdout is redirected to a file. ### Usage Below I demonstrate current behaviour (without using the new flag) and the new behaviur (when using new flag). In the screenshot below, I have to panes, one on top running `lighthouse` which redirects to file `~/Desktop/test.log` and one pane in the bottom which runs `tail -f ~/Desktop/test.log`. #### Current behaviour ```sh lighthouse --network prater vc |& tee -a ~/Desktop/test.log ``` **Result is no colors** <img width="1624" alt="current" src="https://user-images.githubusercontent.com/864410/188258226-bfcf8271-4c9e-474c-848e-ac92a60df25c.png"> #### New behaviour ```sh lighthouse --network prater vc --log-color |& tee -a ~/Desktop/test.log ``` **Result is colors** 🔴🟢🔵🟡 <img width="1624" alt="new" src="https://user-images.githubusercontent.com/864410/188258223-7d9ecf09-92c8-4cba-8f24-bd4d88fc0353.png"> ## Additional Info I chose American spelling of "color" instead of Brittish "colour' since that was aligned with `slog`'s API - method`force_color()`, let me know if you prefer spelling "colour" instead. I also chose to let it be an arg not taking any argument, just like `logfile-compress` flag, rather than having to write `--log-color true`. 
--- lcli/src/main.rs | 1 + lighthouse/environment/src/lib.rs | 9 ++++++++- lighthouse/src/main.rs | 10 ++++++++++ testing/simulator/src/eth1_sim.rs | 1 + testing/simulator/src/no_eth1_sim.rs | 1 + testing/simulator/src/sync_sim.rs | 1 + 6 files changed, 22 insertions(+), 1 deletion(-) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 2fd0538850..e6a4eeeacb 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -747,6 +747,7 @@ fn run<T: EthSpec>( debug_level: "trace", logfile_debug_level: "trace", log_format: None, + log_color: false, max_log_size: 0, max_log_number: 0, compression: false, diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 160f696542..679964c0de 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -47,6 +47,7 @@ pub struct LoggerConfig<'a> { pub debug_level: &'a str, pub logfile_debug_level: &'a str, pub log_format: Option<&'a str>, + pub log_color: bool, pub max_log_size: u64, pub max_log_number: usize, pub compression: bool, @@ -139,7 +140,13 @@ impl<E: EthSpec> EnvironmentBuilder<E> { _ => return Err("Logging format provided is not supported".to_string()), } } else { - let stdout_decorator = slog_term::TermDecorator::new().build(); + let stdout_decorator_builder = slog_term::TermDecorator::new(); + let stdout_decorator = if config.log_color { + stdout_decorator_builder.force_color() + } else { + stdout_decorator_builder + } + .build(); let stdout_decorator = logging::AlignedTermDecorator::new(stdout_decorator, logging::MAX_MESSAGE_WIDTH); let stdout_drain = slog_term::FullFormat::new(stdout_decorator).build().fuse(); diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index bd707f7a77..7897494cc4 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -138,6 +138,13 @@ fn main() { .takes_value(true) .global(true), ) + .arg( + Arg::with_name("log-color") + .long("log-color") + .alias("log-colour") + .help("Force outputting colors when emitting 
logs to the terminal.") + .global(true), + ) .arg( Arg::with_name("debug-level") .long("debug-level") @@ -372,6 +379,8 @@ fn run<E: EthSpec>( let log_format = matches.value_of("log-format"); + let log_color = matches.is_present("log-color"); + let logfile_debug_level = matches .value_of("logfile-debug-level") .ok_or("Expected --logfile-debug-level flag")?; @@ -424,6 +433,7 @@ fn run<E: EthSpec>( debug_level, logfile_debug_level, log_format, + log_color, max_log_size: logfile_max_size * 1_024 * 1_024, max_log_number: logfile_max_number, compression: logfile_compress, diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index c54944c2e1..613573cd0d 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -65,6 +65,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { debug_level: log_level, logfile_debug_level: "debug", log_format, + log_color: false, max_log_size: 0, max_log_number: 0, compression: false, diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index 5d2f0be72f..28b8719843 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -50,6 +50,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { debug_level: log_level, logfile_debug_level: "debug", log_format, + log_color: false, max_log_size: 0, max_log_number: 0, compression: false, diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index 3bb460c9fe..07d774b8d4 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -51,6 +51,7 @@ fn syncing_sim( debug_level: log_level, logfile_debug_level: "debug", log_format, + log_color: false, max_log_size: 0, max_log_number: 0, compression: false, From 81d078bfc79a47c50047522199014fc6a2f36b38 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Thu, 8 Sep 2022 00:06:25 +0000 Subject: [PATCH 174/184] remove 
strict fee recipient docs (#3551) ## Issue Addressed Related: #3550 Remove references to the `--strict-fee-recipient` flag in docs. The flag will cause missed proposals prior to the merge transition. Co-authored-by: realbigsean <sean@sigmaprime.io> --- book/src/builders.md | 4 +--- book/src/suggested-fee-recipient.md | 14 ++------------ 2 files changed, 3 insertions(+), 15 deletions(-) diff --git a/book/src/builders.md b/book/src/builders.md index 2c24d31003..0f4b3fb106 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -55,9 +55,7 @@ Both the gas limit and fee recipient will be passed along as suggestions to conn in either, it will *not* keep you from proposing a block with the builder. This is because the bounds on gas limit are calculated based on prior execution blocks, so it should be managed by an execution engine, even if it is external. Depending on the connected relay, payment to the proposer might be in the form of a transaction within the block to the fee recipient, -so a discrepancy in fee recipient might not indicate that there is something afoot. If you know the relay you are connected to *should* -only create blocks with a `fee_recipient` field matching the one suggested, you can use -the [strict fee recipient](suggested-fee-recipient.md#strict-fee-recipient) flag. +so a discrepancy in fee recipient might not indicate that there is something afoot. ### Set Gas Limit via HTTP diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md index c966481a31..c1739aa937 100644 --- a/book/src/suggested-fee-recipient.md +++ b/book/src/suggested-fee-recipient.md @@ -12,8 +12,7 @@ coinbase and the recipient of other fees or rewards. There is no guarantee that an execution node will use the `suggested_fee_recipient` to collect fees, it may use any address it chooses. It is assumed that an honest execution node *will* use the -`suggested_fee_recipient`, but users should note this trust assumption. 
Check out the -[strict fee recipient](#strict-fee-recipient) section for how to mitigate this assumption. +`suggested_fee_recipient`, but users should note this trust assumption. The `suggested_fee_recipient` can be provided to the VC, which will transmit it to the BN. The BN also has a choice regarding the fee recipient it passes to the execution node, creating another @@ -181,15 +180,6 @@ curl -X DELETE \ null ``` -## Strict Fee Recipient - -If the flag `--strict-fee-recipient` is set in the validator client, Lighthouse will refuse to sign any block whose -`fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. This applies to both the normal -block proposal flow and block proposals through the builder API. Proposals through the builder API are more likely -to have a discrepancy in `fee_recipient` so you should be aware of how your connected relay sends proposer payments before -using this flag. If this flag is used, a fee recipient mismatch in the builder API flow will result in a fallback to the -local execution engine for payload construction, where a strict fee recipient check will still be applied. - ## FAQ ### Why do I have to nominate an Ethereum address as the fee recipient? @@ -198,5 +188,5 @@ You might wonder why the validator can't just accumulate transactions fees in th accumulates other staking rewards. The reason for this is that transaction fees are computed and validated by the execution node, and therefore need to be paid to an address that exists on the execution chain. Validators use BLS keys which do not correspond to Ethereum addresses, so they -have no "presence" on the execution chain. Therefore it's necessary for each validator to nominate +have no "presence" on the execution chain. Therefore, it's necessary for each validator to nominate a separate fee recipient address. 
From a9f075c3c0fb25213609e2fc243fbb3e1e6c1772 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Thu, 8 Sep 2022 23:46:02 +0000 Subject: [PATCH 175/184] Remove strict fee recipient (#3552) ## Issue Addressed Resolves: #3550 Remove the `--strict-fee-recipient` flag. It will cause missed proposals prior to the bellatrix transition. Co-authored-by: realbigsean <sean@sigmaprime.io> --- lighthouse/tests/validator_client.rs | 14 -------------- validator_client/src/block_service.rs | 22 ---------------------- validator_client/src/cli.rs | 19 +++++++++---------- validator_client/src/config.rs | 10 +++++----- validator_client/src/lib.rs | 1 - 5 files changed, 14 insertions(+), 52 deletions(-) diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 39ea2bfaa1..a9b76c2754 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -430,20 +430,6 @@ fn builder_registration_timestamp_override_flag() { assert_eq!(config.builder_registration_timestamp_override, Some(100)) }); } -#[test] -fn strict_fee_recipient_flag() { - CommandLineTest::new() - .flag("strict-fee-recipient", None) - .run() - .with_config(|config| assert!(config.strict_fee_recipient)); -} -#[test] -fn no_strict_fee_recipient_flag() { - CommandLineTest::new() - .run() - .with_config(|config| assert!(!config.strict_fee_recipient)); -} - #[test] fn monitoring_endpoint() { CommandLineTest::new() diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 2a8d164225..ac1ba11674 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -43,7 +43,6 @@ pub struct BlockServiceBuilder<T, E: EthSpec> { context: Option<RuntimeContext<E>>, graffiti: Option<Graffiti>, graffiti_file: Option<GraffitiFile>, - strict_fee_recipient: bool, } impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { @@ -55,7 +54,6 @@ impl<T: SlotClock + 'static, E: 
EthSpec> BlockServiceBuilder<T, E> { context: None, graffiti: None, graffiti_file: None, - strict_fee_recipient: false, } } @@ -89,11 +87,6 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { self } - pub fn strict_fee_recipient(mut self, strict_fee_recipient: bool) -> Self { - self.strict_fee_recipient = strict_fee_recipient; - self - } - pub fn build(self) -> Result<BlockService<T, E>, String> { Ok(BlockService { inner: Arc::new(Inner { @@ -111,7 +104,6 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { .ok_or("Cannot build BlockService without runtime_context")?, graffiti: self.graffiti, graffiti_file: self.graffiti_file, - strict_fee_recipient: self.strict_fee_recipient, }), }) } @@ -125,7 +117,6 @@ pub struct Inner<T, E: EthSpec> { context: RuntimeContext<E>, graffiti: Option<Graffiti>, graffiti_file: Option<GraffitiFile>, - strict_fee_recipient: bool, } /// Attempts to produce attestations for any block producer(s) at the start of the epoch. @@ -324,9 +315,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { let self_ref = &self; let proposer_index = self.validator_store.validator_index(&validator_pubkey); let validator_pubkey_ref = &validator_pubkey; - let fee_recipient = self.validator_store.get_fee_recipient(&validator_pubkey); - let strict_fee_recipient = self.strict_fee_recipient; // Request block from first responsive beacon node. let block = self .beacon_nodes @@ -377,17 +366,6 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { } }; - // Ensure the correctness of the execution payload's fee recipient. 
- if strict_fee_recipient { - if let Ok(execution_payload) = block.body().execution_payload() { - if Some(execution_payload.fee_recipient()) != fee_recipient { - return Err(BlockError::Recoverable( - "Incorrect fee recipient used by builder".to_string(), - )); - } - } - } - if proposer_index != Some(block.proposer_index()) { return Err(BlockError::Recoverable( "Proposer index does not match block proposer. Beacon chain re-orged" diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index c59e1cf5dc..5c7205a4ae 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -268,18 +268,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { headers during proposals and will sign over headers. Useful for outsourcing \ execution payload construction during proposals.") .takes_value(false), - ) - .arg( + ).arg( Arg::with_name("strict-fee-recipient") .long("strict-fee-recipient") - .help("If this flag is set, Lighthouse will refuse to sign any block whose \ - `fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. \ - This applies to both the normal block proposal flow, as well as block proposals \ - through the builder API. Proposals through the builder API are more likely to have a \ - discrepancy in `fee_recipient` so you should be aware of how your connected relay \ - sends proposer payments before using this flag. If this flag is used, a fee recipient \ - mismatch in the builder API flow will result in a fallback to the local execution engine \ - for payload construction, where a strict fee recipient check will still be applied.") + .help("[DEPRECATED] If this flag is set, Lighthouse will refuse to sign any block whose \ + `fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. \ + This applies to both the normal block proposal flow, as well as block proposals \ + through the builder API. 
Proposals through the builder API are more likely to have a \ + discrepancy in `fee_recipient` so you should be aware of how your connected relay \ + sends proposer payments before using this flag. If this flag is used, a fee recipient \ + mismatch in the builder API flow will result in a fallback to the local execution engine \ + for payload construction, where a strict fee recipient check will still be applied.") .takes_value(false), ) .arg( diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index fe5f7fc00d..22472f7512 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -61,9 +61,6 @@ pub struct Config { /// A list of custom certificates that the validator client will additionally use when /// connecting to a beacon node over SSL/TLS. pub beacon_nodes_tls_certs: Option<Vec<PathBuf>>, - /// Enabling this will make sure the validator client never signs a block whose `fee_recipient` - /// does not match the `suggested_fee_recipient`. - pub strict_fee_recipient: bool, } impl Default for Config { @@ -99,7 +96,6 @@ impl Default for Config { builder_proposals: false, builder_registration_timestamp_override: None, gas_limit: None, - strict_fee_recipient: false, } } } @@ -334,7 +330,11 @@ impl Config { } if cli_args.is_present("strict-fee-recipient") { - config.strict_fee_recipient = true; + warn!( + log, + "The flag `--strict-fee-recipient` has been deprecated due to a bug causing \ + missed proposals. The flag will be ignored." 
+ ); } Ok(config) diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index c05507576e..9db4cc0315 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -420,7 +420,6 @@ impl<T: EthSpec> ProductionValidatorClient<T> { .runtime_context(context.service_context("block".into())) .graffiti(config.graffiti) .graffiti_file(config.graffiti_file.clone()) - .strict_fee_recipient(config.strict_fee_recipient) .build()?; let attestation_service = AttestationServiceBuilder::new() From d1a8d6cf918d924f237fb781566d6117bf6cce8c Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Thu, 8 Sep 2022 23:46:03 +0000 Subject: [PATCH 176/184] Pin mev rs deps (#3557) ## Issue Addressed We were unable to update lighthouse by running `cargo update` because some of the `mev-build-rs` deps weren't pinned. But `mev-build-rs` is now pinned here and includes it's own pinned commits for `ssz-rs` and `etheruem-consensus` Co-authored-by: realbigsean <sean@sigmaprime.io> --- Cargo.lock | 14 +++++++------- beacon_node/execution_layer/Cargo.toml | 6 +++--- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d742276687..a4595aa13c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -364,7 +364,7 @@ checksum = "3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" [[package]] name = "beacon-api-client" version = "0.1.0" -source = "git+https://github.com/ralexstokes/beacon-api-client#061c1b1bb1f18bcd7cf23d4cd375f99c78d5a2a5" +source = "git+https://github.com/ralexstokes/beacon-api-client?rev=de34eeb#de34eeb92e4fdee5709d142910abf42cf857609b" dependencies = [ "ethereum-consensus", "http", @@ -1935,8 +1935,8 @@ dependencies = [ [[package]] name = "ethereum-consensus" -version = "0.1.0" -source = "git+https://github.com/ralexstokes/ethereum-consensus#592eb44dc24403cc9d152f4b96683ab551533201" +version = "0.1.1" +source = 
"git+https://github.com/ralexstokes/ethereum-consensus?rev=e1188b1#e1188b14f320f225f2e53aa10336614565f04129" dependencies = [ "async-stream", "blst", @@ -3893,8 +3893,8 @@ dependencies = [ [[package]] name = "mev-build-rs" -version = "0.2.0" -source = "git+https://github.com/ralexstokes/mev-rs?tag=v0.2.0#921fa3f7c3497839461964a5297dfe4f2cef3136" +version = "0.2.1" +source = "git+https://github.com/ralexstokes/mev-rs?rev=a088806575805c00d63fa59c002abc5eb1dc7709#a088806575805c00d63fa59c002abc5eb1dc7709" dependencies = [ "async-trait", "axum", @@ -6240,7 +6240,7 @@ dependencies = [ [[package]] name = "ssz-rs" version = "0.8.0" -source = "git+https://github.com/ralexstokes/ssz-rs#bd7cfb5a836e28747e6ce5e570234d14df0b24f7" +source = "git+https://github.com/ralexstokes/ssz-rs?rev=cb08f1#cb08f18ca919cc1b685b861d0fa9e2daabe89737" dependencies = [ "bitvec 1.0.1", "hex", @@ -6255,7 +6255,7 @@ dependencies = [ [[package]] name = "ssz-rs-derive" version = "0.8.0" -source = "git+https://github.com/ralexstokes/ssz-rs#bd7cfb5a836e28747e6ce5e570234d14df0b24f7" +source = "git+https://github.com/ralexstokes/ssz-rs?rev=cb08f1#cb08f18ca919cc1b685b861d0fa9e2daabe89737" dependencies = [ "proc-macro2", "quote", diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 5c0e66ea44..770bc4cf8c 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -40,8 +40,8 @@ lazy_static = "1.4.0" ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } builder_client = { path = "../builder_client" } fork_choice = { path = "../../consensus/fork_choice" } -mev-build-rs = {git = "https://github.com/ralexstokes/mev-rs", tag = "v0.2.0"} -ethereum-consensus = {git = "https://github.com/ralexstokes/ethereum-consensus"} -ssz-rs = {git = "https://github.com/ralexstokes/ssz-rs"} +mev-build-rs = {git = "https://github.com/ralexstokes/mev-rs", rev = 
"a088806575805c00d63fa59c002abc5eb1dc7709"} +ethereum-consensus = {git = "https://github.com/ralexstokes/ethereum-consensus", rev = "e1188b1" } +ssz-rs = {git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f1" } tokio-stream = { version = "0.1.9", features = [ "sync" ] } strum = "0.24.0" From 60e9777db81496c851e52a7c811a4b7f1d8245fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20Roy?= <remyroy@remyroy.com> Date: Fri, 9 Sep 2022 02:52:35 +0000 Subject: [PATCH 177/184] Add community checkpoint sync endpoints to book (#3558) ## Proposed Changes Add a section on the new community checkpoint sync endpoints in the book. This should help stakers sync faster even without having to create an account. --- book/src/checkpoint-sync.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index fc878f5f65..0ff1dae835 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -41,6 +41,13 @@ Once the checkpoint is loaded Lighthouse will sync forwards to the head of the c If a validator client is connected to the node then it will be able to start completing its duties as soon as forwards sync completes. +### Use a community checkpoint sync endpoint + +The Ethereum community provides various [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) for you to choose from for your initial checkpoint state. Select one for your network and use it as the url for the `--checkpoint-sync-url` flag. e.g. +``` +lighthouse bn --checkpoint-sync-url https://beaconstate.info/ ... +``` + ### Use Infura as a remote beacon node provider You can use Infura as the remote beacon node provider to load the initial checkpoint state. 
From f682df51a10ea9f371513d2574650c2d42f39409 Mon Sep 17 00:00:00 2001 From: Nils Effinghausen <n.effinghausen@gmail.com> Date: Sat, 10 Sep 2022 01:35:10 +0000 Subject: [PATCH 178/184] fix description for BALANCES_CACHE_MISSES metric (#3545) ## Issue Addressed fixes metric description Co-authored-by: Nils Effinghausen <nils.effinghausen@t-systems.com> --- beacon_node/beacon_chain/src/metrics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 4d0f63674a..8030bfa718 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -325,7 +325,7 @@ lazy_static! { pub static ref BALANCES_CACHE_HITS: Result<IntCounter> = try_create_int_counter("beacon_balances_cache_hits_total", "Count of times balances cache fulfils request"); pub static ref BALANCES_CACHE_MISSES: Result<IntCounter> = - try_create_int_counter("beacon_balances_cache_misses_total", "Count of times balances cache fulfils request"); + try_create_int_counter("beacon_balances_cache_misses_total", "Count of times balances cache misses request"); /* * Persisting BeaconChain components to disk From cfa518ab41232b7af45a42057372b73eaf5e322f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20Roy?= <remyroy@remyroy.com> Date: Sat, 10 Sep 2022 01:35:11 +0000 Subject: [PATCH 179/184] Use generic domain for community checkpoint sync example (#3560) ## Proposed Changes Use a generic domain for community checkpoint sync example to meet the concern raised in https://github.com/sigp/lighthouse/pull/3558#discussion_r966720171 --- book/src/checkpoint-sync.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index 0ff1dae835..736aa08f1c 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -45,7 +45,7 @@ as soon as forwards sync completes. 
The Ethereum community provides various [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) for you to choose from for your initial checkpoint state. Select one for your network and use it as the url for the `--checkpoint-sync-url` flag. e.g. ``` -lighthouse bn --checkpoint-sync-url https://beaconstate.info/ ... +lighthouse bn --checkpoint-sync-url https://example.com/ ... ``` ### Use Infura as a remote beacon node provider From 98815516a102fd0b3f7c31c516a183282e4f3a30 Mon Sep 17 00:00:00 2001 From: tim gretler <gretler.tim@gmail.com> Date: Tue, 13 Sep 2022 01:57:44 +0000 Subject: [PATCH 180/184] Support histogram buckets (#3391) ## Issue Addressed #3285 ## Proposed Changes Adds support for specifying histogram with buckets and adds new metric buckets for metrics mentioned in issue. ## Additional Info Need some help for the buckets. Co-authored-by: Michael Sproul <micsproul@gmail.com> --- beacon_node/beacon_chain/src/metrics.rs | 29 +++++++++---- beacon_node/network/src/metrics.rs | 9 +++- common/lighthouse_metrics/src/lib.rs | 55 +++++++++++++++++++++++-- 3 files changed, 78 insertions(+), 15 deletions(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 8030bfa718..cad77a378f 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -122,14 +122,17 @@ lazy_static! { /* * Block Statistics */ - pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Result<Histogram> = try_create_histogram( + pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Result<Histogram> = try_create_histogram_with_buckets( "beacon_operations_per_block_attestation_total", - "Number of attestations in a block" + "Number of attestations in a block", + // Full block is 128. 
+ Ok(vec![0_f64, 1_f64, 3_f64, 15_f64, 31_f64, 63_f64, 127_f64, 255_f64]) ); - pub static ref BLOCK_SIZE: Result<Histogram> = try_create_histogram( + pub static ref BLOCK_SIZE: Result<Histogram> = try_create_histogram_with_buckets( "beacon_block_total_size", - "Size of a signed beacon block" + "Size of a signed beacon block", + linear_buckets(5120_f64,5120_f64,10) ); /* @@ -775,21 +778,29 @@ lazy_static! { /* * Block Delay Metrics */ - pub static ref BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram( + pub static ref BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets( "beacon_block_observed_slot_start_delay_time", "Duration between the start of the block's slot and the time the block was observed.", + // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] + decimal_buckets(-1,2) ); - pub static ref BEACON_BLOCK_IMPORTED_OBSERVED_DELAY_TIME: Result<Histogram> = try_create_histogram( + pub static ref BEACON_BLOCK_IMPORTED_OBSERVED_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets( "beacon_block_imported_observed_delay_time", "Duration between the time the block was observed and the time when it was imported.", + // [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5] + decimal_buckets(-2,0) ); - pub static ref BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME: Result<Histogram> = try_create_histogram( + pub static ref BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets( "beacon_block_head_imported_delay_time", "Duration between the time the block was imported and the time when it was set as head.", - ); - pub static ref BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram( + // [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5] + decimal_buckets(-2,-1) + ); + pub static ref BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets( "beacon_block_head_slot_start_delay_time", "Duration between the start of the 
block's slot and the time when it was set as head.", + // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] + decimal_buckets(-1,2) ); pub static ref BEACON_BLOCK_HEAD_SLOT_START_DELAY_EXCEEDED_TOTAL: Result<IntCounter> = try_create_int_counter( "beacon_block_head_slot_start_delay_exceeded_total", diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index defb9c6000..b4e7a3bace 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -311,13 +311,18 @@ lazy_static! { /* * Block Delay Metrics */ - pub static ref BEACON_BLOCK_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME: Result<Histogram> = try_create_histogram( + pub static ref BEACON_BLOCK_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets( "beacon_block_gossip_propagation_verification_delay_time", "Duration between when the block is received and when it is verified for propagation.", + // [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5] + decimal_buckets(-3,-1) ); - pub static ref BEACON_BLOCK_GOSSIP_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram( + pub static ref BEACON_BLOCK_GOSSIP_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets( "beacon_block_gossip_slot_start_delay_time", "Duration between when the block is received and the start of the slot it belongs to.", + // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] + decimal_buckets(-1,2) + ); pub static ref BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL: Result<IntCounter> = try_create_int_counter( "beacon_block_gossip_arrived_late_total", diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs index 98973de1ad..5d25bb313f 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/lighthouse_metrics/src/lib.rs @@ -54,14 +54,15 @@ //! } //! 
``` -use prometheus::{HistogramOpts, Opts}; +use prometheus::{Error, HistogramOpts, Opts}; use std::time::Duration; use prometheus::core::{Atomic, GenericGauge, GenericGaugeVec}; pub use prometheus::{ + exponential_buckets, linear_buckets, proto::{Metric, MetricFamily, MetricType}, Encoder, Gauge, GaugeVec, Histogram, HistogramTimer, HistogramVec, IntCounter, IntCounterVec, - IntGauge, IntGaugeVec, Result, TextEncoder, + IntGauge, IntGaugeVec, Result, TextEncoder, DEFAULT_BUCKETS, }; /// Collect all the metrics for reporting. @@ -99,7 +100,17 @@ pub fn try_create_float_gauge(name: &str, help: &str) -> Result<Gauge> { /// Attempts to create a `Histogram`, returning `Err` if the registry does not accept the counter /// (potentially due to naming conflict). pub fn try_create_histogram(name: &str, help: &str) -> Result<Histogram> { - let opts = HistogramOpts::new(name, help); + try_create_histogram_with_buckets(name, help, Ok(DEFAULT_BUCKETS.to_vec())) +} + +/// Attempts to create a `Histogram` with specified buckets, returning `Err` if the registry does not accept the counter +/// (potentially due to naming conflict) or no valid buckets are provided. +pub fn try_create_histogram_with_buckets( + name: &str, + help: &str, + buckets: Result<Vec<f64>>, +) -> Result<Histogram> { + let opts = HistogramOpts::new(name, help).buckets(buckets?); let histogram = Histogram::with_opts(opts)?; prometheus::register(Box::new(histogram.clone()))?; Ok(histogram) @@ -112,7 +123,18 @@ pub fn try_create_histogram_vec( help: &str, label_names: &[&str], ) -> Result<HistogramVec> { - let opts = HistogramOpts::new(name, help); + try_create_histogram_vec_with_buckets(name, help, Ok(DEFAULT_BUCKETS.to_vec()), label_names) +} + +/// Attempts to create a `HistogramVec` with specified buckets, returning `Err` if the registry does not accept the counter +/// (potentially due to naming conflict) or no valid buckets are provided. 
+pub fn try_create_histogram_vec_with_buckets( + name: &str, + help: &str, + buckets: Result<Vec<f64>>, + label_names: &[&str], +) -> Result<HistogramVec> { + let opts = HistogramOpts::new(name, help).buckets(buckets?); let histogram_vec = HistogramVec::new(opts, label_names)?; prometheus::register(Box::new(histogram_vec.clone()))?; Ok(histogram_vec) @@ -357,3 +379,28 @@ fn duration_to_f64(duration: Duration) -> f64 { let nanos = f64::from(duration.subsec_nanos()) / 1e9; duration.as_secs() as f64 + nanos } + +/// Create buckets using divisors of 10 multiplied by powers of 10, e.g., +/// […, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, …] +/// +/// The buckets go from `10^min_power` to `5 × 10^max_power`, inclusively. +/// The total number of buckets is `3 * (max_power - min_power + 1)`. +/// +/// assert_eq!(vec![0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0], decimal_buckets(-1, 1)); +/// assert_eq!(vec![1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0], decimal_buckets(0, 2)); +pub fn decimal_buckets(min_power: i32, max_power: i32) -> Result<Vec<f64>> { + if max_power < min_power { + return Err(Error::Msg(format!( + "decimal_buckets min_power needs to be <= max_power, given {} and {}", + min_power, max_power + ))); + } + + let mut buckets = Vec::with_capacity(3 * (max_power - min_power + 1) as usize); + for n in min_power..=max_power { + for m in &[1f64, 2f64, 5f64] { + buckets.push(m * 10f64.powi(n)) + } + } + Ok(buckets) +} From 88a7e5a2ca9aa51df3652ef1b8923fe852210d73 Mon Sep 17 00:00:00 2001 From: Alessandro Tagliapietra <tagliapietra.alessandro@gmail.com> Date: Tue, 13 Sep 2022 01:57:45 +0000 Subject: [PATCH 181/184] Fix ganache test endpoint for ipv6 machines (#3563) ## Issue Addressed #3562 ## Proposed Changes Change the fork endpoint from `localhost` to `127.0.0.1` to match the ganache default listening host. This way it doesn't try (and fail) to connect to `::1` on IPV6 machines. 
## Additional Info First PR --- testing/eth1_test_rig/src/ganache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/eth1_test_rig/src/ganache.rs b/testing/eth1_test_rig/src/ganache.rs index 9b6a33ff59..d8df3fd8ae 100644 --- a/testing/eth1_test_rig/src/ganache.rs +++ b/testing/eth1_test_rig/src/ganache.rs @@ -166,7 +166,7 @@ impl GanacheInstance { } fn endpoint(port: u16) -> String { - format!("http://localhost:{}", port) + format!("http://127.0.0.1:{}", port) } impl Drop for GanacheInstance { From 614d74a6d4e342dcf664755ccf8f7e95353aac2c Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@GMAIL.com> Date: Tue, 13 Sep 2022 01:57:46 +0000 Subject: [PATCH 182/184] Fix builder gas limit docs (#3569) ## Issue Addressed Make sure gas limit examples in our docs represent sane values. Thanks @dankrad for raising this in discord. ## Additional Info We could also consider logging warnings about whether the gas limits configured are sane. Prysm has an open issue for this: https://github.com/prysmaticlabs/prysm/issues/10810 Co-authored-by: realbigsean <sean@sigmaprime.io> --- book/src/builders.md | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/book/src/builders.md b/book/src/builders.md index 0f4b3fb106..109a75a040 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -45,17 +45,25 @@ relays, run one of the following services and configure lighthouse to use it wit ## Validator Client Configuration -In the validator client you can configure gas limit, fee recipient and whether to use the builder API on a -per-validator basis or set a configuration for all validators managed by the validator client. CLI flags for each of these -will serve as default values for all validators managed by the validator client. 
In order to manage the values -per-validator you can either make updates to the `validator_definitions.yml` file or you can use the HTTP requests +In the validator client you can configure gas limit and fee recipient on a per-validator basis. If no gas limit is +configured, Lighthouse will use a default gas limit of 30,000,000, which is the current default value used in execution +engines. You can also enable or disable use of external builders on a per-validator basis rather than using +`--builder-proposals`, which enables external builders for all validators. In order to manage these configurations +per-validator, you can either make updates to the `validator_definitions.yml` file or you can use the HTTP requests described below. Both the gas limit and fee recipient will be passed along as suggestions to connected builders. If there is a discrepancy -in either, it will *not* keep you from proposing a block with the builder. This is because the bounds on gas limit are calculated based -on prior execution blocks, so it should be managed by an execution engine, even if it is external. Depending on the -connected relay, payment to the proposer might be in the form of a transaction within the block to the fee recipient, -so a discrepancy in fee recipient might not indicate that there is something afoot. +in either, it will *not* keep you from proposing a block with the builder. This is because the bounds on gas limit are +calculated based on prior execution blocks, so an honest external builder will make sure that even if your +requested gas limit value is out of the specified range, a valid gas limit in the direction of your request will be +used in constructing the block. Depending on the connected relay, payment to the proposer might be in the form of a +transaction within the block to the fee recipient, so a discrepancy in fee recipient might not indicate that there +is something afoot. 
+ +> Note: The gas limit configured here is effectively a vote on block size, so the configuration should not be taken lightly. +> 30,000,000 is currently seen as a value balancing block size with how expensive it is for +> the network to validate blocks. So if you don't feel comfortable making an informed "vote", using the default value is +> encouraged. We will update the default value if the community reaches a rough consensus on a new value. ### Set Gas Limit via HTTP @@ -91,7 +99,7 @@ Each field is optional. ```json { "builder_proposals": true, - "gas_limit": 3000000001 + "gas_limit": 30000001 } ``` @@ -116,14 +124,14 @@ You can also directly configure these fields in the `validator_definitions.yml` voting_keystore_path: /home/paul/.lighthouse/validators/0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007/voting-keystore.json voting_keystore_password_path: /home/paul/.lighthouse/secrets/0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007 suggested_fee_recipient: "0x6cc8dcbca744a6e4ffedb98e1d0df903b10abd21" - gas_limit: 3000000001 + gas_limit: 30000001 builder_proposals: true - enabled: false voting_public_key: "0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477" type: local_keystore voting_keystore_path: /home/paul/.lighthouse/validators/0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477/voting-keystore.json voting_keystore_password: myStrongpa55word123&$ suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" - gas_limit: 333333333 + gas_limit: 33333333 builder_proposals: true ``` From cd31e54b995144dd109c877406f4b3fe386193f0 Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Tue, 13 Sep 2022 01:57:47 +0000 Subject: [PATCH 183/184] Bump `axum` deps (#3570) ## Issue Addressed Fix a `cargo-audit` failure. 
We don't use `axum` for anything besides tests, but `cargo-audit` is failing due to this vulnerability in `axum-core`: https://rustsec.org/advisories/RUSTSEC-2022-0055 --- Cargo.lock | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a4595aa13c..58c9ec2a72 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -285,9 +285,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.13" +version = "0.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9496f0c1d1afb7a2af4338bbe1d969cddfead41d87a9fb3aaa6d0bbc7af648" +checksum = "c9e3356844c4d6a6d6467b8da2cffb4a2820be256f50a3a386c9d152bab31043" dependencies = [ "async-trait", "axum-core", @@ -316,9 +316,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4f44a0e6200e9d11a1cdc989e4b358f6e3d354fbf48478f345a17f4e43f8635" +checksum = "d9f0c0a60006f2a293d82d571f635042a72edf927539b7685bd62d361963839b" dependencies = [ "async-trait", "bytes", @@ -326,6 +326,8 @@ dependencies = [ "http", "http-body", "mime", + "tower-layer", + "tower-service", ] [[package]] From 7d3948c8fe32a7624d7fafaf2561a1f8ac291e3f Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Tue, 13 Sep 2022 17:19:27 +0000 Subject: [PATCH 184/184] Add metric for re-org distance (#3566) ## Issue Addressed NA ## Proposed Changes Add a metric to track the re-org distance. 
## Additional Info NA --- beacon_node/beacon_chain/src/canonical_head.rs | 4 ++++ beacon_node/beacon_chain/src/metrics.rs | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 644364bc60..c9bd6db0e6 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -1189,6 +1189,10 @@ fn detect_reorg<E: EthSpec>( metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT); metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT_INTEROP); + metrics::set_gauge( + &metrics::FORK_CHOICE_REORG_DISTANCE, + reorg_distance.as_u64() as i64, + ); warn!( log, "Beacon chain re-org"; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index cad77a378f..b454a6ff88 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -305,6 +305,10 @@ lazy_static! { "beacon_fork_choice_reorg_total", "Count of occasions fork choice has switched to a different chain" ); + pub static ref FORK_CHOICE_REORG_DISTANCE: Result<IntGauge> = try_create_int_gauge( + "beacon_fork_choice_reorg_distance", + "The distance of each re-org of the fork choice algorithm" + ); pub static ref FORK_CHOICE_REORG_COUNT_INTEROP: Result<IntCounter> = try_create_int_counter( "beacon_reorgs_total", "Count of occasions fork choice has switched to a different chain"