From 758b58c9e97931b34ee8046cfea64780659ef788 Mon Sep 17 00:00:00 2001
From: chonghe <44791194+chong-he@users.noreply.github.com>
Date: Mon, 24 Jun 2024 07:52:15 +0800
Subject: [PATCH 1/2] Update slasher DB size and Lighthouse book (#5934)
* Update book
* Fix
* mdlint
* Revise
* Update slasher doc
* Revise max db size
* change blob to file
* Add checkpoint-blobs
* Thanks Jimmy for the command
* Update schema docs
---
book/src/checkpoint-sync.md | 15 ++++++++++--
book/src/database-migrations.md | 1 +
book/src/faq.md | 42 ++++++++++++++++++++++++++-------
book/src/slasher.md | 4 ++--
book/src/slashing-protection.md | 4 ++--
slasher/src/config.rs | 2 +-
6 files changed, 52 insertions(+), 16 deletions(-)
diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md
index 63d96874c3..2bf028acfe 100644
--- a/book/src/checkpoint-sync.md
+++ b/book/src/checkpoint-sync.md
@@ -146,8 +146,19 @@ For more information on historic state storage see the
To manually specify a checkpoint use the following two flags:
-* `--checkpoint-state`: accepts an SSZ-encoded `BeaconState` blob
-* `--checkpoint-block`: accepts an SSZ-encoded `SignedBeaconBlock` blob
+* `--checkpoint-state`: accepts an SSZ-encoded `BeaconState` file
+* `--checkpoint-block`: accepts an SSZ-encoded `SignedBeaconBlock` file
+* `--checkpoint-blobs`: accepts an SSZ-encoded `Blobs` file
+
+The commands to download these files are as follows:
+
+```bash
+curl -H "Accept: application/octet-stream" "http://localhost:5052/eth/v2/debug/beacon/states/$SLOT" > state.ssz
+curl -H "Accept: application/octet-stream" "http://localhost:5052/eth/v2/beacon/blocks/$SLOT" > block.ssz
+curl -H "Accept: application/octet-stream" "http://localhost:5052/eth/v1/beacon/blob_sidecars/$SLOT" > blobs.ssz
+```
+
+where `$SLOT` is the slot number. It can be specified as `head` or `finalized` as well.
_Both_ the state and block must be provided and the state **must** match the block. The
state may be from the same slot as the block (unadvanced), or advanced to an epoch boundary,
diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md
index a81acd7794..611c61cb9c 100644
--- a/book/src/database-migrations.md
+++ b/book/src/database-migrations.md
@@ -16,6 +16,7 @@ validator client or the slasher**.
| Lighthouse version | Release date | Schema version | Downgrade available? |
|--------------------|--------------|----------------|----------------------|
+| v5.2.0 | Jun 2024 | v19 | yes before Deneb |
| v5.1.0 | Mar 2024 | v19 | yes before Deneb |
| v5.0.0 | Feb 2024 | v19 | yes before Deneb |
| v4.6.0 | Dec 2023 | v19 | yes before Deneb |
diff --git a/book/src/faq.md b/book/src/faq.md
index c7fdb6b32f..2de7841343 100644
--- a/book/src/faq.md
+++ b/book/src/faq.md
@@ -15,6 +15,7 @@
- [My beacon node logs `WARN Error signalling fork choice waiter`, what should I do?](#bn-fork-choice)
- [My beacon node logs `ERRO Aggregate attestation queue full`, what should I do?](#bn-queue-full)
- [My beacon node logs `WARN Failed to finalize deposit cache`, what should I do?](#bn-deposit-cache)
+- [My beacon node logs `WARN Could not verify blob sidecar for gossip`, what does it mean?](#bn-blob)
## [Validator](#validator-1)
@@ -214,6 +215,16 @@ This suggests that the computer resources are being overwhelmed. It could be due
This is a known [bug](https://github.com/sigp/lighthouse/issues/3707) that will fix by itself.
+### My beacon node logs `WARN Could not verify blob sidecar for gossip`, what does it mean?
+
+An example of the full log is shown below:
+
+```text
+Jun 07 23:05:12.170 WARN Could not verify blob sidecar for gossip. Ignoring the blob sidecar, commitment: 0xaa97…6f54, index: 1, root: 0x93b8…c47c, slot: 9248017, error: PastFinalizedSlot { blob_slot: Slot(9248017), finalized_slot: Slot(9248032) }, module: network::network_beacon_processor::gossip_methods:720
+```
+
+The `PastFinalizedSlot` error indicates that the blob arrived after its slot had already been finalized. This could be due to a peer sending an old blob. The log will stop appearing once Lighthouse eventually drops the peer.
+
## Validator
### Why does it take so long for a validator to be activated?
@@ -327,13 +338,24 @@ The first thing is to ensure both consensus and execution clients are synced wit
You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations).
-Another cause for missing attestations is delays during block processing. When this happens, the debug logs will show (debug logs can be found under `$datadir/beacon/logs`):
+Another cause for missing attestations is the block arriving late, or delays during block processing.
+
+An example of the log: (debug logs can be found under `$datadir/beacon/logs`):
```text
-DEBG Delayed head block set_as_head_delay: Some(93.579425ms), imported_delay: Some(1.460405278s), observed_delay: Some(2.540811921s), block_delay: 4.094796624s, slot: 6837344, proposer_index: 211108, block_root: 0x2c52231c0a5a117401f5231585de8aa5dd963bc7cbc00c544e681342eedd1700, service: beacon
+Delayed head block, set_as_head_time_ms: 27, imported_time_ms: 168, attestable_delay_ms: 4209, available_delay_ms: 4186, execution_time_ms: 201, blob_delay_ms: 3815, observed_delay_ms: 3984, total_delay_ms: 4381, slot: 1886014, proposer_index: 733, block_root: 0xa7390baac88d50f1cbb5ad81691915f6402385a12521a670bbbd4cd5f8bf3934, service: beacon, module: beacon_chain::canonical_head:1441
```
-The fields to look for are `imported_delay > 1s` and `observed_delay < 3s`. The `imported_delay` is how long the node took to process the block. The `imported_delay` of larger than 1 second suggests that there is slowness in processing the block. It could be due to high CPU usage, high I/O disk usage or the clients are doing some background maintenance processes. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). The `observed_delay` of less than 3 seconds means that the block is not arriving late from the block proposer. Combining the above, this implies that the validator should have been able to attest to the block, but failed due to slowness in the node processing the block.
+The field to look for is `attestable_delay`, which defines the time when a block is ready for the validator to attest. If the `attestable_delay` is greater than 4s, it has passed the attestation window and the attestation will fail. In the above example, the delay is mostly caused by the late block observed by the node, as shown in `observed_delay`. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). Ideally, `observed_delay` should be less than 3 seconds. In this example, the validator failed to attest to the block due to the block arriving late.
+
+Another example of the log:
+
+```
+DEBG Delayed head block, set_as_head_time_ms: 22, imported_time_ms: 312, attestable_delay_ms: 7052, available_delay_ms: 6874, execution_time_ms: 4694, blob_delay_ms: 2159, observed_delay_ms: 2179, total_delay_ms: 7209, slot: 1885922, proposer_index: 606896, block_root: 0x9966df24d24e722d7133068186f0caa098428696e9f441ac416d0aca70cc0a23, service: beacon, module: beacon_chain::canonical_head:1441
+/159.69.68.247/tcp/9000, service: libp2p, module: lighthouse_network::service:1811
+```
+
+In this example, we see that the `execution_time_ms` is 4694ms. The `execution_time_ms` is how long the node took to process the block. An `execution_time_ms` larger than 1 second suggests that there is slowness in processing the block. If the `execution_time_ms` is high, it could be due to high CPU usage, high I/O disk usage or the clients doing some background maintenance processes.
### Sometimes I miss the attestation head vote, resulting in penalty. Is this normal?
@@ -514,21 +536,23 @@ If you would still like to subscribe to all subnets, you can use the flag `subsc
### How to know how many of my peers are connected via QUIC?
-With `--metrics` enabled in the beacon node, you can find the number of peers connected via QUIC using:
+With `--metrics` enabled in the beacon node, the [Grafana Network dashboard](https://github.com/sigp/lighthouse-metrics/blob/master/dashboards/Network.json) displays the peers connected by transport, which will show the number of peers connected via QUIC.
+
+Alternatively, you can find the number of peers connected via QUIC manually using:
```bash
- curl -s "http://localhost:5054/metrics" | grep libp2p_quic_peers
+ curl -s "http://localhost:5054/metrics" | grep 'transport="quic"'
```
A response example is:
```text
-# HELP libp2p_quic_peers Count of libp2p peers currently connected via QUIC
-# TYPE libp2p_quic_peers gauge
-libp2p_quic_peers 4
+libp2p_peers_multi{direction="inbound",transport="quic"} 27
+libp2p_peers_multi{direction="none",transport="quic"} 0
+libp2p_peers_multi{direction="outbound",transport="quic"} 9
```
-which shows that there are 4 peers connected via QUIC.
+which shows that there are a total of 36 peers connected via QUIC.
## Miscellaneous
diff --git a/book/src/slasher.md b/book/src/slasher.md
index 5098fe6eda..3310f6c9ef 100644
--- a/book/src/slasher.md
+++ b/book/src/slasher.md
@@ -114,13 +114,13 @@ changed after initialization.
* Flag: `--slasher-max-db-size GIGABYTES`
* Argument: maximum size of the database in gigabytes
-* Default: 256 GB
+* Default: 512 GB
Both database backends LMDB and MDBX place a hard limit on the size of the database
file. You can use the `--slasher-max-db-size` flag to set this limit. It can be adjusted after
initialization if the limit is reached.
-By default the limit is set to accommodate the default history length and around 600K validators (with about 30% headroom) but
+By default the limit is set to accommodate the default history length and around 1 million validators but
you can set it lower if running with a reduced history length. The space required scales
approximately linearly in validator count and history length, i.e. if you halve either you can halve
the space required.
diff --git a/book/src/slashing-protection.md b/book/src/slashing-protection.md
index 88e2bb955c..2d580f1c31 100644
--- a/book/src/slashing-protection.md
+++ b/book/src/slashing-protection.md
@@ -75,7 +75,7 @@ Once you have the slashing protection database from your existing client, you ca
using this command:
```bash
-lighthouse account validator slashing-protection import
+lighthouse account validator slashing-protection import filename.json
```
When importing an interchange file, you still need to import the validator keystores themselves
@@ -86,7 +86,7 @@ separately, using the instructions for [import validator keys](./mainnet-validat
You can export Lighthouse's database for use with another client with this command:
```
-lighthouse account validator slashing-protection export
+lighthouse account validator slashing-protection export filename.json
```
The validator client needs to be stopped in order to export, to guarantee that the data exported is
diff --git a/slasher/src/config.rs b/slasher/src/config.rs
index 4fd74343e7..54161f6ce9 100644
--- a/slasher/src/config.rs
+++ b/slasher/src/config.rs
@@ -11,7 +11,7 @@ pub const DEFAULT_VALIDATOR_CHUNK_SIZE: usize = 256;
pub const DEFAULT_HISTORY_LENGTH: usize = 4096;
pub const DEFAULT_UPDATE_PERIOD: u64 = 12;
pub const DEFAULT_SLOT_OFFSET: f64 = 10.5;
-pub const DEFAULT_MAX_DB_SIZE: usize = 256 * 1024; // 256 GiB
+pub const DEFAULT_MAX_DB_SIZE: usize = 512 * 1024; // 512 GiB
pub const DEFAULT_ATTESTATION_ROOT_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(100_000);
pub const DEFAULT_BROADCAST: bool = false;
From c52c598f6922059900d60723bb541add6865a87e Mon Sep 17 00:00:00 2001
From: ethDreamer <37123614+ethDreamer@users.noreply.github.com>
Date: Mon, 24 Jun 2024 16:08:07 -0500
Subject: [PATCH 2/2] Electra: Remaining Consensus Data Structures (#5712)
* Attestation superstruct changes for EIP 7549 (#5644)
* update
* experiment
* superstruct changes
* revert
* superstruct changes
* fix tests
* indexed attestation
* indexed attestation superstruct
* updated TODOs
* `superstruct` the `AttesterSlashing` (#5636)
* `superstruct` Attester Fork Variants
* Push a little further
* Deal with Encode / Decode of AttesterSlashing
* not so sure about this..
* Stop Encode/Decode Bounds from Propagating Out
* Tons of Changes..
* More Conversions to AttestationRef
* Add AsReference trait (#15)
* Add AsReference trait
* Fix some snafus
* Got it Compiling! :D
* Got Tests Building
* Get beacon chain tests compiling
---------
Co-authored-by: Michael Sproul
* Merge remote-tracking branch 'upstream/unstable' into electra_attestation_changes
* Make EF Tests Fork-Agnostic (#5713)
* Finish EF Test Fork Agnostic (#5714)
* Superstruct `AggregateAndProof` (#5715)
* Upgrade `superstruct` to `0.8.0`
* superstruct `AggregateAndProof`
* Merge remote-tracking branch 'sigp/unstable' into electra_attestation_changes
* cargo fmt
* Merge pull request #5726 from realbigsean/electra_attestation_changes
Merge unstable into Electra attestation changes
* EIP7549 `get_attestation_indices` (#5657)
* get attesting indices electra impl
* fmt
* get tests to pass
* fmt
* fix some beacon chain tests
* fmt
* fix slasher test
* fmt got me again
* fix more tests
* fix tests
* Some small changes (#5739)
* cargo fmt (#5740)
* Sketch op pool changes
* fix get attesting indices (#5742)
* fix get attesting indices
* better errors
* fix compile
* only get committee index once
* Ef test fixes (#5753)
* attestation related ef test fixes
* delete commented out stuff
* Fix Aggregation Pool for Electra (#5754)
* Fix Aggregation Pool for Electra
* Remove Outdated Interface
* fix ssz (#5755)
* Get `electra_op_pool` up to date (#5756)
* fix get attesting indices (#5742)
* fix get attesting indices
* better errors
* fix compile
* only get committee index once
* Ef test fixes (#5753)
* attestation related ef test fixes
* delete commented out stuff
* Fix Aggregation Pool for Electra (#5754)
* Fix Aggregation Pool for Electra
* Remove Outdated Interface
* fix ssz (#5755)
---------
Co-authored-by: realbigsean
* Revert "Get `electra_op_pool` up to date (#5756)" (#5757)
This reverts commit ab9e58aa3d0e6fe2175a4996a5de710e81152896.
* Merge branch 'electra_attestation_changes' of https://github.com/sigp/lighthouse into electra_op_pool
* Compute on chain aggregate impl (#5752)
* add compute_on_chain_agg impl to op pool changes
* fmt
* get op pool tests to pass
* update the naive agg pool interface (#5760)
* Fix bugs in cross-committee aggregation
* Add comment to max cover optimisation
* Fix assert
* Merge pull request #5749 from sigp/electra_op_pool
Optimise Electra op pool aggregation
* update committee offset
* Fix Electra Fork Choice Tests (#5764)
* Subscribe to the correct subnets for electra attestations (#5782)
* subscribe to the correct att subnets for electra
* subscribe to the correct att subnets for electra
* cargo fmt
* fix slashing handling
* Merge remote-tracking branch 'upstream/unstable'
* Send unagg attestation based on fork
* Publish all aggregates
* just one more check bro plz..
* Merge pull request #5832 from ethDreamer/electra_attestation_changes_merge_unstable
Merge `unstable` into `electra_attestation_changes`
* Merge pull request #5835 from realbigsean/fix-validator-logic
Fix validator logic
* Merge pull request #5816 from realbigsean/electra-attestation-slashing-handling
Electra slashing handling
* Electra attestation changes rm decode impl (#5856)
* Remove Crappy Decode impl for Attestation
* Remove Inefficient Attestation Decode impl
* Implement Schema Upgrade / Downgrade
* Update beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs
Co-authored-by: Michael Sproul
---------
Co-authored-by: Michael Sproul
* Fix failing attestation tests and misc electra attestation cleanup (#5810)
* - get attestation related beacon chain tests to pass
- observed attestations are now keyed off of data + committee index
- rename op pool attestationref to compactattestationref
- remove unwraps in agg pool and use options instead
- cherry pick some changes from ef-tests-electra
* cargo fmt
* fix failing test
* Revert dockerfile changes
* make committee_index return option
* function args shouldnt be a ref to attestation ref
* fmt
* fix dup imports
---------
Co-authored-by: realbigsean
* fix some todos (#5817)
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes
* add consolidations to merkle calc for inclusion proof
* Remove Duplicate KZG Commitment Merkle Proof Code (#5874)
* Remove Duplicate KZG Commitment Merkle Proof Code
* s/tree_lists/fields/
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes
* fix compile
* Fix slasher tests (#5906)
* Fix electra tests
* Add electra attestations to double vote tests
* Update superstruct to 0.8
* Merge remote-tracking branch 'origin/unstable' into electra_attestation_changes
* Small cleanup in slasher tests
* Clean up Electra observed aggregates (#5929)
* Use consistent key in observed_attestations
* Remove unwraps from observed aggregates
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes
* De-dup attestation constructor logic
* Remove unwraps in Attestation construction
* Dedup match_attestation_data
* Remove outdated TODO
* Use ForkName Ord in fork-choice tests
* Use ForkName Ord in BeaconBlockBody
* Make to_electra not fallible
* Remove TestRandom impl for IndexedAttestation
* Remove IndexedAttestation faulty Decode impl
* Drop TestRandom impl
* Add PendingAttestationInElectra
* Indexed att on disk (#35)
* indexed att on disk
* fix lints
* Update slasher/src/migrate.rs
Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com>
---------
Co-authored-by: Lion - dapplion <35266934+dapplion@users.noreply.github.com>
Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com>
* add electra fork enabled fn to ForkName impl (#36)
* add electra fork enabled fn to ForkName impl
* remove inadvertent file
* Update common/eth2/src/types.rs
Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com>
* Dedup attestation constructor logic in attester cache
* Use if let Ok for committee_bits
* Dedup Attestation constructor code
* Diff reduction in tests
* Fix beacon_chain tests
* Diff reduction
* Use Ord for ForkName in pubsub
* Resolve into_attestation_and_indices todo
* Remove stale TODO
* Fix beacon_chain tests
* Test spec invariant
* Use electra_enabled in pubsub
* Remove get_indexed_attestation_from_signed_aggregate
* Use ok_or instead of if let else
* committees are sorted
* remove dup method `get_indexed_attestation_from_committees`
* Merge pull request #5940 from dapplion/electra_attestation_changes_lionreview
Electra attestations #5712 review
* update default persisted op pool deserialization
* ensure aggregate and proof uses serde untagged on ref
* Fork aware ssz static attestation tests
* Electra attestation changes from Lions review (#5971)
* dedup/cleanup and remove unneeded hashset use
* remove irrelevant TODOs
* Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes
* Electra attestation changes sean review (#5972)
* instantiate empty bitlist in unreachable code
* clean up error conversion
* fork enabled bool cleanup
* remove a couple todos
* return bools instead of options in `aggregate` and use the result
* delete commented out code
* use map macros in simple transformations
* remove signers_disjoint_from
* get ef tests compiling
* get ef tests compiling
* update intentionally excluded files
* Avoid changing slasher schema for Electra
* Delete slasher schema v4
* Fix clippy
* Fix compilation of beacon_chain tests
* Update database.rs
* Add electra lightclient types
* Update slasher/src/database.rs
* fix imports
* Merge pull request #5980 from dapplion/electra-lightclient
Add electra lightclient types
* Merge pull request #5975 from michaelsproul/electra-slasher-no-migration
Avoid changing slasher schema for Electra
* Update beacon_node/beacon_chain/src/attestation_verification.rs
* Update beacon_node/beacon_chain/src/attestation_verification.rs
---
Cargo.lock | 5 +-
Cargo.toml | 2 +-
.../beacon_chain/src/attestation_simulator.rs | 2 +-
.../src/attestation_verification.rs | 386 ++++++++++-----
.../src/attestation_verification/batch.rs | 9 +-
.../beacon_chain/src/attester_cache.rs | 2 +
.../beacon_chain/src/beacon_block_reward.rs | 2 +-
beacon_node/beacon_chain/src/beacon_chain.rs | 183 ++++---
.../beacon_chain/src/bellatrix_readiness.rs | 4 +-
beacon_node/beacon_chain/src/block_reward.rs | 9 +-
.../beacon_chain/src/block_verification.rs | 2 +-
.../beacon_chain/src/early_attester_cache.rs | 22 +-
beacon_node/beacon_chain/src/errors.rs | 3 +
beacon_node/beacon_chain/src/lib.rs | 2 +-
.../src/naive_aggregation_pool.rs | 353 ++++++++++----
.../beacon_chain/src/observed_aggregates.rs | 166 +++++--
.../beacon_chain/src/observed_operations.rs | 15 +-
beacon_node/beacon_chain/src/schema_change.rs | 9 +
.../src/schema_change/migration_schema_v20.rs | 103 ++++
beacon_node/beacon_chain/src/test_utils.rs | 297 ++++++++----
.../beacon_chain/src/validator_monitor.rs | 49 +-
.../tests/attestation_production.rs | 34 +-
.../tests/attestation_verification.rs | 363 ++++++++++----
.../beacon_chain/tests/block_verification.rs | 178 +++++--
.../tests/payload_invalidation.rs | 24 +-
beacon_node/beacon_chain/tests/store_tests.rs | 38 +-
.../tests/sync_committee_verification.rs | 3 +-
beacon_node/beacon_chain/tests/tests.rs | 2 +-
beacon_node/client/src/notifier.rs | 2 +-
beacon_node/execution_layer/src/lib.rs | 2 +-
.../http_api/src/block_packing_efficiency.rs | 57 ++-
beacon_node/http_api/src/lib.rs | 27 +-
.../http_api/src/publish_attestations.rs | 4 +-
beacon_node/http_api/tests/tests.rs | 53 ++-
.../lighthouse_network/src/types/pubsub.rs | 98 +++-
.../gossip_methods.rs | 78 ++-
.../src/network_beacon_processor/mod.rs | 2 +-
.../src/subnet_service/attestation_subnets.rs | 2 +-
beacon_node/operation_pool/src/attestation.rs | 37 +-
.../operation_pool/src/attestation_storage.rs | 298 ++++++++++--
.../operation_pool/src/attester_slashing.rs | 18 +-
beacon_node/operation_pool/src/lib.rs | 161 +++++--
beacon_node/operation_pool/src/persistence.rs | 182 ++++---
beacon_node/src/lib.rs | 10 +-
beacon_node/store/src/consensus_context.rs | 5 +-
beacon_node/store/src/metadata.rs | 2 +-
common/eth2/src/types.rs | 15 +
consensus/fork_choice/src/fork_choice.rs | 59 ++-
consensus/fork_choice/tests/tests.rs | 36 +-
.../src/common/get_attesting_indices.rs | 174 ++++++-
.../src/common/get_indexed_attestation.rs | 21 -
consensus/state_processing/src/common/mod.rs | 6 +-
.../state_processing/src/consensus_context.rs | 56 +--
consensus/state_processing/src/lib.rs | 2 +-
.../block_signature_verifier.rs | 8 +-
.../src/per_block_processing/errors.rs | 1 +
.../is_valid_indexed_attestation.rs | 8 +-
.../process_operations.rs | 58 ++-
.../per_block_processing/signature_sets.rs | 53 +--
.../src/per_block_processing/tests.rs | 110 ++++-
.../verify_attestation.rs | 16 +-
.../verify_attester_slashing.rs | 21 +-
.../base/validator_statuses.rs | 2 +-
.../state_processing/src/upgrade/altair.rs | 4 +-
.../state_processing/src/verify_operation.rs | 216 ++++++++-
consensus/types/src/aggregate_and_proof.rs | 125 +++--
consensus/types/src/attestation.rs | 449 +++++++++++++++++-
consensus/types/src/attester_slashing.rs | 173 ++++++-
consensus/types/src/beacon_block.rs | 40 +-
consensus/types/src/beacon_block_body.rs | 341 ++++++++-----
consensus/types/src/beacon_state.rs | 8 +
.../types/src/beacon_state/committee_cache.rs | 2 +
consensus/types/src/chain_spec.rs | 2 +-
consensus/types/src/eth_spec.rs | 10 +
.../types/src/execution_payload_header.rs | 17 +-
consensus/types/src/fork_name.rs | 8 +
consensus/types/src/indexed_attestation.rs | 180 ++++++-
consensus/types/src/lib.rs | 30 +-
consensus/types/src/light_client_bootstrap.rs | 36 +-
.../types/src/light_client_finality_update.rs | 99 ++--
consensus/types/src/light_client_header.rs | 54 ++-
.../src/light_client_optimistic_update.rs | 37 +-
consensus/types/src/light_client_update.rs | 31 +-
.../types/src/signed_aggregate_and_proof.rs | 86 +++-
consensus/types/src/signed_beacon_block.rs | 2 +
consensus/types/src/subnet_id.rs | 14 +-
.../types/src/sync_committee_contribution.rs | 14 +-
consensus/types/src/validator.rs | 6 +-
lcli/src/indexed_attestations.rs | 13 +-
lcli/src/transition_blocks.rs | 2 +-
slasher/Cargo.toml | 1 +
slasher/src/array.rs | 20 +-
slasher/src/attestation_queue.rs | 7 +-
slasher/src/attester_record.rs | 18 +-
slasher/src/config.rs | 3 +-
slasher/src/database.rs | 146 +++++-
slasher/src/error.rs | 7 +
slasher/src/lib.rs | 39 +-
slasher/src/slasher.rs | 13 +-
slasher/src/test_utils.rs | 71 ++-
slasher/tests/attester_slashings.rs | 235 ++++++---
slasher/tests/proposer_slashings.rs | 8 +-
slasher/tests/random.rs | 6 +-
slasher/tests/wrap_around.rs | 8 +-
testing/ef_tests/check_all_files_accessed.py | 4 +-
testing/ef_tests/src/cases/fork_choice.rs | 52 +-
testing/ef_tests/src/cases/operations.rs | 25 +-
testing/ef_tests/src/handler.rs | 8 +
testing/ef_tests/src/type_name.rs | 11 +-
testing/ef_tests/tests/tests.rs | 60 ++-
testing/web3signer_tests/src/lib.rs | 20 +-
validator_client/src/attestation_service.rs | 58 ++-
validator_client/src/block_service.rs | 4 +-
.../src/http_api/tests/keystores.rs | 10 +-
validator_client/src/signing_method.rs | 2 +-
.../src/signing_method/web3signer.rs | 2 +-
validator_client/src/validator_store.rs | 27 +-
watch/src/database/mod.rs | 2 +-
118 files changed, 5076 insertions(+), 1741 deletions(-)
create mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs
delete mode 100644 consensus/state_processing/src/common/get_indexed_attestation.rs
diff --git a/Cargo.lock b/Cargo.lock
index a1865289b0..90196ea5b3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7653,6 +7653,7 @@ dependencies = [
"safe_arith",
"serde",
"slog",
+ "ssz_types",
"strum",
"tempfile",
"tree_hash",
@@ -8034,9 +8035,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
[[package]]
name = "superstruct"
-version = "0.7.0"
+version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6f4e1f478a7728f8855d7e620e9a152cf8932c6614f86564c886f9b8141f3201"
+checksum = "bf0f31f730ad9e579364950e10d6172b4a9bd04b447edf5988b066a860cc340e"
dependencies = [
"darling",
"itertools",
diff --git a/Cargo.toml b/Cargo.toml
index b942d1719e..d67f6edf1d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -162,7 +162,7 @@ smallvec = "1.11.2"
snap = "1"
ssz_types = "0.6"
strum = { version = "0.24", features = ["derive"] }
-superstruct = "0.7"
+superstruct = "0.8"
syn = "1"
sysinfo = "0.26"
tempfile = "3"
diff --git a/beacon_node/beacon_chain/src/attestation_simulator.rs b/beacon_node/beacon_chain/src/attestation_simulator.rs
index 6453158458..c97c4490af 100644
--- a/beacon_node/beacon_chain/src/attestation_simulator.rs
+++ b/beacon_node/beacon_chain/src/attestation_simulator.rs
@@ -82,7 +82,7 @@ pub fn produce_unaggregated_attestation(
// Store the unaggregated attestation in the validator monitor for later processing
match chain.produce_unaggregated_attestation(current_slot, beacon_committee_index) {
Ok(unaggregated_attestation) => {
- let data = &unaggregated_attestation.data;
+ let data = unaggregated_attestation.data();
debug!(
chain.log,
diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs
index b7019d79b4..06fba937d8 100644
--- a/beacon_node/beacon_chain/src/attestation_verification.rs
+++ b/beacon_node/beacon_chain/src/attestation_verification.rs
@@ -35,17 +35,23 @@
mod batch;
use crate::{
- beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, metrics,
- observed_aggregates::ObserveOutcome, observed_attesters::Error as ObservedAttestersError,
+ beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
+ metrics,
+ observed_aggregates::{ObserveOutcome, ObservedAttestationKey},
+ observed_attesters::Error as ObservedAttestersError,
BeaconChain, BeaconChainError, BeaconChainTypes,
};
use bls::verify_signature_sets;
+use itertools::Itertools;
use proto_array::Block as ProtoBlock;
use slog::debug;
use slot_clock::SlotClock;
use state_processing::{
- common::get_indexed_attestation,
- per_block_processing::errors::AttestationValidationError,
+ common::{
+ attesting_indices_base,
+ attesting_indices_electra::{self, get_committee_indices},
+ },
+ per_block_processing::errors::{AttestationValidationError, BlockOperationError},
signature_sets::{
indexed_attestation_signature_set_from_pubkeys,
signed_aggregate_selection_proof_signature_set, signed_aggregate_signature_set,
@@ -55,8 +61,9 @@ use std::borrow::Cow;
use strum::AsRefStr;
use tree_hash::TreeHash;
use types::{
- Attestation, BeaconCommittee, ChainSpec, CommitteeIndex, Epoch, EthSpec, ForkName, Hash256,
- IndexedAttestation, SelectionProof, SignedAggregateAndProof, Slot, SubnetId,
+ Attestation, AttestationRef, BeaconCommittee, BeaconStateError::NoCommitteeFound, ChainSpec,
+ CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation, SelectionProof,
+ SignedAggregateAndProof, Slot, SubnetId,
};
pub use batch::{batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations};
@@ -137,6 +144,12 @@ pub enum Error {
///
/// The peer has sent an invalid message.
ValidatorIndexTooHigh(usize),
+ /// The validator index is not set to zero after Electra.
+ ///
+ /// ## Peer scoring
+ ///
+ /// The peer has sent an invalid message.
+ CommitteeIndexNonZero(usize),
/// The `attestation.data.beacon_block_root` block is unknown.
///
/// ## Peer scoring
@@ -185,6 +198,12 @@ pub enum Error {
///
/// The peer has sent an invalid message.
NotExactlyOneAggregationBitSet(usize),
+ /// The attestation doesn't have only one aggregation bit set.
+ ///
+ /// ## Peer scoring
+ ///
+ /// The peer has sent an invalid message.
+ NotExactlyOneCommitteeBitSet(usize),
/// We have already observed an attestation for the `validator_index` and refuse to process
/// another.
///
@@ -248,7 +267,7 @@ pub enum Error {
impl From for Error {
fn from(e: BeaconChainError) -> Self {
- Error::BeaconChainError(e)
+ Self::BeaconChainError(e)
}
}
@@ -263,10 +282,11 @@ enum CheckAttestationSignature {
/// `IndexedAttestation` can be derived.
///
/// These attestations have *not* undergone signature verification.
+/// The `observed_attestation_key_root` is the hashed value of an `ObservedAttestationKey`.
struct IndexedAggregatedAttestation<'a, T: BeaconChainTypes> {
signed_aggregate: &'a SignedAggregateAndProof,
indexed_attestation: IndexedAttestation,
- attestation_data_root: Hash256,
+ observed_attestation_key_root: Hash256,
}
/// Wraps a `Attestation` that has been verified up until the point that an `IndexedAttestation` can
@@ -274,7 +294,7 @@ struct IndexedAggregatedAttestation<'a, T: BeaconChainTypes> {
///
/// These attestations have *not* undergone signature verification.
struct IndexedUnaggregatedAttestation<'a, T: BeaconChainTypes> {
- attestation: &'a Attestation,
+ attestation: AttestationRef<'a, T::EthSpec>,
indexed_attestation: IndexedAttestation,
subnet_id: SubnetId,
validator_index: u64,
@@ -295,7 +315,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
/// Wraps an `Attestation` that has been fully verified for propagation on the gossip network.
pub struct VerifiedUnaggregatedAttestation<'a, T: BeaconChainTypes> {
- attestation: &'a Attestation,
+ attestation: AttestationRef<'a, T::EthSpec>,
indexed_attestation: IndexedAttestation,
subnet_id: SubnetId,
}
@@ -322,20 +342,20 @@ impl<'a, T: BeaconChainTypes> Clone for IndexedUnaggregatedAttestation<'a, T> {
/// A helper trait implemented on wrapper types that can be progressed to a state where they can be
/// verified for application to fork choice.
pub trait VerifiedAttestation: Sized {
- fn attestation(&self) -> &Attestation;
+ fn attestation(&self) -> AttestationRef;
fn indexed_attestation(&self) -> &IndexedAttestation;
// Inefficient default implementation. This is overridden for gossip verified attestations.
fn into_attestation_and_indices(self) -> (Attestation, Vec) {
- let attestation = self.attestation().clone();
- let attesting_indices = self.indexed_attestation().attesting_indices.clone().into();
+ let attestation = self.attestation().clone_as_attestation();
+ let attesting_indices = self.indexed_attestation().attesting_indices_to_vec();
(attestation, attesting_indices)
}
}
impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedAggregatedAttestation<'a, T> {
- fn attestation(&self) -> &Attestation {
+ fn attestation(&self) -> AttestationRef {
self.attestation()
}
@@ -345,7 +365,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedAggregatedAttes
}
impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedUnaggregatedAttestation<'a, T> {
- fn attestation(&self) -> &Attestation {
+ fn attestation(&self) -> AttestationRef {
self.attestation
}
@@ -357,7 +377,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedUnaggregatedAtt
/// Information about invalid attestations which might still be slashable despite being invalid.
pub enum AttestationSlashInfo<'a, T: BeaconChainTypes, TErr> {
/// The attestation is invalid, but its signature wasn't checked.
- SignatureNotChecked(&'a Attestation, TErr),
+ SignatureNotChecked(AttestationRef<'a, T::EthSpec>, TErr),
/// As for `SignatureNotChecked`, but we know the `IndexedAttestation`.
SignatureNotCheckedIndexed(IndexedAttestation, TErr),
/// The attestation's signature is invalid, so it will never be slashable.
@@ -382,7 +402,7 @@ fn process_slash_info(
let (indexed_attestation, check_signature, err) = match slash_info {
SignatureNotChecked(attestation, err) => {
if let Error::UnknownHeadBlock { .. } = err {
- if attestation.data.beacon_block_root == attestation.data.target.root {
+ if attestation.data().beacon_block_root == attestation.data().target.root {
return err;
}
}
@@ -451,7 +471,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
signed_aggregate: &SignedAggregateAndProof,
chain: &BeaconChain,
) -> Result {
- let attestation = &signed_aggregate.message.aggregate;
+ let attestation = signed_aggregate.message().aggregate();
// Ensure attestation is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (within a
// MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance).
@@ -460,30 +480,39 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
verify_propagation_slot_range(&chain.slot_clock, attestation, &chain.spec)?;
// Check the attestation's epoch matches its target.
- if attestation.data.slot.epoch(T::EthSpec::slots_per_epoch())
- != attestation.data.target.epoch
+ if attestation.data().slot.epoch(T::EthSpec::slots_per_epoch())
+ != attestation.data().target.epoch
{
return Err(Error::InvalidTargetEpoch {
- slot: attestation.data.slot,
- epoch: attestation.data.target.epoch,
+ slot: attestation.data().slot,
+ epoch: attestation.data().target.epoch,
});
}
- // Ensure the valid aggregated attestation has not already been seen locally.
- let attestation_data = &attestation.data;
- let attestation_data_root = attestation_data.tree_hash_root();
+ let observed_attestation_key_root = ObservedAttestationKey {
+ committee_index: attestation
+ .committee_index()
+ .ok_or(Error::NotExactlyOneCommitteeBitSet(0))?,
+ attestation_data: attestation.data().clone(),
+ }
+ .tree_hash_root();
+
+ // [New in Electra:EIP7549]
+ verify_committee_index(attestation)?;
if chain
.observed_attestations
.write()
- .is_known_subset(attestation, attestation_data_root)
+ .is_known_subset(attestation, observed_attestation_key_root)
.map_err(|e| Error::BeaconChainError(e.into()))?
{
metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS);
- return Err(Error::AttestationSupersetKnown(attestation_data_root));
+ return Err(Error::AttestationSupersetKnown(
+ observed_attestation_key_root,
+ ));
}
- let aggregator_index = signed_aggregate.message.aggregator_index;
+ let aggregator_index = signed_aggregate.message().aggregator_index();
// Ensure there has been no other observed aggregate for the given `aggregator_index`.
//
@@ -491,7 +520,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
match chain
.observed_aggregators
.read()
- .validator_has_been_observed(attestation.data.target.epoch, aggregator_index as usize)
+ .validator_has_been_observed(attestation.data().target.epoch, aggregator_index as usize)
{
Ok(true) => Err(Error::AggregatorAlreadyKnown(aggregator_index)),
Ok(false) => Ok(()),
@@ -523,10 +552,10 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
verify_attestation_target_root::(&head_block, attestation)?;
// Ensure that the attestation has participants.
- if attestation.aggregation_bits.is_zero() {
+ if attestation.is_aggregation_bits_zero() {
Err(Error::EmptyAggregationBitfield)
} else {
- Ok(attestation_data_root)
+ Ok(observed_attestation_key_root)
}
}
@@ -536,23 +565,47 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
chain: &BeaconChain,
) -> Result> {
use AttestationSlashInfo::*;
-
- let attestation = &signed_aggregate.message.aggregate;
- let aggregator_index = signed_aggregate.message.aggregator_index;
- let attestation_data_root = match Self::verify_early_checks(signed_aggregate, chain) {
+ let observed_attestation_key_root = match Self::verify_early_checks(signed_aggregate, chain)
+ {
Ok(root) => root,
- Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)),
+ Err(e) => {
+ return Err(SignatureNotChecked(
+ signed_aggregate.message().aggregate(),
+ e,
+ ))
+ }
};
+ // Committees must be sorted by ascending index order 0..committees_per_slot
let get_indexed_attestation_with_committee =
- |(committee, _): (BeaconCommittee, CommitteesPerSlot)| {
- // Note: this clones the signature which is known to be a relatively slow operation.
- //
- // Future optimizations should remove this clone.
- let selection_proof =
- SelectionProof::from(signed_aggregate.message.selection_proof.clone());
+ |(committees, _): (Vec, CommitteesPerSlot)| {
+ let (index, aggregator_index, selection_proof, data) = match signed_aggregate {
+ SignedAggregateAndProof::Base(signed_aggregate) => (
+ signed_aggregate.message.aggregate.data.index,
+ signed_aggregate.message.aggregator_index,
+ // Note: this clones the signature which is known to be a relatively slow operation.
+ // Future optimizations should remove this clone.
+ signed_aggregate.message.selection_proof.clone(),
+ signed_aggregate.message.aggregate.data.clone(),
+ ),
+ SignedAggregateAndProof::Electra(signed_aggregate) => (
+ signed_aggregate
+ .message
+ .aggregate
+ .committee_index()
+ .ok_or(Error::NotExactlyOneCommitteeBitSet(0))?,
+ signed_aggregate.message.aggregator_index,
+ signed_aggregate.message.selection_proof.clone(),
+ signed_aggregate.message.aggregate.data.clone(),
+ ),
+ };
+ let slot = data.slot;
- if !selection_proof
+ let committee = committees
+ .get(index as usize)
+ .ok_or(Error::NoCommitteeForSlotAndIndex { slot, index })?;
+
+ if !SelectionProof::from(selection_proof)
.is_aggregator(committee.committee.len(), &chain.spec)
.map_err(|e| Error::BeaconChainError(e.into()))?
{
@@ -564,23 +617,44 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
return Err(Error::AggregatorNotInCommittee { aggregator_index });
}
- get_indexed_attestation(committee.committee, attestation)
- .map_err(|e| BeaconChainError::from(e).into())
+ // p2p aggregates have a single committee, we can assert that aggregation_bits is always
+ // less than MaxValidatorsPerCommittee
+ match signed_aggregate {
+ SignedAggregateAndProof::Base(signed_aggregate) => {
+ attesting_indices_base::get_indexed_attestation(
+ committee.committee,
+ &signed_aggregate.message.aggregate,
+ )
+ .map_err(|e| BeaconChainError::from(e).into())
+ }
+ SignedAggregateAndProof::Electra(signed_aggregate) => {
+ attesting_indices_electra::get_indexed_attestation(
+ &committees,
+ &signed_aggregate.message.aggregate,
+ )
+ .map_err(|e| BeaconChainError::from(e).into())
+ }
+ }
};
- let indexed_attestation = match map_attestation_committee(
+ let attestation = signed_aggregate.message().aggregate();
+ let indexed_attestation = match map_attestation_committees(
chain,
attestation,
get_indexed_attestation_with_committee,
) {
Ok(indexed_attestation) => indexed_attestation,
- Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)),
+ Err(e) => {
+ return Err(SignatureNotChecked(
+ signed_aggregate.message().aggregate(),
+ e,
+ ))
+ }
};
-
Ok(IndexedAggregatedAttestation {
signed_aggregate,
indexed_attestation,
- attestation_data_root,
+ observed_attestation_key_root,
})
}
}
@@ -589,11 +663,11 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
/// Run the checks that happen after the indexed attestation and signature have been checked.
fn verify_late_checks(
signed_aggregate: &SignedAggregateAndProof,
- attestation_data_root: Hash256,
+ observed_attestation_key_root: Hash256,
chain: &BeaconChain,
) -> Result<(), Error> {
- let attestation = &signed_aggregate.message.aggregate;
- let aggregator_index = signed_aggregate.message.aggregator_index;
+ let attestation = signed_aggregate.message().aggregate();
+ let aggregator_index = signed_aggregate.message().aggregator_index();
// Observe the valid attestation so we do not re-process it.
//
@@ -602,11 +676,13 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
if let ObserveOutcome::Subset = chain
.observed_attestations
.write()
- .observe_item(attestation, Some(attestation_data_root))
+ .observe_item(attestation, Some(observed_attestation_key_root))
.map_err(|e| Error::BeaconChainError(e.into()))?
{
metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS);
- return Err(Error::AttestationSupersetKnown(attestation_data_root));
+ return Err(Error::AttestationSupersetKnown(
+ observed_attestation_key_root,
+ ));
}
// Observe the aggregator so we don't process another aggregate from them.
@@ -616,12 +692,12 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
if chain
.observed_aggregators
.write()
- .observe_validator(attestation.data.target.epoch, aggregator_index as usize)
+ .observe_validator(attestation.data().target.epoch, aggregator_index as usize)
.map_err(BeaconChainError::from)?
{
return Err(Error::PriorAttestationKnown {
validator_index: aggregator_index,
- epoch: attestation.data.target.epoch,
+ epoch: attestation.data().target.epoch,
});
}
@@ -666,7 +742,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
let IndexedAggregatedAttestation {
signed_aggregate,
indexed_attestation,
- attestation_data_root,
+ observed_attestation_key_root,
} = signed_aggregate;
match check_signature {
@@ -690,7 +766,9 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
CheckAttestationSignature::No => (),
};
- if let Err(e) = Self::verify_late_checks(signed_aggregate, attestation_data_root, chain) {
+ if let Err(e) =
+ Self::verify_late_checks(signed_aggregate, observed_attestation_key_root, chain)
+ {
return Err(SignatureValid(indexed_attestation, e));
}
@@ -701,8 +779,8 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
}
/// Returns the underlying `attestation` for the `signed_aggregate`.
- pub fn attestation(&self) -> &Attestation {
- &self.signed_aggregate.message.aggregate
+ pub fn attestation(&self) -> AttestationRef<'a, T::EthSpec> {
+ self.signed_aggregate.message().aggregate()
}
/// Returns the underlying `signed_aggregate`.
@@ -714,16 +792,16 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> {
/// Run the checks that happen before an indexed attestation is constructed.
pub fn verify_early_checks(
- attestation: &Attestation,
+ attestation: AttestationRef,
chain: &BeaconChain,
) -> Result<(), Error> {
- let attestation_epoch = attestation.data.slot.epoch(T::EthSpec::slots_per_epoch());
+ let attestation_epoch = attestation.data().slot.epoch(T::EthSpec::slots_per_epoch());
// Check the attestation's epoch matches its target.
- if attestation_epoch != attestation.data.target.epoch {
+ if attestation_epoch != attestation.data().target.epoch {
return Err(Error::InvalidTargetEpoch {
- slot: attestation.data.slot,
- epoch: attestation.data.target.epoch,
+ slot: attestation.data().slot,
+ epoch: attestation.data().target.epoch,
});
}
@@ -735,11 +813,14 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> {
// Check to ensure that the attestation is "unaggregated". I.e., it has exactly one
// aggregation bit set.
- let num_aggregation_bits = attestation.aggregation_bits.num_set_bits();
+ let num_aggregation_bits = attestation.num_set_aggregation_bits();
if num_aggregation_bits != 1 {
return Err(Error::NotExactlyOneAggregationBitSet(num_aggregation_bits));
}
+ // [New in Electra:EIP7549]
+ verify_committee_index(attestation)?;
+
// Attestations must be for a known block. If the block is unknown, we simply drop the
// attestation and do not delay consideration for later.
//
@@ -755,14 +836,14 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> {
/// Run the checks that apply to the indexed attestation before the signature is checked.
pub fn verify_middle_checks(
- attestation: &Attestation,
+ attestation: AttestationRef,
indexed_attestation: &IndexedAttestation,
committees_per_slot: u64,
subnet_id: Option,
chain: &BeaconChain,
) -> Result<(u64, SubnetId), Error> {
- let expected_subnet_id = SubnetId::compute_subnet_for_attestation_data::(
- &indexed_attestation.data,
+ let expected_subnet_id = SubnetId::compute_subnet_for_attestation::(
+ attestation,
committees_per_slot,
&chain.spec,
)
@@ -779,8 +860,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> {
};
let validator_index = *indexed_attestation
- .attesting_indices
- .first()
+ .attesting_indices_first()
.ok_or(Error::NotExactlyOneAggregationBitSet(0))?;
/*
@@ -790,12 +870,12 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> {
if chain
.observed_gossip_attesters
.read()
- .validator_has_been_observed(attestation.data.target.epoch, validator_index as usize)
+ .validator_has_been_observed(attestation.data().target.epoch, validator_index as usize)
.map_err(BeaconChainError::from)?
{
return Err(Error::PriorAttestationKnown {
validator_index,
- epoch: attestation.data.target.epoch,
+ epoch: attestation.data().target.epoch,
});
}
@@ -812,7 +892,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> {
subnet_id: Option,
chain: &BeaconChain,
) -> Result {
- Self::verify_slashable(attestation, subnet_id, chain)
+ Self::verify_slashable(attestation.to_ref(), subnet_id, chain)
.map(|verified_unaggregated| {
if let Some(slasher) = chain.slasher.as_ref() {
slasher.accept_attestation(verified_unaggregated.indexed_attestation.clone());
@@ -824,7 +904,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> {
/// Verify the attestation, producing extra information about whether it might be slashable.
pub fn verify_slashable(
- attestation: &'a Attestation,
+ attestation: AttestationRef<'a, T::EthSpec>,
subnet_id: Option,
chain: &BeaconChain,
) -> Result> {
@@ -873,7 +953,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> {
impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> {
/// Run the checks that apply after the signature has been checked.
fn verify_late_checks(
- attestation: &Attestation,
+ attestation: AttestationRef,
validator_index: u64,
chain: &BeaconChain,
) -> Result<(), Error> {
@@ -886,12 +966,12 @@ impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> {
if chain
.observed_gossip_attesters
.write()
- .observe_validator(attestation.data.target.epoch, validator_index as usize)
+ .observe_validator(attestation.data().target.epoch, validator_index as usize)
.map_err(BeaconChainError::from)?
{
return Err(Error::PriorAttestationKnown {
validator_index,
- epoch: attestation.data.target.epoch,
+ epoch: attestation.data().target.epoch,
});
}
Ok(())
@@ -967,7 +1047,7 @@ impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> {
}
/// Returns the wrapped `attestation`.
- pub fn attestation(&self) -> &Attestation {
+ pub fn attestation(&self) -> AttestationRef {
self.attestation
}
@@ -997,34 +1077,34 @@ impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> {
/// already finalized.
fn verify_head_block_is_known(
chain: &BeaconChain,
- attestation: &Attestation,
+ attestation: AttestationRef,
max_skip_slots: Option,
) -> Result {
let block_opt = chain
.canonical_head
.fork_choice_read_lock()
- .get_block(&attestation.data.beacon_block_root)
+ .get_block(&attestation.data().beacon_block_root)
.or_else(|| {
chain
.early_attester_cache
- .get_proto_block(attestation.data.beacon_block_root)
+ .get_proto_block(attestation.data().beacon_block_root)
});
if let Some(block) = block_opt {
// Reject any block that exceeds our limit on skipped slots.
if let Some(max_skip_slots) = max_skip_slots {
- if attestation.data.slot > block.slot + max_skip_slots {
+ if attestation.data().slot > block.slot + max_skip_slots {
return Err(Error::TooManySkippedSlots {
head_block_slot: block.slot,
- attestation_slot: attestation.data.slot,
+ attestation_slot: attestation.data().slot,
});
}
}
Ok(block)
- } else if chain.is_pre_finalization_block(attestation.data.beacon_block_root)? {
+ } else if chain.is_pre_finalization_block(attestation.data().beacon_block_root)? {
Err(Error::HeadBlockFinalized {
- beacon_block_root: attestation.data.beacon_block_root,
+ beacon_block_root: attestation.data().beacon_block_root,
})
} else {
// The block is either:
@@ -1034,7 +1114,7 @@ fn verify_head_block_is_known(
// 2) A post-finalization block that we don't know about yet. We'll queue
// the attestation until the block becomes available (or we time out).
Err(Error::UnknownHeadBlock {
- beacon_block_root: attestation.data.beacon_block_root,
+ beacon_block_root: attestation.data().beacon_block_root,
})
}
}
@@ -1045,10 +1125,10 @@ fn verify_head_block_is_known(
/// Accounts for `MAXIMUM_GOSSIP_CLOCK_DISPARITY`.
pub fn verify_propagation_slot_range(
slot_clock: &S,
- attestation: &Attestation,
+ attestation: AttestationRef,
spec: &ChainSpec,
) -> Result<(), Error> {
- let attestation_slot = attestation.data.slot;
+ let attestation_slot = attestation.data().slot;
let latest_permissible_slot = slot_clock
.now_with_future_tolerance(spec.maximum_gossip_clock_disparity())
.ok_or(BeaconChainError::UnableToReadSlot)?;
@@ -1067,14 +1147,13 @@ pub fn verify_propagation_slot_range(
let current_fork =
spec.fork_name_at_slot::(slot_clock.now().ok_or(BeaconChainError::UnableToReadSlot)?);
- let earliest_permissible_slot = match current_fork {
- ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => {
- one_epoch_prior
- }
- // EIP-7045
- ForkName::Deneb | ForkName::Electra => one_epoch_prior
+ let earliest_permissible_slot = if !current_fork.deneb_enabled() {
+ one_epoch_prior
+ // EIP-7045
+ } else {
+ one_epoch_prior
.epoch(E::slots_per_epoch())
- .start_slot(E::slots_per_epoch()),
+ .start_slot(E::slots_per_epoch())
};
if attestation_slot < earliest_permissible_slot {
@@ -1102,18 +1181,17 @@ pub fn verify_attestation_signature(
let fork = chain
.spec
- .fork_at_epoch(indexed_attestation.data.target.epoch);
+ .fork_at_epoch(indexed_attestation.data().target.epoch);
let signature_set = indexed_attestation_signature_set_from_pubkeys(
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
- &indexed_attestation.signature,
+ indexed_attestation.signature(),
indexed_attestation,
&fork,
chain.genesis_validators_root,
&chain.spec,
)
.map_err(BeaconChainError::SignatureSetError)?;
-
metrics::stop_timer(signature_setup_timer);
let _signature_verification_timer =
@@ -1130,11 +1208,11 @@ pub fn verify_attestation_signature(
/// `attestation.data.beacon_block_root`.
pub fn verify_attestation_target_root(
head_block: &ProtoBlock,
- attestation: &Attestation,
+ attestation: AttestationRef,
) -> Result<(), Error> {
// Check the attestation target root.
let head_block_epoch = head_block.slot.epoch(E::slots_per_epoch());
- let attestation_epoch = attestation.data.slot.epoch(E::slots_per_epoch());
+ let attestation_epoch = attestation.data().slot.epoch(E::slots_per_epoch());
if head_block_epoch > attestation_epoch {
// The epoch references an invalid head block from a future epoch.
//
@@ -1147,7 +1225,7 @@ pub fn verify_attestation_target_root(
// Reference:
// https://github.com/ethereum/eth2.0-specs/pull/2001#issuecomment-699246659
return Err(Error::InvalidTargetRoot {
- attestation: attestation.data.target.root,
+ attestation: attestation.data().target.root,
// It is not clear what root we should expect in this case, since the attestation is
// fundamentally invalid.
expected: None,
@@ -1166,9 +1244,9 @@ pub fn verify_attestation_target_root(
};
// Reject any attestation with an invalid target root.
- if target_root != attestation.data.target.root {
+ if target_root != attestation.data().target.root {
return Err(Error::InvalidTargetRoot {
- attestation: attestation.data.target.root,
+ attestation: attestation.data().target.root,
expected: Some(target_root),
});
}
@@ -1199,14 +1277,14 @@ pub fn verify_signed_aggregate_signatures(
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?;
- let aggregator_index = signed_aggregate.message.aggregator_index;
+ let aggregator_index = signed_aggregate.message().aggregator_index();
if aggregator_index >= pubkey_cache.len() as u64 {
return Err(Error::AggregatorPubkeyUnknown(aggregator_index));
}
let fork = chain
.spec
- .fork_at_epoch(indexed_attestation.data.target.epoch);
+ .fork_at_epoch(indexed_attestation.data().target.epoch);
let signature_sets = vec![
signed_aggregate_selection_proof_signature_set(
@@ -1227,7 +1305,7 @@ pub fn verify_signed_aggregate_signatures(
.map_err(BeaconChainError::SignatureSetError)?,
indexed_attestation_signature_set_from_pubkeys(
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
- &indexed_attestation.signature,
+ indexed_attestation.signature(),
indexed_attestation,
&fork,
chain.genesis_validators_root,
@@ -1239,6 +1317,28 @@ pub fn verify_signed_aggregate_signatures(
Ok(verify_signature_sets(signature_sets.iter()))
}
+/// Verify that the `attestation` committee index is properly set for the attestation's fork.
+/// This function will only apply verification post-Electra.
+pub fn verify_committee_index(attestation: AttestationRef) -> Result<(), Error> {
+ if let Ok(committee_bits) = attestation.committee_bits() {
+ // Check to ensure that the attestation is for a single committee.
+ let num_committee_bits = get_committee_indices::(committee_bits);
+ if num_committee_bits.len() != 1 {
+ return Err(Error::NotExactlyOneCommitteeBitSet(
+ num_committee_bits.len(),
+ ));
+ }
+
+ // Ensure the attestation index is set to zero post Electra.
+ if attestation.data().index != 0 {
+ return Err(Error::CommitteeIndexNonZero(
+ attestation.data().index as usize,
+ ));
+ }
+ }
+ Ok(())
+}
+
/// Assists in readability.
type CommitteesPerSlot = u64;
@@ -1246,35 +1346,71 @@ type CommitteesPerSlot = u64;
/// public keys cached in the `chain`.
pub fn obtain_indexed_attestation_and_committees_per_slot(
chain: &BeaconChain,
- attestation: &Attestation,
+ attestation: AttestationRef,
) -> Result<(IndexedAttestation, CommitteesPerSlot), Error> {
- map_attestation_committee(chain, attestation, |(committee, committees_per_slot)| {
- get_indexed_attestation(committee.committee, attestation)
- .map(|attestation| (attestation, committees_per_slot))
- .map_err(Error::Invalid)
+ map_attestation_committees(chain, attestation, |(committees, committees_per_slot)| {
+ match attestation {
+ AttestationRef::Base(att) => {
+ let committee = committees
+ .iter()
+ .filter(|&committee| committee.index == att.data.index)
+ .at_most_one()
+ .map_err(|_| Error::NoCommitteeForSlotAndIndex {
+ slot: att.data.slot,
+ index: att.data.index,
+ })?;
+
+ if let Some(committee) = committee {
+ attesting_indices_base::get_indexed_attestation(committee.committee, att)
+ .map(|attestation| (attestation, committees_per_slot))
+ .map_err(Error::Invalid)
+ } else {
+ Err(Error::NoCommitteeForSlotAndIndex {
+ slot: att.data.slot,
+ index: att.data.index,
+ })
+ }
+ }
+ AttestationRef::Electra(att) => {
+ attesting_indices_electra::get_indexed_attestation(&committees, att)
+ .map(|attestation| (attestation, committees_per_slot))
+ .map_err(|e| {
+ if let BlockOperationError::BeaconStateError(NoCommitteeFound(index)) = e {
+ Error::NoCommitteeForSlotAndIndex {
+ slot: att.data.slot,
+ index,
+ }
+ } else {
+ Error::Invalid(e)
+ }
+ })
+ }
+ }
})
}
/// Runs the `map_fn` with the committee and committee count per slot for the given `attestation`.
///
-/// This function exists in this odd "map" pattern because efficiently obtaining the committee for
-/// an attestation can be complex. It might involve reading straight from the
+/// This function exists in this odd "map" pattern because efficiently obtaining the committees for
+/// an attestation's slot can be complex. It might involve reading straight from the
/// `beacon_chain.shuffling_cache` or it might involve reading it from a state from the DB. Due to
/// the complexities of `RwLock`s on the shuffling cache, a simple `Cow` isn't suitable here.
///
-/// If the committee for `attestation` isn't found in the `shuffling_cache`, we will read a state
+/// If the committees for an `attestation`'s slot aren't found in the `shuffling_cache`, we will read a state
/// from disk and then update the `shuffling_cache`.
-fn map_attestation_committee(
+///
+/// Committees are sorted by ascending index order 0..committees_per_slot
+fn map_attestation_committees(
chain: &BeaconChain,
- attestation: &Attestation,
+ attestation: AttestationRef,
map_fn: F,
) -> Result
where
T: BeaconChainTypes,
- F: Fn((BeaconCommittee, CommitteesPerSlot)) -> Result,
+ F: Fn((Vec, CommitteesPerSlot)) -> Result,
{
- let attestation_epoch = attestation.data.slot.epoch(T::EthSpec::slots_per_epoch());
- let target = &attestation.data.target;
+ let attestation_epoch = attestation.data().slot.epoch(T::EthSpec::slots_per_epoch());
+ let target = &attestation.data().target;
// Attestation target must be for a known block.
//
@@ -1297,12 +1433,12 @@ where
let committees_per_slot = committee_cache.committees_per_slot();
Ok(committee_cache
- .get_beacon_committee(attestation.data.slot, attestation.data.index)
- .map(|committee| map_fn((committee, committees_per_slot)))
- .unwrap_or_else(|| {
+ .get_beacon_committees_at_slot(attestation.data().slot)
+ .map(|committees| map_fn((committees, committees_per_slot)))
+ .unwrap_or_else(|_| {
Err(Error::NoCommitteeForSlotAndIndex {
- slot: attestation.data.slot,
- index: attestation.data.index,
+ slot: attestation.data().slot,
+ index: attestation.committee_index().unwrap_or(0),
})
}))
})
diff --git a/beacon_node/beacon_chain/src/attestation_verification/batch.rs b/beacon_node/beacon_chain/src/attestation_verification/batch.rs
index 6aec2bef68..07fad1bd4a 100644
--- a/beacon_node/beacon_chain/src/attestation_verification/batch.rs
+++ b/beacon_node/beacon_chain/src/attestation_verification/batch.rs
@@ -66,14 +66,13 @@ where
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?;
let mut signature_sets = Vec::with_capacity(num_indexed * 3);
-
// Iterate, flattening to get only the `Ok` values.
for indexed in indexing_results.iter().flatten() {
let signed_aggregate = &indexed.signed_aggregate;
let indexed_attestation = &indexed.indexed_attestation;
let fork = chain
.spec
- .fork_at_epoch(indexed_attestation.data.target.epoch);
+ .fork_at_epoch(indexed_attestation.data().target.epoch);
signature_sets.push(
signed_aggregate_selection_proof_signature_set(
@@ -98,7 +97,7 @@ where
signature_sets.push(
indexed_attestation_signature_set_from_pubkeys(
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
- &indexed_attestation.signature,
+ indexed_attestation.signature(),
indexed_attestation,
&fork,
chain.genesis_validators_root,
@@ -182,11 +181,11 @@ where
let indexed_attestation = &partially_verified.indexed_attestation;
let fork = chain
.spec
- .fork_at_epoch(indexed_attestation.data.target.epoch);
+ .fork_at_epoch(indexed_attestation.data().target.epoch);
let signature_set = indexed_attestation_signature_set_from_pubkeys(
|validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed),
- &indexed_attestation.signature,
+ indexed_attestation.signature(),
indexed_attestation,
&fork,
chain.genesis_validators_root,
diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs
index 2e07cd32ed..b5012e8e4e 100644
--- a/beacon_node/beacon_chain/src/attester_cache.rs
+++ b/beacon_node/beacon_chain/src/attester_cache.rs
@@ -15,6 +15,7 @@ use state_processing::state_advance::{partial_state_advance, Error as StateAdvan
use std::collections::HashMap;
use std::ops::Range;
use types::{
+ attestation::Error as AttestationError,
beacon_state::{
compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count,
},
@@ -59,6 +60,7 @@ pub enum Error {
InverseRange {
range: Range,
},
+ AttestationError(AttestationError),
}
impl From for Error {
diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs
index 5b70215d22..33567001e3 100644
--- a/beacon_node/beacon_chain/src/beacon_block_reward.rs
+++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs
@@ -202,7 +202,7 @@ impl BeaconChain {
let mut previous_epoch_participation = state.previous_epoch_participation()?.clone();
for attestation in block.body().attestations() {
- let data = &attestation.data;
+ let data = attestation.data();
let inclusion_delay = state.slot().safe_sub(data.slot)?.as_u64();
// [Modified in Deneb:EIP7045]
let participation_flag_indices = get_attestation_participation_flag_indices(
diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
index 77e1bc095e..f1d9ce791e 100644
--- a/beacon_node/beacon_chain/src/beacon_chain.rs
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -85,7 +85,9 @@ use futures::channel::mpsc::Sender;
use itertools::process_results;
use itertools::Itertools;
use kzg::Kzg;
-use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella};
+use operation_pool::{
+ CompactAttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella,
+};
use parking_lot::{Mutex, RwLock};
use proto_array::{DoNotReOrg, ProposerHeadError};
use safe_arith::SafeArith;
@@ -1643,14 +1645,49 @@ impl BeaconChain {
Ok((duties, dependent_root, execution_status))
}
+ pub fn get_aggregated_attestation(
+ &self,
+ attestation: AttestationRef,
+ ) -> Result