mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-11 18:04:18 +00:00
Fix wrong columns getting processed on a CGC change (#7792)
This PR fixes a bug where wrong columns could get processed immediately after a CGC increase.
Scenario:
- The node's CGC increased due to additional validators attached to it (let's say from 10 to 11)
- The new CGC is advertised and new subnets are subscribed immediately, however the change won't be effective in the data availability check until the next epoch (See [this](ab0e8870b4/beacon_node/beacon_chain/src/validator_custody.rs (L93-L99))). The data availability checker still only requires 10 columns for the current epoch.
- During this time, a data column for the additional custody column (let's say column 11) may arrive via gossip, since we're already subscribed to the topic. It may then be incorrectly used to satisfy the existing data availability requirement (10 columns), causing this additional column (instead of a required one) to be persisted and leaving the database inconsistent.
This commit is contained in:
@@ -31,7 +31,6 @@ use metrics::{inc_counter, TryExt};
|
||||
use mockall_double::double;
|
||||
use ssz_types::FixedVector;
|
||||
use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash;
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
use tracing::{debug, warn};
|
||||
use types::blob_sidecar::BlobSidecarError;
|
||||
@@ -73,7 +72,7 @@ pub async fn fetch_and_process_engine_blobs<T: BeaconChainTypes>(
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
block_root: Hash256,
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec, FullPayload<T::EthSpec>>>,
|
||||
custody_columns: HashSet<ColumnIndex>,
|
||||
custody_columns: &[ColumnIndex],
|
||||
publish_fn: impl Fn(EngineGetBlobsOutput<T>) + Send + 'static,
|
||||
) -> Result<Option<AvailabilityProcessingStatus>, FetchEngineBlobError> {
|
||||
fetch_and_process_engine_blobs_inner(
|
||||
@@ -92,7 +91,7 @@ async fn fetch_and_process_engine_blobs_inner<T: BeaconChainTypes>(
|
||||
chain_adapter: FetchBlobsBeaconAdapter<T>,
|
||||
block_root: Hash256,
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec, FullPayload<T::EthSpec>>>,
|
||||
custody_columns: HashSet<ColumnIndex>,
|
||||
custody_columns: &[ColumnIndex],
|
||||
publish_fn: impl Fn(EngineGetBlobsOutput<T>) + Send + 'static,
|
||||
) -> Result<Option<AvailabilityProcessingStatus>, FetchEngineBlobError> {
|
||||
let versioned_hashes = if let Some(kzg_commitments) = block
|
||||
@@ -238,7 +237,7 @@ async fn fetch_and_process_blobs_v2<T: BeaconChainTypes>(
|
||||
block_root: Hash256,
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
||||
versioned_hashes: Vec<VersionedHash>,
|
||||
custody_columns_indices: HashSet<ColumnIndex>,
|
||||
custody_columns_indices: &[ColumnIndex],
|
||||
publish_fn: impl Fn(EngineGetBlobsOutput<T>) + Send + 'static,
|
||||
) -> Result<Option<AvailabilityProcessingStatus>, FetchEngineBlobError> {
|
||||
let num_expected_blobs = versioned_hashes.len();
|
||||
@@ -337,11 +336,12 @@ async fn compute_custody_columns_to_import<T: BeaconChainTypes>(
|
||||
block: Arc<SignedBeaconBlock<T::EthSpec, FullPayload<T::EthSpec>>>,
|
||||
blobs: Vec<Blob<T::EthSpec>>,
|
||||
proofs: Vec<KzgProofs<T::EthSpec>>,
|
||||
custody_columns_indices: HashSet<ColumnIndex>,
|
||||
custody_columns_indices: &[ColumnIndex],
|
||||
) -> Result<Vec<KzgVerifiedCustodyDataColumn<T::EthSpec>>, FetchEngineBlobError> {
|
||||
let kzg = chain_adapter.kzg().clone();
|
||||
let spec = chain_adapter.spec().clone();
|
||||
let chain_adapter_cloned = chain_adapter.clone();
|
||||
let custody_columns_indices = custody_columns_indices.to_vec();
|
||||
chain_adapter
|
||||
.executor()
|
||||
.spawn_blocking_handle(
|
||||
|
||||
@@ -21,6 +21,7 @@ type T = EphemeralHarnessType<E>;
|
||||
|
||||
mod get_blobs_v2 {
|
||||
use super::*;
|
||||
use types::ColumnIndex;
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn test_fetch_blobs_v2_no_blobs_in_block() {
|
||||
@@ -36,12 +37,12 @@ mod get_blobs_v2 {
|
||||
mock_adapter.expect_get_blobs_v2().times(0);
|
||||
mock_adapter.expect_process_engine_blobs().times(0);
|
||||
|
||||
let custody_columns = hashset![0, 1, 2];
|
||||
let custody_columns: [ColumnIndex; 3] = [0, 1, 2];
|
||||
let processing_status = fetch_and_process_engine_blobs_inner(
|
||||
mock_adapter,
|
||||
block_root,
|
||||
Arc::new(block),
|
||||
custody_columns.clone(),
|
||||
&custody_columns,
|
||||
publish_fn,
|
||||
)
|
||||
.await
|
||||
@@ -61,12 +62,12 @@ mod get_blobs_v2 {
|
||||
mock_get_blobs_v2_response(&mut mock_adapter, None);
|
||||
|
||||
// Trigger fetch blobs on the block
|
||||
let custody_columns = hashset![0, 1, 2];
|
||||
let custody_columns: [ColumnIndex; 3] = [0, 1, 2];
|
||||
let processing_status = fetch_and_process_engine_blobs_inner(
|
||||
mock_adapter,
|
||||
block_root,
|
||||
block,
|
||||
custody_columns.clone(),
|
||||
&custody_columns,
|
||||
publish_fn,
|
||||
)
|
||||
.await
|
||||
@@ -89,12 +90,12 @@ mod get_blobs_v2 {
|
||||
mock_adapter.expect_process_engine_blobs().times(0);
|
||||
|
||||
// Trigger fetch blobs on the block
|
||||
let custody_columns = hashset![0, 1, 2];
|
||||
let custody_columns: [ColumnIndex; 3] = [0, 1, 2];
|
||||
let processing_status = fetch_and_process_engine_blobs_inner(
|
||||
mock_adapter,
|
||||
block_root,
|
||||
block,
|
||||
custody_columns.clone(),
|
||||
&custody_columns,
|
||||
publish_fn,
|
||||
)
|
||||
.await
|
||||
@@ -122,12 +123,12 @@ mod get_blobs_v2 {
|
||||
mock_adapter.expect_process_engine_blobs().times(0);
|
||||
|
||||
// Trigger fetch blobs on the block
|
||||
let custody_columns = hashset![0, 1, 2];
|
||||
let custody_columns: [ColumnIndex; 3] = [0, 1, 2];
|
||||
let processing_status = fetch_and_process_engine_blobs_inner(
|
||||
mock_adapter,
|
||||
block_root,
|
||||
block,
|
||||
custody_columns.clone(),
|
||||
&custody_columns,
|
||||
publish_fn,
|
||||
)
|
||||
.await
|
||||
@@ -161,12 +162,12 @@ mod get_blobs_v2 {
|
||||
mock_adapter.expect_process_engine_blobs().times(0);
|
||||
|
||||
// **WHEN**: Trigger `fetch_blobs` on the block
|
||||
let custody_columns = hashset![0, 1, 2];
|
||||
let custody_columns: [ColumnIndex; 3] = [0, 1, 2];
|
||||
let processing_status = fetch_and_process_engine_blobs_inner(
|
||||
mock_adapter,
|
||||
block_root,
|
||||
block,
|
||||
custody_columns.clone(),
|
||||
&custody_columns,
|
||||
publish_fn,
|
||||
)
|
||||
.await
|
||||
@@ -203,12 +204,12 @@ mod get_blobs_v2 {
|
||||
);
|
||||
|
||||
// Trigger fetch blobs on the block
|
||||
let custody_columns = hashset![0, 1, 2];
|
||||
let custody_columns: [ColumnIndex; 3] = [0, 1, 2];
|
||||
let processing_status = fetch_and_process_engine_blobs_inner(
|
||||
mock_adapter,
|
||||
block_root,
|
||||
block,
|
||||
custody_columns.clone(),
|
||||
&custody_columns,
|
||||
publish_fn,
|
||||
)
|
||||
.await
|
||||
@@ -252,6 +253,7 @@ mod get_blobs_v1 {
|
||||
use super::*;
|
||||
use crate::block_verification_types::AsBlock;
|
||||
use std::collections::HashSet;
|
||||
use types::ColumnIndex;
|
||||
|
||||
const ELECTRA_FORK: ForkName = ForkName::Electra;
|
||||
|
||||
@@ -268,12 +270,12 @@ mod get_blobs_v1 {
|
||||
mock_adapter.expect_get_blobs_v1().times(0);
|
||||
|
||||
// WHEN: Trigger fetch blobs on the block
|
||||
let custody_columns = hashset![0, 1, 2];
|
||||
let custody_columns: [ColumnIndex; 3] = [0, 1, 2];
|
||||
let processing_status = fetch_and_process_engine_blobs_inner(
|
||||
mock_adapter,
|
||||
block_root,
|
||||
Arc::new(block_no_blobs),
|
||||
custody_columns,
|
||||
&custody_columns,
|
||||
publish_fn,
|
||||
)
|
||||
.await
|
||||
@@ -295,12 +297,12 @@ mod get_blobs_v1 {
|
||||
mock_get_blobs_v1_response(&mut mock_adapter, vec![None; expected_blob_count]);
|
||||
|
||||
// WHEN: Trigger fetch blobs on the block
|
||||
let custody_columns = hashset![0, 1, 2];
|
||||
let custody_columns: [ColumnIndex; 3] = [0, 1, 2];
|
||||
let processing_status = fetch_and_process_engine_blobs_inner(
|
||||
mock_adapter,
|
||||
block_root,
|
||||
block,
|
||||
custody_columns,
|
||||
&custody_columns,
|
||||
publish_fn,
|
||||
)
|
||||
.await
|
||||
@@ -341,12 +343,12 @@ mod get_blobs_v1 {
|
||||
);
|
||||
|
||||
// WHEN: Trigger fetch blobs on the block
|
||||
let custody_columns = hashset![0, 1, 2];
|
||||
let custody_columns: [ColumnIndex; 3] = [0, 1, 2];
|
||||
let processing_status = fetch_and_process_engine_blobs_inner(
|
||||
mock_adapter,
|
||||
block_root,
|
||||
block,
|
||||
custody_columns,
|
||||
&custody_columns,
|
||||
publish_fn,
|
||||
)
|
||||
.await
|
||||
@@ -381,12 +383,12 @@ mod get_blobs_v1 {
|
||||
mock_fork_choice_contains_block(&mut mock_adapter, vec![block.canonical_root()]);
|
||||
|
||||
// WHEN: Trigger fetch blobs on the block
|
||||
let custody_columns = hashset![0, 1, 2];
|
||||
let custody_columns: [ColumnIndex; 3] = [0, 1, 2];
|
||||
let processing_status = fetch_and_process_engine_blobs_inner(
|
||||
mock_adapter,
|
||||
block_root,
|
||||
block,
|
||||
custody_columns,
|
||||
&custody_columns,
|
||||
publish_fn,
|
||||
)
|
||||
.await
|
||||
@@ -429,12 +431,12 @@ mod get_blobs_v1 {
|
||||
.returning(move |_, _| Some(all_blob_indices.clone()));
|
||||
|
||||
// **WHEN**: Trigger `fetch_blobs` on the block
|
||||
let custody_columns = hashset![0, 1, 2];
|
||||
let custody_columns: [ColumnIndex; 3] = [0, 1, 2];
|
||||
let processing_status = fetch_and_process_engine_blobs_inner(
|
||||
mock_adapter,
|
||||
block_root,
|
||||
block,
|
||||
custody_columns,
|
||||
&custody_columns,
|
||||
publish_fn,
|
||||
)
|
||||
.await
|
||||
@@ -473,12 +475,12 @@ mod get_blobs_v1 {
|
||||
);
|
||||
|
||||
// Trigger fetch blobs on the block
|
||||
let custody_columns = hashset![0, 1, 2];
|
||||
let custody_columns: [ColumnIndex; 3] = [0, 1, 2];
|
||||
let processing_status = fetch_and_process_engine_blobs_inner(
|
||||
mock_adapter,
|
||||
block_root,
|
||||
block,
|
||||
custody_columns,
|
||||
&custody_columns,
|
||||
publish_fn,
|
||||
)
|
||||
.await
|
||||
|
||||
Reference in New Issue
Block a user