mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-16 11:22:56 +00:00
Fix wrong columns getting processed on a CGC change (#7792)
This PR fixes a bug where wrong columns could get processed immediately after a CGC increase.
Scenario:
- The node's CGC increased due to additional validators attached to it (let's say from 10 to 11)
- The new CGC is advertised and new subnets are subscribed immediately; however, the change won't be effective in the data availability check until the next epoch (See [this](ab0e8870b4/beacon_node/beacon_chain/src/validator_custody.rs (L93-L99))). The data availability checker still only requires 10 columns for the current epoch.
- During this time, data columns for the additional custody column (let's say column 11) may arrive via gossip, as we're already subscribed to the topic, and may be incorrectly used to satisfy the existing data availability requirement (10 columns), resulting in this additional column (instead of a required one) getting persisted and causing database inconsistency.
This commit is contained in:
@@ -159,7 +159,7 @@ impl<E: EthSpec> PendingComponents<E> {
|
||||
pub fn make_available<R>(
|
||||
&mut self,
|
||||
spec: &Arc<ChainSpec>,
|
||||
num_expected_columns: u64,
|
||||
num_expected_columns: usize,
|
||||
recover: R,
|
||||
) -> Result<Option<AvailableExecutedBlock<E>>, AvailabilityCheckError>
|
||||
where
|
||||
@@ -173,7 +173,6 @@ impl<E: EthSpec> PendingComponents<E> {
|
||||
};
|
||||
|
||||
let num_expected_blobs = block.num_blobs_expected();
|
||||
let num_expected_columns = num_expected_columns as usize;
|
||||
let blob_data = if num_expected_blobs == 0 {
|
||||
Some(AvailableBlockData::NoData)
|
||||
} else if spec.is_peer_das_enabled_for_epoch(block.epoch()) {
|
||||
@@ -311,7 +310,7 @@ impl<E: EthSpec> PendingComponents<E> {
|
||||
pub fn status_str(
|
||||
&self,
|
||||
block_epoch: Epoch,
|
||||
num_expected_columns: Option<u64>,
|
||||
num_expected_columns: Option<usize>,
|
||||
spec: &ChainSpec,
|
||||
) -> String {
|
||||
let block_count = if self.executed_block.is_some() { 1 } else { 0 };
|
||||
@@ -348,7 +347,7 @@ pub struct DataAvailabilityCheckerInner<T: BeaconChainTypes> {
|
||||
/// This cache holds a limited number of states in memory and reconstructs them
|
||||
/// from disk when necessary. This is necessary until we merge tree-states
|
||||
state_cache: StateLRUCache<T>,
|
||||
custody_context: Arc<CustodyContext>,
|
||||
custody_context: Arc<CustodyContext<T::EthSpec>>,
|
||||
spec: Arc<ChainSpec>,
|
||||
}
|
||||
|
||||
@@ -365,7 +364,7 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> {
|
||||
pub fn new(
|
||||
capacity: NonZeroUsize,
|
||||
beacon_store: BeaconStore<T>,
|
||||
custody_context: Arc<CustodyContext>,
|
||||
custody_context: Arc<CustodyContext<T::EthSpec>>,
|
||||
spec: Arc<ChainSpec>,
|
||||
) -> Result<Self, AvailabilityCheckError> {
|
||||
Ok(Self {
|
||||
@@ -482,7 +481,7 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> {
|
||||
if let Some(available_block) = pending_components.make_available(
|
||||
&self.spec,
|
||||
self.custody_context
|
||||
.num_of_data_columns_to_sample(Some(epoch), &self.spec),
|
||||
.num_of_data_columns_to_sample(epoch, &self.spec),
|
||||
|block| self.state_cache.recover_pending_executed_block(block),
|
||||
)? {
|
||||
// We keep the pending components in the availability cache during block import (#5845).
|
||||
@@ -508,10 +507,11 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> {
|
||||
.peek()
|
||||
.map(|verified_blob| verified_blob.as_data_column().epoch())
|
||||
else {
|
||||
// Verified data_columns list should be non-empty.
|
||||
return Err(AvailabilityCheckError::Unexpected(
|
||||
"empty columns".to_owned(),
|
||||
));
|
||||
// No columns are processed. This can occur if all received columns were filtered out
|
||||
// before this point, e.g. due to a CGC change that caused extra columns to be downloaded
|
||||
// before the new CGC took effect.
|
||||
// Return `Ok` without marking the block as available.
|
||||
return Ok(Availability::MissingComponents(block_root));
|
||||
};
|
||||
|
||||
let mut write_lock = self.critical.write();
|
||||
@@ -529,7 +529,7 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> {
|
||||
|
||||
let num_expected_columns = self
|
||||
.custody_context
|
||||
.num_of_data_columns_to_sample(Some(epoch), &self.spec);
|
||||
.num_of_data_columns_to_sample(epoch, &self.spec);
|
||||
debug!(
|
||||
component = "data_columns",
|
||||
?block_root,
|
||||
@@ -627,7 +627,7 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> {
|
||||
|
||||
let num_expected_columns = self
|
||||
.custody_context
|
||||
.num_of_data_columns_to_sample(Some(epoch), &self.spec);
|
||||
.num_of_data_columns_to_sample(epoch, &self.spec);
|
||||
debug!(
|
||||
component = "block",
|
||||
?block_root,
|
||||
|
||||
Reference in New Issue
Block a user