Merge branch 'unstable' into deneb-free-blobs

# Conflicts:
#	.github/workflows/docker.yml
#	.github/workflows/local-testnet.yml
#	.github/workflows/test-suite.yml
#	Cargo.lock
#	Cargo.toml
#	beacon_node/beacon_chain/src/beacon_chain.rs
#	beacon_node/beacon_chain/src/builder.rs
#	beacon_node/beacon_chain/src/test_utils.rs
#	beacon_node/execution_layer/src/engine_api/json_structures.rs
#	beacon_node/network/src/beacon_processor/mod.rs
#	beacon_node/network/src/beacon_processor/worker/gossip_methods.rs
#	beacon_node/network/src/sync/backfill_sync/mod.rs
#	beacon_node/store/src/config.rs
#	beacon_node/store/src/hot_cold_store.rs
#	common/eth2_network_config/Cargo.toml
#	consensus/ssz/src/decode/impls.rs
#	consensus/ssz_derive/src/lib.rs
#	consensus/ssz_derive/tests/tests.rs
#	consensus/ssz_types/src/serde_utils/mod.rs
#	consensus/tree_hash/src/impls.rs
#	consensus/tree_hash/src/lib.rs
#	consensus/types/Cargo.toml
#	consensus/types/src/beacon_state.rs
#	consensus/types/src/chain_spec.rs
#	consensus/types/src/eth_spec.rs
#	consensus/types/src/fork_name.rs
#	lcli/Cargo.toml
#	lcli/src/main.rs
#	lcli/src/new_testnet.rs
#	scripts/local_testnet/el_bootnode.sh
#	scripts/local_testnet/genesis.json
#	scripts/local_testnet/geth.sh
#	scripts/local_testnet/setup.sh
#	scripts/local_testnet/start_local_testnet.sh
#	scripts/local_testnet/vars.env
#	scripts/tests/doppelganger_protection.sh
#	scripts/tests/genesis.json
#	scripts/tests/vars.env
#	testing/ef_tests/Cargo.toml
#	validator_client/src/block_service.rs
This commit is contained in:
Jimmy Chen
2023-05-30 11:26:33 +10:00
333 changed files with 5930 additions and 13386 deletions

View File

@@ -56,6 +56,7 @@ use logging::TimeLatch;
use slog::{crit, debug, error, trace, warn, Logger};
use std::collections::VecDeque;
use std::future::Future;
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::{Arc, Weak};
use std::task::Context;
@@ -1069,6 +1070,13 @@ impl<T: BeaconChainTypes> Stream for InboundEvents<T> {
}
}
/// Defines if and where we will store the SSZ files of invalid blocks.
//
// `Debug` is derived alongside `Clone` so the configured storage mode can be
// logged/inspected; both variants contain only `Debug` types.
#[derive(Debug, Clone)]
pub enum InvalidBlockStorage {
    /// Persist invalid blocks (and their errors) as files under the given directory.
    Enabled(PathBuf),
    /// Do not persist invalid blocks.
    Disabled,
}
/// A multi-threaded processor for messages received on the network
/// that need to be processed by the `BeaconChain`
///
@@ -1082,6 +1090,7 @@ pub struct BeaconProcessor<T: BeaconChainTypes> {
pub max_workers: usize,
pub current_workers: usize,
pub importing_blocks: DuplicateCache,
pub invalid_block_storage: InvalidBlockStorage,
pub log: Logger,
}
@@ -1783,19 +1792,23 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
peer_client,
block,
seen_timestamp,
} => task_spawner.spawn_async(async move {
worker
.process_gossip_block(
message_id,
peer_id,
peer_client,
block.into(),
work_reprocessing_tx,
duplicate_cache,
seen_timestamp,
)
.await
}),
} => {
let invalid_block_storage = self.invalid_block_storage.clone();
task_spawner.spawn_async(async move {
worker
.process_gossip_block(
message_id,
peer_id,
peer_client,
block.into(),
work_reprocessing_tx,
duplicate_cache,
invalid_block_storage,
seen_timestamp,
)
.await
})
}
/*
* Verification for blobs sidecars received on gossip.
*/
@@ -1825,12 +1838,16 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
peer_id,
block,
seen_timestamp,
} => task_spawner.spawn_async(worker.process_gossip_verified_block(
peer_id,
*block,
work_reprocessing_tx,
seen_timestamp,
)),
} => {
let invalid_block_storage = self.invalid_block_storage.clone();
task_spawner.spawn_async(worker.process_gossip_verified_block(
peer_id,
*block,
work_reprocessing_tx,
invalid_block_storage,
seen_timestamp,
))
}
/*
* Voluntary exits received on gossip.
*/

View File

@@ -203,6 +203,7 @@ impl TestRig {
max_workers: cmp::max(1, num_cpus::get()),
current_workers: 0,
importing_blocks: duplicate_cache.clone(),
invalid_block_storage: InvalidBlockStorage::Disabled,
log: log.clone(),
}
.spawn_manager(beacon_processor_rx, Some(work_journal_tx));

View File

@@ -14,17 +14,20 @@ use beacon_chain::{
};
use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource};
use operation_pool::ReceivedPreCapella;
use slog::{crit, debug, error, info, trace, warn};
use slog::{crit, debug, error, info, trace, warn, Logger};
use slot_clock::SlotClock;
use ssz::Encode;
use std::fs;
use std::io::Write;
use std::path::PathBuf;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use store::hot_cold_store::HotColdDBError;
use tokio::sync::mpsc;
use types::{
Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate,
LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBlobSidecar,
SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId,
SyncCommitteeMessage, SyncSubnetId,
LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock,
SignedBlobSidecar, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit,
Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId,
};
use super::{
@@ -34,7 +37,7 @@ use super::{
},
Worker,
};
use crate::beacon_processor::DuplicateCache;
use crate::beacon_processor::{DuplicateCache, InvalidBlockStorage};
/// Set to `true` to introduce stricter penalties for peers who send some types of late consensus
/// messages.
@@ -796,6 +799,7 @@ impl<T: BeaconChainTypes> Worker<T> {
block: BlockWrapper<T::EthSpec>,
reprocess_tx: mpsc::Sender<ReprocessQueueMessage<T>>,
duplicate_cache: DuplicateCache,
invalid_block_storage: InvalidBlockStorage,
seen_duration: Duration,
) {
if let Some(gossip_verified_block) = self
@@ -816,6 +820,7 @@ impl<T: BeaconChainTypes> Worker<T> {
peer_id,
gossip_verified_block,
reprocess_tx,
invalid_block_storage,
seen_duration,
)
.await;
@@ -1082,13 +1087,14 @@ impl<T: BeaconChainTypes> Worker<T> {
peer_id: PeerId,
verified_block: GossipVerifiedBlock<T>,
reprocess_tx: mpsc::Sender<ReprocessQueueMessage<T>>,
invalid_block_storage: InvalidBlockStorage,
// This value is not used presently, but it might come in handy for debugging.
_seen_duration: Duration,
) {
let block = verified_block.block.block_cloned();
let block_root = verified_block.block_root;
match self
let result = self
.chain
.process_block(
block_root,
@@ -1096,14 +1102,15 @@ impl<T: BeaconChainTypes> Worker<T> {
CountUnrealized::True,
NotifyExecutionLayer::Yes,
)
.await
{
.await;
match &result {
Ok(AvailabilityProcessingStatus::Imported(block_root)) => {
metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL);
if reprocess_tx
.try_send(ReprocessQueueMessage::BlockImported {
block_root,
block_root: *block_root,
parent_root: block.message().parent_root(),
})
.is_err()
@@ -1137,7 +1144,7 @@ impl<T: BeaconChainTypes> Worker<T> {
// make rpc request for blob
self.send_sync_message(SyncMessage::UnknownBlobHash {
peer_id,
pending_blobs,
pending_blobs: pending_blobs.to_vec(),
});
}
Err(BlockError::AvailabilityCheck(_)) => {
@@ -1151,7 +1158,11 @@ impl<T: BeaconChainTypes> Worker<T> {
"Block with unknown parent attempted to be processed";
"peer_id" => %peer_id
);
self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block, block_root));
self.send_sync_message(SyncMessage::UnknownBlock(
peer_id,
block.clone(),
block_root,
));
}
Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => {
debug!(
@@ -1180,6 +1191,16 @@ impl<T: BeaconChainTypes> Worker<T> {
);
}
};
if let Err(e) = &result {
self.maybe_store_invalid_block(
&invalid_block_storage,
block_root,
&block,
e,
&self.log,
);
}
}
pub fn process_gossip_voluntary_exit(
@@ -2487,6 +2508,25 @@ impl<T: BeaconChainTypes> Worker<T> {
"peer_id" => %peer_id,
"type" => ?message_type,
);
// Do not penalize the peer.
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
return;
}
SyncCommitteeError::PriorSyncContributionMessageKnown { .. } => {
/*
* We have already seen a sync contribution message from this validator for this epoch.
*
* The peer is not necessarily faulty.
*/
debug!(
self.log,
"Prior sync contribution message known";
"peer_id" => %peer_id,
"type" => ?message_type,
);
// We still penalize the peer slightly. We don't want this to be a recurring
// behaviour.
self.gossip_penalize_peer(
@@ -2651,4 +2691,62 @@ impl<T: BeaconChainTypes> Worker<T> {
self.propagate_if_timely(is_timely, message_id, peer_id)
}
/// Stores a block as a SSZ file, if and where `invalid_block_storage` dictates.
///
/// When storage is enabled, two files are written under the configured
/// directory: `<slot>_<root>.ssz` (the SSZ-encoded block) and
/// `<slot>_<root>.error` (the stringified `BlockError`). Existing files are
/// never overwritten, so only the first error observed for a given block is
/// retained.
fn maybe_store_invalid_block(
    &self,
    invalid_block_storage: &InvalidBlockStorage,
    block_root: Hash256,
    block: &SignedBeaconBlock<T::EthSpec>,
    error: &BlockError<T::EthSpec>,
    log: &Logger,
) {
    if let InvalidBlockStorage::Enabled(base_dir) = invalid_block_storage {
        let block_path = base_dir.join(format!("{}_{:?}.ssz", block.slot(), block_root));
        let error_path = base_dir.join(format!("{}_{:?}.error", block.slot(), block_root));
        let write_file = |path: PathBuf, bytes: &[u8]| {
            // No need to write the same file twice. For the error file,
            // this means that we'll remember the first error message but
            // forget the rest.
            if path.exists() {
                return;
            }
            // Write to the file. Use `and_then` (not `map`) to chain the
            // write onto the open: with `map` a failed `write_all` produced
            // an `Ok(Err(_))` that slipped past the `if let Err(e)` below
            // and was logged as a success.
            let write_result = fs::OpenOptions::new()
                // Only succeed if the file doesn't already exist. We should
                // have checked for this earlier.
                .create_new(true)
                .write(true)
                .open(&path)
                .map_err(|e| format!("Failed to open file: {:?}", e))
                .and_then(|mut file| {
                    file.write_all(bytes)
                        .map_err(|e| format!("Failed to write file: {:?}", e))
                });
            if let Err(e) = write_result {
                error!(
                    log,
                    "Failed to store invalid block/error";
                    "error" => e,
                    "path" => ?path,
                    "root" => ?block_root,
                    "slot" => block.slot(),
                )
            } else {
                info!(
                    log,
                    "Stored invalid block/error ";
                    "path" => ?path,
                    "root" => ?block_root,
                    "slot" => block.slot(),
                )
            }
        };
        write_file(block_path, &block.as_ssz_bytes());
        write_file(error_path, error.to_string().as_bytes());
    }
}
}

View File

@@ -116,10 +116,26 @@ impl<T: BeaconChainTypes> Worker<T> {
}
};
// Returns `true` if the block is already known to fork choice. Notably,
// this will return `false` for blocks that we've already imported but
// that are ancestors of the finalized checkpoint. That should not be an issue
// for our use here since finalized blocks will always be late and won't
// be requeued anyway.
let block_is_already_known = || {
self.chain
.canonical_head
.fork_choice_read_lock()
.contains_block(&block_root)
};
// If we've already seen a block from this proposer *and* the block
// arrived before the attestation deadline, requeue it to ensure it is
// imported late enough that it won't receive a proposer boost.
if !block_is_late && proposal_already_known() {
//
// Don't requeue blocks if they're already known to fork choice, just
// push them through to block processing so they can be handled through
// the normal channels.
if !block_is_late && proposal_already_known() && !block_is_already_known() {
debug!(
self.log,
"Delaying processing of duplicate RPC block";