mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-03 00:31:50 +00:00
Merge remote-tracking branch 'origin/stable' into unstable
This commit is contained in:
@@ -804,6 +804,16 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
|
||||
// The peer is faulty if they sent bad signatures.
|
||||
Some(PeerAction::LowToleranceError)
|
||||
}
|
||||
HistoricalBlockError::MissingOldestBlockRoot { slot } => {
|
||||
warn!(
|
||||
%slot,
|
||||
error = "missing_oldest_block_root",
|
||||
"Backfill batch processing error"
|
||||
);
|
||||
// This is an internal error, do not penalize the peer.
|
||||
None
|
||||
}
|
||||
|
||||
HistoricalBlockError::ValidatorPubkeyCacheTimeout => {
|
||||
warn!(
|
||||
error = "pubkey_cache_timeout",
|
||||
|
||||
@@ -12,6 +12,7 @@ use futures::future::OptionFuture;
|
||||
use futures::prelude::*;
|
||||
|
||||
use lighthouse_network::Enr;
|
||||
use lighthouse_network::identity::Keypair;
|
||||
use lighthouse_network::rpc::InboundRequestId;
|
||||
use lighthouse_network::rpc::RequestType;
|
||||
use lighthouse_network::rpc::methods::RpcResponse;
|
||||
@@ -212,6 +213,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
|
||||
executor: task_executor::TaskExecutor,
|
||||
libp2p_registry: Option<&'_ mut Registry>,
|
||||
beacon_processor_send: BeaconProcessorSend<T::EthSpec>,
|
||||
local_keypair: Keypair,
|
||||
) -> Result<
|
||||
(
|
||||
NetworkService<T>,
|
||||
@@ -284,6 +286,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
|
||||
.data_availability_checker
|
||||
.custody_context()
|
||||
.custody_group_count_at_head(&beacon_chain.spec),
|
||||
local_keypair,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -366,6 +369,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
|
||||
executor: task_executor::TaskExecutor,
|
||||
libp2p_registry: Option<&'_ mut Registry>,
|
||||
beacon_processor_send: BeaconProcessorSend<T::EthSpec>,
|
||||
local_keypair: Keypair,
|
||||
) -> Result<(Arc<NetworkGlobals<T::EthSpec>>, NetworkSenders<T::EthSpec>), String> {
|
||||
let (network_service, network_globals, network_senders) = Self::build(
|
||||
beacon_chain,
|
||||
@@ -373,6 +377,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
|
||||
executor.clone(),
|
||||
libp2p_registry,
|
||||
beacon_processor_send,
|
||||
local_keypair,
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ use beacon_chain::BeaconChainTypes;
|
||||
use beacon_chain::test_utils::BeaconChainHarness;
|
||||
use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig};
|
||||
use futures::StreamExt;
|
||||
use lighthouse_network::identity::secp256k1;
|
||||
use lighthouse_network::types::{GossipEncoding, GossipKind};
|
||||
use lighthouse_network::{Enr, GossipTopic};
|
||||
use std::str::FromStr;
|
||||
@@ -66,6 +67,7 @@ fn test_dht_persistence() {
|
||||
executor,
|
||||
None,
|
||||
beacon_processor_tx,
|
||||
secp256k1::Keypair::generate().into(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -134,6 +136,7 @@ fn test_removing_topic_weight_on_old_topics() {
|
||||
executor.clone(),
|
||||
None,
|
||||
beacon_processor_channels.beacon_processor_tx,
|
||||
secp256k1::Keypair::generate().into(),
|
||||
)
|
||||
.await
|
||||
.unwrap()
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use super::*;
|
||||
use beacon_chain::test_utils::generate_data_column_indices_rand_order;
|
||||
use beacon_chain::{
|
||||
BeaconChain,
|
||||
builder::{BeaconChainBuilder, Witness},
|
||||
@@ -73,6 +74,9 @@ impl TestBeaconChain {
|
||||
Duration::from_secs(recent_genesis_time()),
|
||||
Duration::from_millis(SLOT_DURATION_MILLIS),
|
||||
))
|
||||
.ordered_custody_column_indices(generate_data_column_indices_rand_order::<
|
||||
MainnetEthSpec,
|
||||
>())
|
||||
.shutdown_sender(shutdown_tx)
|
||||
.rng(Box::new(StdRng::seed_from_u64(42)))
|
||||
.build()
|
||||
|
||||
@@ -268,8 +268,8 @@ impl<T: BeaconChainTypes> RangeDataColumnBatchRequest<T> {
|
||||
|
||||
let received_columns = columns.iter().map(|c| c.index).collect::<HashSet<_>>();
|
||||
|
||||
let missing_columns = received_columns
|
||||
.difference(expected_custody_columns)
|
||||
let missing_columns = expected_custody_columns
|
||||
.difference(&received_columns)
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
// blobs are expected for this slot but there is at least one missing column
|
||||
|
||||
Reference in New Issue
Block a user