mirror of
https://github.com/sigp/lighthouse.git
synced 2026-05-01 11:43:44 +00:00
More deneb cleanup (#4640)
* remove protoc and token from network tests github action
* delete unused beacon chain methods
* downgrade writing blobs to store log
* reduce diff in block import logic
* remove some todo's and deneb built in network
* remove unnecessary error, actually use some added metrics
* remove some metrics, fix missing components on publish functionality
* fix status tests
* rename sidecar by root to blobs by root
* clean up some metrics
* remove unnecessary feature gate from attestation subnet tests, clean up blobs by range response code
* pawan's suggestion in `protocol_info`, peer score in matching up batch sync block and blobs
* fix range tests for deneb
* pub block and blob db cache behind the same mutex
* remove unused errs and an empty file
* move sidecar trait to new file
* move types from payload to eth2 crate
* update comment and add flag value name
* make function private again, remove allow unused
* use reth rlp for tx decoding
* fix compile after merge
* rename kzg commitments
* cargo fmt
* remove unused dep
* Update beacon_node/execution_layer/src/lib.rs
Co-authored-by: Pawan Dhananjay <pawandhananjay@gmail.com>
* Update beacon_node/beacon_processor/src/lib.rs
Co-authored-by: Pawan Dhananjay <pawandhananjay@gmail.com>
* pawan's suggestion for vec capacity
* cargo fmt
* Revert "use reth rlp for tx decoding"
This reverts commit 5181837d81.
* remove reth rlp
---------
Co-authored-by: Pawan Dhananjay <pawandhananjay@gmail.com>
This commit is contained in:
@@ -71,10 +71,6 @@ lazy_static! {
|
||||
"beacon_processor_gossip_blob_verified_total",
|
||||
"Total number of gossip blob verified for propagation."
|
||||
);
|
||||
pub static ref BEACON_PROCESSOR_GOSSIP_BLOB_IMPORTED_TOTAL: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_processor_gossip_blob_imported_total",
|
||||
"Total number of gossip blobs imported to fork choice, etc."
|
||||
);
|
||||
// Gossip Exits.
|
||||
pub static ref BEACON_PROCESSOR_EXIT_VERIFIED_TOTAL: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_processor_exit_verified_total",
|
||||
@@ -120,10 +116,6 @@ lazy_static! {
|
||||
"beacon_processor_rpc_block_imported_total",
|
||||
"Total number of gossip blocks imported to fork choice, etc."
|
||||
);
|
||||
pub static ref BEACON_PROCESSOR_RPC_BLOB_IMPORTED_TOTAL: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_processor_rpc_blob_imported_total",
|
||||
"Total number of gossip blobs imported."
|
||||
);
|
||||
// Chain segments.
|
||||
pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_processor_chain_segment_success_total",
|
||||
|
||||
@@ -621,6 +621,20 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
|
||||
.verify_blob_sidecar_for_gossip(signed_blob, blob_index)
|
||||
{
|
||||
Ok(gossip_verified_blob) => {
|
||||
metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOB_VERIFIED_TOTAL);
|
||||
|
||||
if delay >= self.chain.slot_clock.unagg_attestation_production_delay() {
|
||||
metrics::inc_counter(&metrics::BEACON_BLOB_GOSSIP_ARRIVED_LATE_TOTAL);
|
||||
debug!(
|
||||
self.log,
|
||||
"Gossip blob arrived late";
|
||||
"block_root" => ?gossip_verified_blob.block_root(),
|
||||
"proposer_index" => gossip_verified_blob.proposer_index(),
|
||||
"slot" => gossip_verified_blob.slot(),
|
||||
"delay" => ?delay,
|
||||
);
|
||||
}
|
||||
|
||||
debug!(
|
||||
self.log,
|
||||
"Successfully verified gossip blob";
|
||||
@@ -628,8 +642,20 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
|
||||
"root" => %root,
|
||||
"index" => %index
|
||||
);
|
||||
metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOB_VERIFIED_TOTAL);
|
||||
|
||||
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept);
|
||||
|
||||
// Log metrics to keep track of propagation delay times.
|
||||
if let Some(duration) = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.ok()
|
||||
.and_then(|now| now.checked_sub(seen_duration))
|
||||
{
|
||||
metrics::observe_duration(
|
||||
&metrics::BEACON_BLOB_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME,
|
||||
duration,
|
||||
);
|
||||
}
|
||||
self.process_gossip_verified_blob(peer_id, gossip_verified_blob, seen_duration)
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -18,7 +18,9 @@ use std::sync::Arc;
|
||||
use task_executor::TaskExecutor;
|
||||
use tokio_stream::StreamExt;
|
||||
use types::blob_sidecar::BlobIdentifier;
|
||||
use types::{light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, Slot};
|
||||
use types::{
|
||||
light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, ForkName, Hash256, Slot,
|
||||
};
|
||||
|
||||
impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
|
||||
/* Auxiliary functions */
|
||||
@@ -376,13 +378,19 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
|
||||
);
|
||||
|
||||
// Should not send more than max request blocks
|
||||
// TODO: We should switch the limit to `MAX_REQUEST_BLOCKS` at the fork,
|
||||
// or maybe consider switching the max value given the fork context.
|
||||
if *req.count() > MAX_REQUEST_BLOCKS_DENEB {
|
||||
let max_request_size = self.chain.epoch().map_or(MAX_REQUEST_BLOCKS, |epoch| {
|
||||
match self.chain.spec.fork_name_at_epoch(epoch) {
|
||||
ForkName::Deneb => MAX_REQUEST_BLOCKS_DENEB,
|
||||
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
|
||||
MAX_REQUEST_BLOCKS
|
||||
}
|
||||
}
|
||||
});
|
||||
if *req.count() > max_request_size {
|
||||
return self.send_error_response(
|
||||
peer_id,
|
||||
RPCResponseErrorCode::InvalidRequest,
|
||||
"Request exceeded `MAX_REQUEST_BLOCKS_DENEB`".into(),
|
||||
format!("Request exceeded max size {max_request_size}"),
|
||||
request_id,
|
||||
);
|
||||
}
|
||||
@@ -425,17 +433,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
|
||||
};
|
||||
|
||||
// Pick out the required blocks, ignoring skip-slots.
|
||||
let mut last_block_root = req
|
||||
.start_slot()
|
||||
.checked_sub(1)
|
||||
.map(|prev_slot| {
|
||||
self.chain
|
||||
.block_root_at_slot(Slot::new(prev_slot), WhenSlotSkipped::Prev)
|
||||
})
|
||||
.transpose()
|
||||
.ok()
|
||||
.flatten()
|
||||
.flatten();
|
||||
let mut last_block_root = None;
|
||||
let maybe_block_roots = process_results(forwards_block_root_iter, |iter| {
|
||||
iter.take_while(|(_, slot)| {
|
||||
slot.as_u64() < req.start_slot().saturating_add(*req.count())
|
||||
@@ -714,17 +712,12 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
|
||||
};
|
||||
|
||||
// Pick out the required blocks, ignoring skip-slots.
|
||||
let mut last_block_root = req
|
||||
.start_slot
|
||||
.checked_sub(1)
|
||||
.map(|prev_slot| {
|
||||
self.chain
|
||||
.block_root_at_slot(Slot::new(prev_slot), WhenSlotSkipped::Prev)
|
||||
})
|
||||
.transpose()
|
||||
.ok()
|
||||
.flatten()
|
||||
.flatten();
|
||||
let mut last_block_root = req.start_slot.checked_sub(1).and_then(|prev_slot| {
|
||||
self.chain
|
||||
.block_root_at_slot(Slot::new(prev_slot), WhenSlotSkipped::Prev)
|
||||
.ok()
|
||||
.flatten()
|
||||
});
|
||||
let maybe_block_roots = process_results(forwards_block_root_iter, |iter| {
|
||||
iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count))
|
||||
// map skip slots to None
|
||||
|
||||
@@ -151,7 +151,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
||||
}
|
||||
|
||||
/// Return count of all currently subscribed subnets (long-lived **and** short-lived).
|
||||
#[cfg(all(test, feature = "spec-mainnet"))]
|
||||
#[cfg(test)]
|
||||
pub fn subscription_count(&self) -> usize {
|
||||
if self.subscribe_all_subnets {
|
||||
self.beacon_chain.spec.attestation_subnet_count as usize
|
||||
@@ -167,7 +167,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
||||
}
|
||||
|
||||
/// Returns whether we are subscribed to a subnet for testing purposes.
|
||||
#[cfg(all(test, feature = "spec-mainnet"))]
|
||||
#[cfg(test)]
|
||||
pub(crate) fn is_subscribed(
|
||||
&self,
|
||||
subnet_id: &SubnetId,
|
||||
@@ -179,7 +179,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(test, feature = "spec-mainnet"))]
|
||||
#[cfg(test)]
|
||||
pub(crate) fn long_lived_subscriptions(&self) -> &HashSet<SubnetId> {
|
||||
&self.long_lived_subscriptions
|
||||
}
|
||||
|
||||
@@ -91,7 +91,7 @@ impl<T: BeaconChainTypes> SyncCommitteeService<T> {
|
||||
}
|
||||
|
||||
/// Return count of all currently subscribed subnets.
|
||||
#[cfg(all(test, feature = "spec-mainnet"))]
|
||||
#[cfg(test)]
|
||||
pub fn subscription_count(&self) -> usize {
|
||||
use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
|
||||
if self.subscribe_all_subnets {
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
#![cfg(feature = "spec-mainnet")]
|
||||
use super::*;
|
||||
use beacon_chain::{
|
||||
builder::{BeaconChainBuilder, Witness},
|
||||
|
||||
@@ -21,8 +21,8 @@ use types::beacon_block_body::to_block_kzg_commitments;
|
||||
use types::{
|
||||
map_fork_name, map_fork_name_with,
|
||||
test_utils::{SeedableRng, TestRandom, XorShiftRng},
|
||||
BeaconBlock, BlobSidecar, BlobsBundle, EthSpec, ForkName, FullPayloadDeneb,
|
||||
MinimalEthSpec as E, SignedBeaconBlock,
|
||||
BeaconBlock, BlobSidecar, EthSpec, ForkName, FullPayloadDeneb, MinimalEthSpec as E,
|
||||
SignedBeaconBlock,
|
||||
};
|
||||
|
||||
type T = Witness<ManualSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
|
||||
@@ -127,7 +127,7 @@ impl TestRig {
|
||||
message.body.blob_kzg_commitments =
|
||||
to_block_kzg_commitments::<E>(bundle.commitments.clone());
|
||||
|
||||
let BlobsBundle {
|
||||
let eth2::types::BlobsBundle {
|
||||
commitments,
|
||||
proofs,
|
||||
blobs,
|
||||
|
||||
@@ -1108,8 +1108,12 @@ impl<T: BeaconChainTypes> SyncManager<T> {
|
||||
self.log, "Blocks and blobs request for range received invalid data";
|
||||
"peer_id" => %peer_id, "batch_id" => resp.batch_id, "error" => e
|
||||
);
|
||||
// TODO: penalize the peer for being a bad boy
|
||||
let id = RequestId::RangeBlockAndBlobs { id };
|
||||
self.network.report_peer(
|
||||
peer_id,
|
||||
PeerAction::MidToleranceError,
|
||||
"block_blob_faulty_batch",
|
||||
);
|
||||
self.inject_error(peer_id, id, RPCError::InvalidData(e.into()))
|
||||
}
|
||||
}
|
||||
@@ -1160,8 +1164,12 @@ impl<T: BeaconChainTypes> SyncManager<T> {
|
||||
self.log, "Blocks and blobs request for backfill received invalid data";
|
||||
"peer_id" => %peer_id, "batch_id" => resp.batch_id, "error" => e
|
||||
);
|
||||
// TODO: penalize the peer for being a bad boy
|
||||
let id = RequestId::BackFillBlockAndBlobs { id };
|
||||
self.network.report_peer(
|
||||
peer_id,
|
||||
PeerAction::MidToleranceError,
|
||||
"block_blob_faulty_backfill_batch",
|
||||
);
|
||||
self.inject_error(peer_id, id, RPCError::InvalidData(e.into()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -537,7 +537,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
|
||||
&self.network_beacon_processor
|
||||
}
|
||||
|
||||
pub(crate) fn next_id(&mut self) -> Id {
|
||||
pub fn next_id(&mut self) -> Id {
|
||||
let id = self.request_id;
|
||||
self.request_id += 1;
|
||||
id
|
||||
@@ -545,7 +545,6 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
|
||||
|
||||
/// Check whether a batch for this epoch (and only this epoch) should request just blocks or
|
||||
/// blocks and blobs.
|
||||
#[allow(unused)]
|
||||
pub fn batch_type(&self, epoch: types::Epoch) -> ByRangeRequestType {
|
||||
// Induces a compile time panic if this doesn't hold true.
|
||||
#[allow(clippy::assertions_on_constants)]
|
||||
@@ -555,12 +554,6 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
|
||||
"To deal with alignment with deneb boundaries, batches need to be of just one epoch"
|
||||
);
|
||||
|
||||
#[cfg(test)]
|
||||
{
|
||||
// Keep tests only for blocks.
|
||||
ByRangeRequestType::Blocks
|
||||
}
|
||||
#[cfg(not(test))]
|
||||
if let Some(data_availability_boundary) = self.chain.data_availability_boundary() {
|
||||
if epoch >= data_availability_boundary {
|
||||
ByRangeRequestType::BlocksAndBlobs
|
||||
|
||||
@@ -384,14 +384,13 @@ mod tests {
|
||||
use crate::NetworkMessage;
|
||||
|
||||
use super::*;
|
||||
use crate::sync::network_context::BlockOrBlob;
|
||||
use beacon_chain::builder::Witness;
|
||||
use beacon_chain::eth1_chain::CachingEth1Backend;
|
||||
use beacon_chain::parking_lot::RwLock;
|
||||
use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType};
|
||||
use beacon_chain::EngineState;
|
||||
use beacon_processor::WorkEvent as BeaconWorkEvent;
|
||||
use lighthouse_network::rpc::BlocksByRangeRequest;
|
||||
use lighthouse_network::Request;
|
||||
use lighthouse_network::{rpc::StatusMessage, NetworkGlobals};
|
||||
use slog::{o, Drain};
|
||||
use slot_clock::TestingSlotClock;
|
||||
@@ -399,7 +398,7 @@ mod tests {
|
||||
use std::sync::Arc;
|
||||
use store::MemoryStore;
|
||||
use tokio::sync::mpsc;
|
||||
use types::{Hash256, MinimalEthSpec as E};
|
||||
use types::{ForkName, Hash256, MinimalEthSpec as E};
|
||||
|
||||
#[derive(Debug)]
|
||||
struct FakeStorage {
|
||||
@@ -515,18 +514,39 @@ mod tests {
|
||||
|
||||
/// Reads an BlocksByRange request to a given peer from the network receiver channel.
|
||||
#[track_caller]
|
||||
fn grab_request(&mut self, expected_peer: &PeerId) -> (RequestId, BlocksByRangeRequest) {
|
||||
if let Ok(NetworkMessage::SendRequest {
|
||||
fn grab_request(
|
||||
&mut self,
|
||||
expected_peer: &PeerId,
|
||||
fork_name: ForkName,
|
||||
) -> (RequestId, Option<RequestId>) {
|
||||
let block_req_id = if let Ok(NetworkMessage::SendRequest {
|
||||
peer_id,
|
||||
request: Request::BlocksByRange(request),
|
||||
request: _,
|
||||
request_id,
|
||||
}) = self.network_rx.try_recv()
|
||||
{
|
||||
assert_eq!(&peer_id, expected_peer);
|
||||
(request_id, request)
|
||||
request_id
|
||||
} else {
|
||||
panic!("Should have sent a batch request to the peer")
|
||||
}
|
||||
};
|
||||
let blob_req_id = match fork_name {
|
||||
ForkName::Deneb => {
|
||||
if let Ok(NetworkMessage::SendRequest {
|
||||
peer_id,
|
||||
request: _,
|
||||
request_id,
|
||||
}) = self.network_rx.try_recv()
|
||||
{
|
||||
assert_eq!(&peer_id, expected_peer);
|
||||
Some(request_id)
|
||||
} else {
|
||||
panic!("Should have sent a batch request to the peer")
|
||||
}
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
(block_req_id, blob_req_id)
|
||||
}
|
||||
|
||||
/// Produce a head peer
|
||||
@@ -646,8 +666,14 @@ mod tests {
|
||||
range.add_peer(&mut rig.cx, local_info, head_peer, remote_info);
|
||||
range.assert_state(RangeSyncType::Head);
|
||||
|
||||
let fork = rig
|
||||
.cx
|
||||
.chain
|
||||
.spec
|
||||
.fork_name_at_epoch(rig.cx.chain.epoch().unwrap());
|
||||
|
||||
// Sync should have requested a batch, grab the request.
|
||||
let _request = rig.grab_request(&head_peer);
|
||||
let _ = rig.grab_request(&head_peer, fork);
|
||||
|
||||
// Now get a peer with an advanced finalized epoch.
|
||||
let (finalized_peer, local_info, remote_info) = rig.finalized_peer();
|
||||
@@ -655,7 +681,7 @@ mod tests {
|
||||
range.assert_state(RangeSyncType::Finalized);
|
||||
|
||||
// Sync should have requested a batch, grab the request
|
||||
let _second_request = rig.grab_request(&finalized_peer);
|
||||
let _ = rig.grab_request(&finalized_peer, fork);
|
||||
|
||||
// Fail the head chain by disconnecting the peer.
|
||||
range.remove_peer(&mut rig.cx, &head_peer);
|
||||
@@ -673,8 +699,14 @@ mod tests {
|
||||
range.add_peer(&mut rig.cx, local_info, head_peer, head_info);
|
||||
range.assert_state(RangeSyncType::Head);
|
||||
|
||||
let fork = rig
|
||||
.cx
|
||||
.chain
|
||||
.spec
|
||||
.fork_name_at_epoch(rig.cx.chain.epoch().unwrap());
|
||||
|
||||
// Sync should have requested a batch, grab the request.
|
||||
let _request = rig.grab_request(&head_peer);
|
||||
let _ = rig.grab_request(&head_peer, fork);
|
||||
|
||||
// Now get a peer with an advanced finalized epoch.
|
||||
let (finalized_peer, local_info, remote_info) = rig.finalized_peer();
|
||||
@@ -683,7 +715,7 @@ mod tests {
|
||||
range.assert_state(RangeSyncType::Finalized);
|
||||
|
||||
// Sync should have requested a batch, grab the request
|
||||
let _second_request = rig.grab_request(&finalized_peer);
|
||||
let _ = rig.grab_request(&finalized_peer, fork);
|
||||
|
||||
// Now the chain knows both chains target roots.
|
||||
rig.chain.remember_block(head_peer_root);
|
||||
@@ -697,15 +729,39 @@ mod tests {
|
||||
#[test]
|
||||
fn pause_and_resume_on_ee_offline() {
|
||||
let (mut rig, mut range) = range(true);
|
||||
let fork = rig
|
||||
.cx
|
||||
.chain
|
||||
.spec
|
||||
.fork_name_at_epoch(rig.cx.chain.epoch().unwrap());
|
||||
|
||||
// add some peers
|
||||
let (peer1, local_info, head_info) = rig.head_peer();
|
||||
range.add_peer(&mut rig.cx, local_info, peer1, head_info);
|
||||
let ((chain1, batch1), id1) = match rig.grab_request(&peer1).0 {
|
||||
RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
|
||||
(rig.cx.range_sync_block_only_response(id, true).unwrap(), id)
|
||||
let (block_req, blob_req_opt) = rig.grab_request(&peer1, fork);
|
||||
|
||||
let (chain1, batch1, id1) = if blob_req_opt.is_some() {
|
||||
match block_req {
|
||||
RequestId::Sync(crate::sync::manager::RequestId::RangeBlockAndBlobs { id }) => {
|
||||
let _ = rig
|
||||
.cx
|
||||
.range_sync_block_and_blob_response(id, BlockOrBlob::Block(None));
|
||||
let (chain1, response) = rig
|
||||
.cx
|
||||
.range_sync_block_and_blob_response(id, BlockOrBlob::Blob(None))
|
||||
.unwrap();
|
||||
(chain1, response.batch_id, id)
|
||||
}
|
||||
other => panic!("unexpected request {:?}", other),
|
||||
}
|
||||
} else {
|
||||
match block_req {
|
||||
RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
|
||||
let (chain, batch) = rig.cx.range_sync_block_only_response(id, true).unwrap();
|
||||
(chain, batch, id)
|
||||
}
|
||||
other => panic!("unexpected request {:?}", other),
|
||||
}
|
||||
other => panic!("unexpected request {:?}", other),
|
||||
};
|
||||
|
||||
// make the ee offline
|
||||
@@ -720,11 +776,30 @@ mod tests {
|
||||
// while the ee is offline, more peers might arrive. Add a new finalized peer.
|
||||
let (peer2, local_info, finalized_info) = rig.finalized_peer();
|
||||
range.add_peer(&mut rig.cx, local_info, peer2, finalized_info);
|
||||
let ((chain2, batch2), id2) = match rig.grab_request(&peer2).0 {
|
||||
RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
|
||||
(rig.cx.range_sync_block_only_response(id, true).unwrap(), id)
|
||||
let (block_req, blob_req_opt) = rig.grab_request(&peer2, fork);
|
||||
|
||||
let (chain2, batch2, id2) = if blob_req_opt.is_some() {
|
||||
match block_req {
|
||||
RequestId::Sync(crate::sync::manager::RequestId::RangeBlockAndBlobs { id }) => {
|
||||
let _ = rig
|
||||
.cx
|
||||
.range_sync_block_and_blob_response(id, BlockOrBlob::Block(None));
|
||||
let (chain2, response) = rig
|
||||
.cx
|
||||
.range_sync_block_and_blob_response(id, BlockOrBlob::Blob(None))
|
||||
.unwrap();
|
||||
(chain2, response.batch_id, id)
|
||||
}
|
||||
other => panic!("unexpected request {:?}", other),
|
||||
}
|
||||
} else {
|
||||
match block_req {
|
||||
RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
|
||||
let (chain, batch) = rig.cx.range_sync_block_only_response(id, true).unwrap();
|
||||
(chain, batch, id)
|
||||
}
|
||||
other => panic!("unexpected request {:?}", other),
|
||||
}
|
||||
other => panic!("unexpected request {:?}", other),
|
||||
};
|
||||
|
||||
// send the response to the request
|
||||
|
||||
Reference in New Issue
Block a user