More deneb cleanup (#4640)

* remove protoc and token from network tests github action

* delete unused beacon chain methods

* downgrade writing blobs to store log

* reduce diff in block import logic

* remove some todo's and deneb built in network

* remove unnecessary error, actually use some added metrics

* remove some metrics, fix missing components on publish functionality

* fix status tests

* rename sidecar by root to blobs by root

* clean up some metrics

* remove unnecessary feature gate from attestation subnet tests, clean up blobs by range response code

* pawan's suggestion in `protocol_info`, peer score in matching up batch sync block and blobs

* fix range tests for deneb

* pub block and blob db cache behind the same mutex

* remove unused errs and an empty file

* move sidecar trait to new file

* move types from payload to eth2 crate

* update comment and add flag value name

* make function private again, remove allow unused

* use reth rlp for tx decoding

* fix compile after merge

* rename kzg commitments

* cargo fmt

* remove unused dep

* Update beacon_node/execution_layer/src/lib.rs

Co-authored-by: Pawan Dhananjay <pawandhananjay@gmail.com>

* Update beacon_node/beacon_processor/src/lib.rs

Co-authored-by: Pawan Dhananjay <pawandhananjay@gmail.com>

* pawan's suggestion for vec capacity

* cargo fmt

* Revert "use reth rlp for tx decoding"

This reverts commit 5181837d81.

* remove reth rlp

---------

Co-authored-by: Pawan Dhananjay <pawandhananjay@gmail.com>
This commit is contained in:
realbigsean
2023-08-20 21:17:17 -04:00
committed by GitHub
parent 4898430330
commit 7d468cb487
66 changed files with 805 additions and 871 deletions

View File

@@ -384,14 +384,13 @@ mod tests {
use crate::NetworkMessage;
use super::*;
use crate::sync::network_context::BlockOrBlob;
use beacon_chain::builder::Witness;
use beacon_chain::eth1_chain::CachingEth1Backend;
use beacon_chain::parking_lot::RwLock;
use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType};
use beacon_chain::EngineState;
use beacon_processor::WorkEvent as BeaconWorkEvent;
use lighthouse_network::rpc::BlocksByRangeRequest;
use lighthouse_network::Request;
use lighthouse_network::{rpc::StatusMessage, NetworkGlobals};
use slog::{o, Drain};
use slot_clock::TestingSlotClock;
@@ -399,7 +398,7 @@ mod tests {
use std::sync::Arc;
use store::MemoryStore;
use tokio::sync::mpsc;
use types::{Hash256, MinimalEthSpec as E};
use types::{ForkName, Hash256, MinimalEthSpec as E};
#[derive(Debug)]
struct FakeStorage {
@@ -515,18 +514,39 @@ mod tests {
/// Reads a BlocksByRange request to a given peer from the network receiver channel.
#[track_caller]
fn grab_request(&mut self, expected_peer: &PeerId) -> (RequestId, BlocksByRangeRequest) {
if let Ok(NetworkMessage::SendRequest {
fn grab_request(
&mut self,
expected_peer: &PeerId,
fork_name: ForkName,
) -> (RequestId, Option<RequestId>) {
let block_req_id = if let Ok(NetworkMessage::SendRequest {
peer_id,
request: Request::BlocksByRange(request),
request: _,
request_id,
}) = self.network_rx.try_recv()
{
assert_eq!(&peer_id, expected_peer);
(request_id, request)
request_id
} else {
panic!("Should have sent a batch request to the peer")
}
};
let blob_req_id = match fork_name {
ForkName::Deneb => {
if let Ok(NetworkMessage::SendRequest {
peer_id,
request: _,
request_id,
}) = self.network_rx.try_recv()
{
assert_eq!(&peer_id, expected_peer);
Some(request_id)
} else {
panic!("Should have sent a batch request to the peer")
}
}
_ => None,
};
(block_req_id, blob_req_id)
}
/// Produce a head peer
@@ -646,8 +666,14 @@ mod tests {
range.add_peer(&mut rig.cx, local_info, head_peer, remote_info);
range.assert_state(RangeSyncType::Head);
let fork = rig
.cx
.chain
.spec
.fork_name_at_epoch(rig.cx.chain.epoch().unwrap());
// Sync should have requested a batch, grab the request.
let _request = rig.grab_request(&head_peer);
let _ = rig.grab_request(&head_peer, fork);
// Now get a peer with an advanced finalized epoch.
let (finalized_peer, local_info, remote_info) = rig.finalized_peer();
@@ -655,7 +681,7 @@ mod tests {
range.assert_state(RangeSyncType::Finalized);
// Sync should have requested a batch, grab the request
let _second_request = rig.grab_request(&finalized_peer);
let _ = rig.grab_request(&finalized_peer, fork);
// Fail the head chain by disconnecting the peer.
range.remove_peer(&mut rig.cx, &head_peer);
@@ -673,8 +699,14 @@ mod tests {
range.add_peer(&mut rig.cx, local_info, head_peer, head_info);
range.assert_state(RangeSyncType::Head);
let fork = rig
.cx
.chain
.spec
.fork_name_at_epoch(rig.cx.chain.epoch().unwrap());
// Sync should have requested a batch, grab the request.
let _request = rig.grab_request(&head_peer);
let _ = rig.grab_request(&head_peer, fork);
// Now get a peer with an advanced finalized epoch.
let (finalized_peer, local_info, remote_info) = rig.finalized_peer();
@@ -683,7 +715,7 @@ mod tests {
range.assert_state(RangeSyncType::Finalized);
// Sync should have requested a batch, grab the request
let _second_request = rig.grab_request(&finalized_peer);
let _ = rig.grab_request(&finalized_peer, fork);
// Now the chain knows both chains target roots.
rig.chain.remember_block(head_peer_root);
@@ -697,15 +729,39 @@ mod tests {
#[test]
fn pause_and_resume_on_ee_offline() {
let (mut rig, mut range) = range(true);
let fork = rig
.cx
.chain
.spec
.fork_name_at_epoch(rig.cx.chain.epoch().unwrap());
// add some peers
let (peer1, local_info, head_info) = rig.head_peer();
range.add_peer(&mut rig.cx, local_info, peer1, head_info);
let ((chain1, batch1), id1) = match rig.grab_request(&peer1).0 {
RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
(rig.cx.range_sync_block_only_response(id, true).unwrap(), id)
let (block_req, blob_req_opt) = rig.grab_request(&peer1, fork);
let (chain1, batch1, id1) = if blob_req_opt.is_some() {
match block_req {
RequestId::Sync(crate::sync::manager::RequestId::RangeBlockAndBlobs { id }) => {
let _ = rig
.cx
.range_sync_block_and_blob_response(id, BlockOrBlob::Block(None));
let (chain1, response) = rig
.cx
.range_sync_block_and_blob_response(id, BlockOrBlob::Blob(None))
.unwrap();
(chain1, response.batch_id, id)
}
other => panic!("unexpected request {:?}", other),
}
} else {
match block_req {
RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
let (chain, batch) = rig.cx.range_sync_block_only_response(id, true).unwrap();
(chain, batch, id)
}
other => panic!("unexpected request {:?}", other),
}
other => panic!("unexpected request {:?}", other),
};
// make the ee offline
@@ -720,11 +776,30 @@ mod tests {
// while the ee is offline, more peers might arrive. Add a new finalized peer.
let (peer2, local_info, finalized_info) = rig.finalized_peer();
range.add_peer(&mut rig.cx, local_info, peer2, finalized_info);
let ((chain2, batch2), id2) = match rig.grab_request(&peer2).0 {
RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
(rig.cx.range_sync_block_only_response(id, true).unwrap(), id)
let (block_req, blob_req_opt) = rig.grab_request(&peer2, fork);
let (chain2, batch2, id2) = if blob_req_opt.is_some() {
match block_req {
RequestId::Sync(crate::sync::manager::RequestId::RangeBlockAndBlobs { id }) => {
let _ = rig
.cx
.range_sync_block_and_blob_response(id, BlockOrBlob::Block(None));
let (chain2, response) = rig
.cx
.range_sync_block_and_blob_response(id, BlockOrBlob::Blob(None))
.unwrap();
(chain2, response.batch_id, id)
}
other => panic!("unexpected request {:?}", other),
}
} else {
match block_req {
RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
let (chain, batch) = rig.cx.range_sync_block_only_response(id, true).unwrap();
(chain, batch, id)
}
other => panic!("unexpected request {:?}", other),
}
other => panic!("unexpected request {:?}", other),
};
// send the response to the request