mirror of
https://github.com/sigp/lighthouse.git
synced 2026-05-07 00:42:42 +00:00
Merge branch 'unstable' of https://github.com/sigp/lighthouse into electra_attestation_changes
This commit is contained in:
2
.github/workflows/release.yml
vendored
2
.github/workflows/release.yml
vendored
@@ -64,7 +64,7 @@ jobs:
|
||||
- uses: KyleMayes/install-llvm-action@v1
|
||||
if: env.SELF_HOSTED_RUNNERS == 'false' && startsWith(matrix.arch, 'x86_64-windows')
|
||||
with:
|
||||
version: "16.0"
|
||||
version: "17.0"
|
||||
directory: ${{ runner.temp }}/llvm
|
||||
- name: Set LIBCLANG_PATH
|
||||
if: startsWith(matrix.arch, 'x86_64-windows')
|
||||
|
||||
5
.github/workflows/test-suite.yml
vendored
5
.github/workflows/test-suite.yml
vendored
@@ -112,11 +112,6 @@ jobs:
|
||||
- name: Install make
|
||||
if: env.SELF_HOSTED_RUNNERS == 'false'
|
||||
run: choco install -y make
|
||||
# - uses: KyleMayes/install-llvm-action@v1
|
||||
# if: env.SELF_HOSTED_RUNNERS == 'false'
|
||||
# with:
|
||||
# version: "16.0"
|
||||
# directory: ${{ runner.temp }}/llvm
|
||||
- name: Set LIBCLANG_PATH
|
||||
run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
|
||||
- name: Run tests in release
|
||||
|
||||
14
Cargo.lock
generated
14
Cargo.lock
generated
@@ -1716,16 +1716,15 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "curve25519-dalek"
|
||||
version = "4.1.2"
|
||||
version = "4.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348"
|
||||
checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"cpufeatures",
|
||||
"curve25519-dalek-derive",
|
||||
"digest 0.10.7",
|
||||
"fiat-crypto",
|
||||
"platforms 3.4.0",
|
||||
"rustc_version 0.4.0",
|
||||
"subtle",
|
||||
"zeroize",
|
||||
@@ -4964,6 +4963,7 @@ dependencies = [
|
||||
"libp2p-mplex",
|
||||
"lighthouse_metrics",
|
||||
"lighthouse_version",
|
||||
"logging",
|
||||
"lru",
|
||||
"lru_cache",
|
||||
"parking_lot 0.12.3",
|
||||
@@ -6143,12 +6143,6 @@ version = "2.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94"
|
||||
|
||||
[[package]]
|
||||
name = "platforms"
|
||||
version = "3.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7"
|
||||
|
||||
[[package]]
|
||||
name = "plotters"
|
||||
version = "0.3.6"
|
||||
@@ -6455,7 +6449,7 @@ dependencies = [
|
||||
"nix 0.24.3",
|
||||
"num_cpus",
|
||||
"once_cell",
|
||||
"platforms 2.0.0",
|
||||
"platforms",
|
||||
"thiserror",
|
||||
"unescape",
|
||||
]
|
||||
|
||||
1
Makefile
1
Makefile
@@ -229,7 +229,6 @@ lint:
|
||||
-D clippy::manual_let_else \
|
||||
-D warnings \
|
||||
-A clippy::derive_partial_eq_without_eq \
|
||||
-A clippy::from-over-into \
|
||||
-A clippy::upper-case-acronyms \
|
||||
-A clippy::vec-init-then-push \
|
||||
-A clippy::question-mark \
|
||||
|
||||
@@ -389,35 +389,35 @@ pub struct PersistedForkChoiceStore {
|
||||
pub equivocating_indices: BTreeSet<u64>,
|
||||
}
|
||||
|
||||
impl Into<PersistedForkChoiceStore> for PersistedForkChoiceStoreV11 {
|
||||
fn into(self) -> PersistedForkChoiceStore {
|
||||
impl From<PersistedForkChoiceStoreV11> for PersistedForkChoiceStore {
|
||||
fn from(from: PersistedForkChoiceStoreV11) -> PersistedForkChoiceStore {
|
||||
PersistedForkChoiceStore {
|
||||
balances_cache: self.balances_cache,
|
||||
time: self.time,
|
||||
finalized_checkpoint: self.finalized_checkpoint,
|
||||
justified_checkpoint: self.justified_checkpoint,
|
||||
justified_balances: self.justified_balances,
|
||||
unrealized_justified_checkpoint: self.unrealized_justified_checkpoint,
|
||||
unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint,
|
||||
proposer_boost_root: self.proposer_boost_root,
|
||||
equivocating_indices: self.equivocating_indices,
|
||||
balances_cache: from.balances_cache,
|
||||
time: from.time,
|
||||
finalized_checkpoint: from.finalized_checkpoint,
|
||||
justified_checkpoint: from.justified_checkpoint,
|
||||
justified_balances: from.justified_balances,
|
||||
unrealized_justified_checkpoint: from.unrealized_justified_checkpoint,
|
||||
unrealized_finalized_checkpoint: from.unrealized_finalized_checkpoint,
|
||||
proposer_boost_root: from.proposer_boost_root,
|
||||
equivocating_indices: from.equivocating_indices,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<PersistedForkChoiceStoreV11> for PersistedForkChoiceStore {
|
||||
fn into(self) -> PersistedForkChoiceStoreV11 {
|
||||
impl From<PersistedForkChoiceStore> for PersistedForkChoiceStoreV11 {
|
||||
fn from(from: PersistedForkChoiceStore) -> PersistedForkChoiceStoreV11 {
|
||||
PersistedForkChoiceStoreV11 {
|
||||
balances_cache: self.balances_cache,
|
||||
time: self.time,
|
||||
finalized_checkpoint: self.finalized_checkpoint,
|
||||
justified_checkpoint: self.justified_checkpoint,
|
||||
justified_balances: self.justified_balances,
|
||||
balances_cache: from.balances_cache,
|
||||
time: from.time,
|
||||
finalized_checkpoint: from.finalized_checkpoint,
|
||||
justified_checkpoint: from.justified_checkpoint,
|
||||
justified_balances: from.justified_balances,
|
||||
best_justified_checkpoint: JUNK_BEST_JUSTIFIED_CHECKPOINT,
|
||||
unrealized_justified_checkpoint: self.unrealized_justified_checkpoint,
|
||||
unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint,
|
||||
proposer_boost_root: self.proposer_boost_root,
|
||||
equivocating_indices: self.equivocating_indices,
|
||||
unrealized_justified_checkpoint: from.unrealized_justified_checkpoint,
|
||||
unrealized_finalized_checkpoint: from.unrealized_finalized_checkpoint,
|
||||
proposer_boost_root: from.proposer_boost_root,
|
||||
equivocating_indices: from.equivocating_indices,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,20 +20,20 @@ pub struct PersistedForkChoice {
|
||||
pub fork_choice_store: PersistedForkChoiceStoreV17,
|
||||
}
|
||||
|
||||
impl Into<PersistedForkChoice> for PersistedForkChoiceV11 {
|
||||
fn into(self) -> PersistedForkChoice {
|
||||
impl From<PersistedForkChoiceV11> for PersistedForkChoice {
|
||||
fn from(from: PersistedForkChoiceV11) -> PersistedForkChoice {
|
||||
PersistedForkChoice {
|
||||
fork_choice: self.fork_choice,
|
||||
fork_choice_store: self.fork_choice_store.into(),
|
||||
fork_choice: from.fork_choice,
|
||||
fork_choice_store: from.fork_choice_store.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<PersistedForkChoiceV11> for PersistedForkChoice {
|
||||
fn into(self) -> PersistedForkChoiceV11 {
|
||||
impl From<PersistedForkChoice> for PersistedForkChoiceV11 {
|
||||
fn from(from: PersistedForkChoice) -> PersistedForkChoiceV11 {
|
||||
PersistedForkChoiceV11 {
|
||||
fork_choice: self.fork_choice,
|
||||
fork_choice_store: self.fork_choice_store.into(),
|
||||
fork_choice: from.fork_choice,
|
||||
fork_choice_store: from.fork_choice_store.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -257,9 +257,9 @@ pub mod deposit_methods {
|
||||
Latest,
|
||||
}
|
||||
|
||||
impl Into<u64> for Eth1Id {
|
||||
fn into(self) -> u64 {
|
||||
match self {
|
||||
impl From<Eth1Id> for u64 {
|
||||
fn from(from: Eth1Id) -> u64 {
|
||||
match from {
|
||||
Eth1Id::Mainnet => 1,
|
||||
Eth1Id::Custom(id) => id,
|
||||
}
|
||||
|
||||
@@ -60,6 +60,7 @@ tempfile = { workspace = true }
|
||||
quickcheck = { workspace = true }
|
||||
quickcheck_macros = { workspace = true }
|
||||
async-channel = { workspace = true }
|
||||
logging = { workspace = true }
|
||||
|
||||
[features]
|
||||
libp2p-websocket = []
|
||||
|
||||
@@ -374,6 +374,12 @@ where
|
||||
id: outbound_info.req_id,
|
||||
})));
|
||||
}
|
||||
|
||||
// Also handle any events that are awaiting to be sent to the behaviour
|
||||
if !self.events_out.is_empty() {
|
||||
return Poll::Ready(Some(self.events_out.remove(0)));
|
||||
}
|
||||
|
||||
Poll::Ready(None)
|
||||
}
|
||||
|
||||
|
||||
@@ -316,6 +316,27 @@ where
|
||||
self.events.push(error_msg);
|
||||
}
|
||||
}
|
||||
|
||||
// Replace the pending Requests to the disconnected peer
|
||||
// with reports of failed requests.
|
||||
self.events.iter_mut().for_each(|event| match &event {
|
||||
ToSwarm::NotifyHandler {
|
||||
peer_id: p,
|
||||
event: RPCSend::Request(request_id, req),
|
||||
..
|
||||
} if *p == peer_id => {
|
||||
*event = ToSwarm::GenerateEvent(RPCMessage {
|
||||
peer_id,
|
||||
conn_id: connection_id,
|
||||
event: HandlerEvent::Err(HandlerErr::Outbound {
|
||||
id: *request_id,
|
||||
proto: req.versioned_protocol().protocol(),
|
||||
error: RPCError::Disconnected,
|
||||
}),
|
||||
});
|
||||
}
|
||||
_ => {}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -147,7 +147,7 @@ impl<Id: ReqId, E: EthSpec> SelfRateLimiter<Id, E> {
|
||||
Err((rate_limited_req, wait_time)) => {
|
||||
let key = (peer_id, protocol);
|
||||
self.next_peer_request.insert(key, wait_time);
|
||||
queued_requests.push_back(rate_limited_req);
|
||||
queued_requests.push_front(rate_limited_req);
|
||||
// If one fails just wait for the next window that allows sending requests.
|
||||
return;
|
||||
}
|
||||
@@ -205,3 +205,72 @@ impl<Id: ReqId, E: EthSpec> SelfRateLimiter<Id, E> {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::rpc::config::{OutboundRateLimiterConfig, RateLimiterConfig};
|
||||
use crate::rpc::rate_limiter::Quota;
|
||||
use crate::rpc::self_limiter::SelfRateLimiter;
|
||||
use crate::rpc::{OutboundRequest, Ping, Protocol};
|
||||
use crate::service::api_types::RequestId;
|
||||
use libp2p::PeerId;
|
||||
use std::time::Duration;
|
||||
use types::MainnetEthSpec;
|
||||
|
||||
/// Test that `next_peer_request_ready` correctly maintains the queue.
|
||||
#[tokio::test]
|
||||
async fn test_next_peer_request_ready() {
|
||||
let log = logging::test_logger();
|
||||
let config = OutboundRateLimiterConfig(RateLimiterConfig {
|
||||
ping_quota: Quota::n_every(1, 2),
|
||||
..Default::default()
|
||||
});
|
||||
let mut limiter: SelfRateLimiter<RequestId<u64>, MainnetEthSpec> =
|
||||
SelfRateLimiter::new(config, log).unwrap();
|
||||
let peer_id = PeerId::random();
|
||||
|
||||
for i in 1..=5 {
|
||||
let _ = limiter.allows(
|
||||
peer_id,
|
||||
RequestId::Application(i),
|
||||
OutboundRequest::Ping(Ping { data: i }),
|
||||
);
|
||||
}
|
||||
|
||||
{
|
||||
let queue = limiter
|
||||
.delayed_requests
|
||||
.get(&(peer_id, Protocol::Ping))
|
||||
.unwrap();
|
||||
assert_eq!(4, queue.len());
|
||||
|
||||
// Check that requests in the queue are ordered in the sequence 2, 3, 4, 5.
|
||||
let mut iter = queue.iter();
|
||||
for i in 2..=5 {
|
||||
assert_eq!(iter.next().unwrap().request_id, RequestId::Application(i));
|
||||
}
|
||||
|
||||
assert_eq!(limiter.ready_requests.len(), 0);
|
||||
}
|
||||
|
||||
// Wait until the tokens have been regenerated, then run `next_peer_request_ready`.
|
||||
tokio::time::sleep(Duration::from_secs(3)).await;
|
||||
limiter.next_peer_request_ready(peer_id, Protocol::Ping);
|
||||
|
||||
{
|
||||
let queue = limiter
|
||||
.delayed_requests
|
||||
.get(&(peer_id, Protocol::Ping))
|
||||
.unwrap();
|
||||
assert_eq!(3, queue.len());
|
||||
|
||||
// Check that requests in the queue are ordered in the sequence 3, 4, 5.
|
||||
let mut iter = queue.iter();
|
||||
for i in 3..=5 {
|
||||
assert_eq!(iter.next().unwrap().request_id, RequestId::Application(i));
|
||||
}
|
||||
|
||||
assert_eq!(limiter.ready_requests.len(), 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -700,7 +700,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
|
||||
"index" => %index,
|
||||
"commitment" => %commitment,
|
||||
);
|
||||
// Prevent recurring behaviour by penalizing the peer slightly.
|
||||
// Prevent recurring behaviour by penalizing the peer.
|
||||
self.gossip_penalize_peer(
|
||||
peer_id,
|
||||
PeerAction::LowToleranceError,
|
||||
@@ -712,10 +712,8 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
|
||||
MessageAcceptance::Reject,
|
||||
);
|
||||
}
|
||||
GossipBlobError::FutureSlot { .. }
|
||||
| GossipBlobError::RepeatBlob { .. }
|
||||
| GossipBlobError::PastFinalizedSlot { .. } => {
|
||||
warn!(
|
||||
GossipBlobError::FutureSlot { .. } | GossipBlobError::RepeatBlob { .. } => {
|
||||
debug!(
|
||||
self.log,
|
||||
"Could not verify blob sidecar for gossip. Ignoring the blob sidecar";
|
||||
"error" => ?err,
|
||||
@@ -736,6 +734,30 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
|
||||
MessageAcceptance::Ignore,
|
||||
);
|
||||
}
|
||||
GossipBlobError::PastFinalizedSlot { .. } => {
|
||||
debug!(
|
||||
self.log,
|
||||
"Could not verify blob sidecar for gossip. Ignoring the blob sidecar";
|
||||
"error" => ?err,
|
||||
"slot" => %slot,
|
||||
"root" => %root,
|
||||
"index" => %index,
|
||||
"commitment" => %commitment,
|
||||
);
|
||||
// Prevent recurring behaviour by penalizing the peer. A low-tolerance
|
||||
// error is fine because there's no reason for peers to be propagating old
|
||||
// blobs on gossip, even if their view of finality is lagging.
|
||||
self.gossip_penalize_peer(
|
||||
peer_id,
|
||||
PeerAction::LowToleranceError,
|
||||
"gossip_blob_low",
|
||||
);
|
||||
self.propagate_validation_result(
|
||||
message_id,
|
||||
peer_id,
|
||||
MessageAcceptance::Ignore,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1715,11 +1715,11 @@ impl<E: EthSpec> ForkVersionDeserialize for FullBlockContents<E> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> Into<BeaconBlock<E>> for FullBlockContents<E> {
|
||||
fn into(self) -> BeaconBlock<E> {
|
||||
match self {
|
||||
Self::BlockContents(block_and_sidecars) => block_and_sidecars.block,
|
||||
Self::Block(block) => block,
|
||||
impl<E: EthSpec> From<FullBlockContents<E>> for BeaconBlock<E> {
|
||||
fn from(from: FullBlockContents<E>) -> BeaconBlock<E> {
|
||||
match from {
|
||||
FullBlockContents::<E>::BlockContents(block_and_sidecars) => block_and_sidecars.block,
|
||||
FullBlockContents::<E>::Block(block) => block,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -145,24 +145,24 @@ impl TryInto<ProtoNode> for ProtoNodeV16 {
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<ProtoNodeV16> for ProtoNode {
|
||||
fn into(self) -> ProtoNodeV16 {
|
||||
impl From<ProtoNode> for ProtoNodeV16 {
|
||||
fn from(from: ProtoNode) -> ProtoNodeV16 {
|
||||
ProtoNodeV16 {
|
||||
slot: self.slot,
|
||||
state_root: self.state_root,
|
||||
target_root: self.target_root,
|
||||
current_epoch_shuffling_id: self.current_epoch_shuffling_id,
|
||||
next_epoch_shuffling_id: self.next_epoch_shuffling_id,
|
||||
root: self.root,
|
||||
parent: self.parent,
|
||||
justified_checkpoint: Some(self.justified_checkpoint),
|
||||
finalized_checkpoint: Some(self.finalized_checkpoint),
|
||||
weight: self.weight,
|
||||
best_child: self.best_child,
|
||||
best_descendant: self.best_descendant,
|
||||
execution_status: self.execution_status,
|
||||
unrealized_justified_checkpoint: self.unrealized_justified_checkpoint,
|
||||
unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint,
|
||||
slot: from.slot,
|
||||
state_root: from.state_root,
|
||||
target_root: from.target_root,
|
||||
current_epoch_shuffling_id: from.current_epoch_shuffling_id,
|
||||
next_epoch_shuffling_id: from.next_epoch_shuffling_id,
|
||||
root: from.root,
|
||||
parent: from.parent,
|
||||
justified_checkpoint: Some(from.justified_checkpoint),
|
||||
finalized_checkpoint: Some(from.finalized_checkpoint),
|
||||
weight: from.weight,
|
||||
best_child: from.best_child,
|
||||
best_descendant: from.best_descendant,
|
||||
execution_status: from.execution_status,
|
||||
unrealized_justified_checkpoint: from.unrealized_justified_checkpoint,
|
||||
unrealized_finalized_checkpoint: from.unrealized_finalized_checkpoint,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -55,19 +55,19 @@ impl TryInto<SszContainer> for SszContainerV16 {
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<SszContainerV16> for SszContainer {
|
||||
fn into(self) -> SszContainerV16 {
|
||||
let nodes = self.nodes.into_iter().map(Into::into).collect();
|
||||
impl From<SszContainer> for SszContainerV16 {
|
||||
fn from(from: SszContainer) -> SszContainerV16 {
|
||||
let nodes = from.nodes.into_iter().map(Into::into).collect();
|
||||
|
||||
SszContainerV16 {
|
||||
votes: self.votes,
|
||||
balances: self.balances,
|
||||
prune_threshold: self.prune_threshold,
|
||||
justified_checkpoint: self.justified_checkpoint,
|
||||
finalized_checkpoint: self.finalized_checkpoint,
|
||||
votes: from.votes,
|
||||
balances: from.balances,
|
||||
prune_threshold: from.prune_threshold,
|
||||
justified_checkpoint: from.justified_checkpoint,
|
||||
finalized_checkpoint: from.finalized_checkpoint,
|
||||
nodes,
|
||||
indices: self.indices,
|
||||
previous_proposer_boost: self.previous_proposer_boost,
|
||||
indices: from.indices,
|
||||
previous_proposer_boost: from.previous_proposer_boost,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,10 +6,7 @@ use crate::{
|
||||
use crate::{KzgProofs, SignedBeaconBlock};
|
||||
use bls::Signature;
|
||||
use derivative::Derivative;
|
||||
use kzg::{
|
||||
Blob as KzgBlob, Kzg, KzgCommitment, KzgProof, BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT,
|
||||
FIELD_ELEMENTS_PER_BLOB,
|
||||
};
|
||||
use kzg::{Blob as KzgBlob, Kzg, KzgCommitment, KzgProof, BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT};
|
||||
use merkle_proof::{merkle_root_from_branch, verify_merkle_proof, MerkleTreeError};
|
||||
use rand::Rng;
|
||||
use safe_arith::ArithError;
|
||||
@@ -221,13 +218,7 @@ impl<E: EthSpec> BlobSidecar<E> {
|
||||
rng.fill_bytes(&mut blob_bytes);
|
||||
// Ensure that the blob is canonical by ensuring that
|
||||
// each field element contained in the blob is < BLS_MODULUS
|
||||
for i in 0..FIELD_ELEMENTS_PER_BLOB {
|
||||
let Some(byte) = blob_bytes.get_mut(
|
||||
i.checked_mul(BYTES_PER_FIELD_ELEMENT)
|
||||
.ok_or("overflow".to_string())?,
|
||||
) else {
|
||||
return Err(format!("blob byte index out of bounds: {:?}", i));
|
||||
};
|
||||
for byte in blob_bytes.iter_mut().step_by(BYTES_PER_FIELD_ELEMENT) {
|
||||
*byte = 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -37,9 +37,9 @@ impl From<[u8; GRAFFITI_BYTES_LEN]> for Graffiti {
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<[u8; GRAFFITI_BYTES_LEN]> for Graffiti {
|
||||
fn into(self) -> [u8; GRAFFITI_BYTES_LEN] {
|
||||
self.0
|
||||
impl From<Graffiti> for [u8; GRAFFITI_BYTES_LEN] {
|
||||
fn from(from: Graffiti) -> [u8; GRAFFITI_BYTES_LEN] {
|
||||
from.0
|
||||
}
|
||||
}
|
||||
|
||||
@@ -77,9 +77,9 @@ impl<'de> Deserialize<'de> for GraffitiString {
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<Graffiti> for GraffitiString {
|
||||
fn into(self) -> Graffiti {
|
||||
let graffiti_bytes = self.0.as_bytes();
|
||||
impl From<GraffitiString> for Graffiti {
|
||||
fn from(from: GraffitiString) -> Graffiti {
|
||||
let graffiti_bytes = from.0.as_bytes();
|
||||
let mut graffiti = [0; GRAFFITI_BYTES_LEN];
|
||||
|
||||
let graffiti_len = std::cmp::min(graffiti_bytes.len(), GRAFFITI_BYTES_LEN);
|
||||
|
||||
@@ -77,9 +77,9 @@ impl SelectionProof {
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<Signature> for SelectionProof {
|
||||
fn into(self) -> Signature {
|
||||
self.0
|
||||
impl From<SelectionProof> for Signature {
|
||||
fn from(from: SelectionProof) -> Signature {
|
||||
from.0
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -6,9 +6,9 @@ macro_rules! impl_from_into_u64 {
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<u64> for $main {
|
||||
fn into(self) -> u64 {
|
||||
self.0
|
||||
impl From<$main> for u64 {
|
||||
fn from(from: $main) -> u64 {
|
||||
from.0
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,9 +28,9 @@ macro_rules! impl_from_into_usize {
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<usize> for $main {
|
||||
fn into(self) -> usize {
|
||||
self.0 as usize
|
||||
impl From<$main> for usize {
|
||||
fn from(from: $main) -> usize {
|
||||
from.0 as usize
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -152,15 +152,15 @@ impl From<u64> for SubnetId {
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<u64> for SubnetId {
|
||||
fn into(self) -> u64 {
|
||||
self.0
|
||||
impl From<SubnetId> for u64 {
|
||||
fn from(from: SubnetId) -> u64 {
|
||||
from.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<u64> for &SubnetId {
|
||||
fn into(self) -> u64 {
|
||||
self.0
|
||||
impl From<&SubnetId> for u64 {
|
||||
fn from(from: &SubnetId) -> u64 {
|
||||
from.0
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -90,9 +90,9 @@ impl SyncSelectionProof {
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<Signature> for SyncSelectionProof {
|
||||
fn into(self) -> Signature {
|
||||
self.0
|
||||
impl From<SyncSelectionProof> for Signature {
|
||||
fn from(from: SyncSelectionProof) -> Signature {
|
||||
from.0
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -78,15 +78,15 @@ impl From<u64> for SyncSubnetId {
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<u64> for SyncSubnetId {
|
||||
fn into(self) -> u64 {
|
||||
self.0
|
||||
impl From<SyncSubnetId> for u64 {
|
||||
fn from(from: SyncSubnetId) -> u64 {
|
||||
from.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<u64> for &SyncSubnetId {
|
||||
fn into(self) -> u64 {
|
||||
self.0
|
||||
impl From<&SyncSubnetId> for u64 {
|
||||
fn from(from: &SyncSubnetId) -> u64 {
|
||||
from.0
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -14,9 +14,9 @@ pub enum ChecksumFunction {
|
||||
Sha256,
|
||||
}
|
||||
|
||||
impl Into<String> for ChecksumFunction {
|
||||
fn into(self) -> String {
|
||||
match self {
|
||||
impl From<ChecksumFunction> for String {
|
||||
fn from(from: ChecksumFunction) -> String {
|
||||
match from {
|
||||
ChecksumFunction::Sha256 => "sha256".into(),
|
||||
}
|
||||
}
|
||||
@@ -38,8 +38,8 @@ impl TryFrom<String> for ChecksumFunction {
|
||||
#[serde(try_from = "Value", into = "Value")]
|
||||
pub struct EmptyMap;
|
||||
|
||||
impl Into<Value> for EmptyMap {
|
||||
fn into(self) -> Value {
|
||||
impl From<EmptyMap> for Value {
|
||||
fn from(_from: EmptyMap) -> Value {
|
||||
Value::Object(Map::default())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,9 +13,9 @@ pub enum CipherFunction {
|
||||
Aes128Ctr,
|
||||
}
|
||||
|
||||
impl Into<String> for CipherFunction {
|
||||
fn into(self) -> String {
|
||||
match self {
|
||||
impl From<CipherFunction> for String {
|
||||
fn from(from: CipherFunction) -> String {
|
||||
match from {
|
||||
CipherFunction::Aes128Ctr => "aes-128-ctr".into(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,9 +25,9 @@ impl From<Vec<u8>> for HexBytes {
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<String> for HexBytes {
|
||||
fn into(self) -> String {
|
||||
hex::encode(self.0)
|
||||
impl From<HexBytes> for String {
|
||||
fn from(from: HexBytes) -> String {
|
||||
hex::encode(from.0)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -23,8 +23,8 @@ pub struct KdfModule {
|
||||
#[serde(try_from = "String", into = "String")]
|
||||
pub struct EmptyString;
|
||||
|
||||
impl Into<String> for EmptyString {
|
||||
fn into(self) -> String {
|
||||
impl From<EmptyString> for String {
|
||||
fn from(_from: EmptyString) -> String {
|
||||
"".into()
|
||||
}
|
||||
}
|
||||
@@ -91,9 +91,9 @@ pub enum KdfFunction {
|
||||
Pbkdf2,
|
||||
}
|
||||
|
||||
impl Into<String> for KdfFunction {
|
||||
fn into(self) -> String {
|
||||
match self {
|
||||
impl From<KdfFunction> for String {
|
||||
fn from(from: KdfFunction) -> String {
|
||||
match from {
|
||||
KdfFunction::Scrypt => "scrypt".into(),
|
||||
KdfFunction::Pbkdf2 => "pbkdf2".into(),
|
||||
}
|
||||
|
||||
@@ -39,9 +39,9 @@ pub enum TypeField {
|
||||
Hd,
|
||||
}
|
||||
|
||||
impl Into<String> for TypeField {
|
||||
fn into(self) -> String {
|
||||
match self {
|
||||
impl From<TypeField> for String {
|
||||
fn from(from: TypeField) -> String {
|
||||
match from {
|
||||
TypeField::Hd => "hierarchical deterministic".into(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,9 +38,9 @@ impl From<[u8; BYTES_PER_PROOF]> for KzgProof {
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<[u8; BYTES_PER_PROOF]> for KzgProof {
|
||||
fn into(self) -> [u8; BYTES_PER_PROOF] {
|
||||
self.0
|
||||
impl From<KzgProof> for [u8; BYTES_PER_PROOF] {
|
||||
fn from(from: KzgProof) -> [u8; BYTES_PER_PROOF] {
|
||||
from.0
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,201 +1,85 @@
|
||||
# Simple Local Testnet
|
||||
|
||||
These scripts allow for running a small local testnet with multiple beacon nodes and validator clients and a geth execution client.
|
||||
These scripts allow for running a small local testnet with a default of 4 beacon nodes, 4 validator clients and 4 geth execution clients using Kurtosis.
|
||||
This setup can be useful for testing and development.
|
||||
|
||||
## Requirements
|
||||
## Installation
|
||||
|
||||
The scripts require `lcli`, `lighthouse`, `geth`, `bootnode` to be installed on `PATH` (run `echo $PATH` to view all `PATH` directories).
|
||||
1. Install [Docker](https://docs.docker.com/get-docker/). Verify that Docker has been successfully installed by running `sudo docker run hello-world`.
|
||||
|
||||
1. Install [Kurtosis](https://docs.kurtosis.com/install/). Verify that Kurtosis has been successfully installed by running `kurtosis version` which should display the version.
|
||||
|
||||
MacOS users need to install GNU `sed` and GNU `grep`, and add them both to `PATH` as well.
|
||||
|
||||
The first step is to install Rust and dependencies. Refer to the [Lighthouse Book](https://lighthouse-book.sigmaprime.io/installation-source.html#dependencies) for installation. We will also need [jq](https://jqlang.github.io/jq/), which can be installed with `sudo apt install jq`.
|
||||
|
||||
Then, we clone the Lighthouse repository:
|
||||
```bash
|
||||
cd ~
|
||||
git clone https://github.com/sigp/lighthouse.git
|
||||
cd lighthouse
|
||||
```
|
||||
We are now ready to build Lighthouse. Run the command:
|
||||
|
||||
```bash
|
||||
make
|
||||
make install-lcli
|
||||
```
|
||||
|
||||
This will build `lighthouse` and `lcli`. For `geth` and `bootnode`, go to [geth website](https://geth.ethereum.org/downloads) and download the `Geth & Tools`. For example, to download and extract `Geth & Tools 1.13.1`:
|
||||
|
||||
```bash
|
||||
cd ~
|
||||
curl -LO https://gethstore.blob.core.windows.net/builds/geth-alltools-linux-amd64-1.13.1-3f40e65c.tar.gz
|
||||
tar xvf geth-alltools-linux-amd64-1.13.1-3f40e65c.tar.gz
|
||||
```
|
||||
|
||||
After extraction, copy `geth` and `bootnode` to the `PATH`. A typical directory is `/usr/local/bin`.
|
||||
|
||||
```bash
|
||||
cd geth-alltools-linux-amd64-1.13.1-3f40e65c
|
||||
sudo cp geth bootnode /usr/local/bin
|
||||
```
|
||||
|
||||
After that We can remove the downloaded files:
|
||||
|
||||
```bash
|
||||
cd ~
|
||||
rm -r geth-alltools-linux-amd64-1.13.1-3f40e65c geth-alltools-linux-amd64-1.13.1-3f40e65c.tar.gz
|
||||
```
|
||||
|
||||
We are now ready to start a local testnet.
|
||||
1. Install [yq](https://github.com/mikefarah/yq). If you are on Ubuntu, you can install `yq` by running `sudo apt install yq -y`.
|
||||
|
||||
## Starting the testnet
|
||||
|
||||
To start a testnet using the predetermined settings:
|
||||
To start a testnet, from the Lighthouse root repository:
|
||||
|
||||
```bash
|
||||
cd ~
|
||||
cd ./lighthouse/scripts/local_testnet
|
||||
./start_local_testnet.sh genesis.json
|
||||
cd ./scripts/local_testnet
|
||||
./start_local_testnet.sh
|
||||
```
|
||||
|
||||
This will execute the script and if the testnet setup is successful, you will see "Started!" at the end.
|
||||
It will build a Lighthouse docker image from the root of the directory and will take an approximately 12 minutes to complete. Once built, the testing will be started automatically. You will see a list of services running and "Started!" at the end.
|
||||
You can also select your own Lighthouse docker image to use by specifying it in `network_params.yml` under the `cl_image` key.
|
||||
Full configuration reference for kurtosis is specified [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration).
|
||||
|
||||
The testnet starts with a post-merge genesis state.
|
||||
The testnet starts a consensus layer and execution layer boot node along with `BN_COUNT`
|
||||
(the number of beacon nodes) each connected to a geth execution client and `VC_COUNT` (the number of validator clients). By default, `BN_COUNT=4`, `VC_COUNT=4`.
|
||||
|
||||
The `start_local_testnet.sh` script takes four options `-v VC_COUNT`, `-d DEBUG_LEVEL`, `-p` to enable builder proposals and `-h` for help. It also takes a mandatory `GENESIS_FILE` for initialising geth's state.
|
||||
A sample `genesis.json` is provided in this directory.
|
||||
|
||||
The options may be in any order or absent in which case they take the default value specified.
|
||||
- VC_COUNT: the number of validator clients to create, default: `BN_COUNT`
|
||||
- DEBUG_LEVEL: one of { error, warn, info, debug, trace }, default: `info`
|
||||
|
||||
The `ETH1_BLOCK_HASH` environment variable is set to the block_hash of the genesis execution layer block which depends on the contents of `genesis.json`. Users of these scripts need to ensure that the `ETH1_BLOCK_HASH` variable is updated if genesis file is modified.
|
||||
|
||||
To view the beacon, validator client and geth logs:
|
||||
To view all running services:
|
||||
|
||||
```bash
|
||||
tail -f ~/.lighthouse/local-testnet/testnet/beacon_node_1.log
|
||||
tail -f ~/.lighthouse/local-testnet/testnet/validator_node_1.log
|
||||
tail -f ~/.lighthouse/local-testnet/testnet/geth_1.log
|
||||
kurtosis enclave inspect local-testnet
|
||||
```
|
||||
|
||||
where `beacon_node_1` can be changed to `beacon_node_2`, `beacon_node_3` or `beacon_node_4` to view logs for different beacon nodes. The same applies to validator clients and geth nodes.
|
||||
To view the logs:
|
||||
|
||||
```bash
|
||||
kurtosis service logs local-testnet $SERVICE_NAME
|
||||
```
|
||||
|
||||
where `$SERVICE_NAME` is obtained by inspecting the running services above. For example, to view the logs of the first beacon node, validator client and geth:
|
||||
|
||||
```bash
|
||||
kurtosis service logs local-testnet -f cl-1-lighthouse-geth
|
||||
kurtosis service logs local-testnet -f vc-1-geth-lighthouse
|
||||
kurtosis service logs local-testnet -f el-1-geth-lighthouse
|
||||
```
|
||||
|
||||
If you would like to save the logs, use the command:
|
||||
|
||||
```bash
|
||||
kurtosis dump $OUTPUT_DIRECTORY
|
||||
```
|
||||
|
||||
This will create a folder named `$OUTPUT_DIRECTORY` in the present working directory that contains all logs and other information. If you want the logs for a particular service and saved to a file named `logs.txt`:
|
||||
|
||||
```bash
|
||||
kurtosis service logs local-testnet $SERVICE_NAME -a > logs.txt
|
||||
```
|
||||
where `$SERVICE_NAME` can be viewed by running `kurtosis enclave inspect local-testnet`.
|
||||
|
||||
Kurtosis comes with a Dora explorer which can be opened with:
|
||||
|
||||
```bash
|
||||
open $(kurtosis port print local-testnet dora http)
|
||||
```
|
||||
|
||||
Some testnet parameters can be varied by modifying the `network_params.yaml` file. Kurtosis also comes with a web UI which can be open with `kurtosis web`.
|
||||
|
||||
## Stopping the testnet
|
||||
|
||||
To stop the testnet, navigate to the directory `cd ~/lighthouse/scripts/local_testnet`, then run the command:
|
||||
To stop the testnet, from the Lighthouse root repository:
|
||||
|
||||
```bash
|
||||
cd ./scripts/local_testnet
|
||||
./stop_local_testnet.sh
|
||||
```
|
||||
|
||||
Once a testnet is stopped, it cannot be continued from where it left off. When the start local testnet command is run, it will start a new local testnet.
|
||||
You will see "Local testnet stopped." at the end.
|
||||
|
||||
## Manual creation of local testnet
|
||||
## CLI options
|
||||
|
||||
In [Starting the testnet](./README.md#starting-the-testnet), the testnet is started automatically with predetermined parameters (database directory, ports used etc). This section describes some modifications of the local testnet settings, e.g., changing the database directory, or changing the ports used.
|
||||
|
||||
|
||||
The testnet also contains parameters that are specified in `vars.env`, such as the slot time `SECONDS_PER_SLOT=3` (instead of 12 seconds on mainnet). You may change these parameters to suit your testing purposes. After that, in the `local_testnet` directory, run the following command to create genesis state with embedded validators and validator keys, and also to update the time in `genesis.json`:
|
||||
The script comes with some CLI options, which can be viewed with `./start_local_testnet.sh --help`. One of the CLI options is to avoid rebuilding Lighthouse each time the testnet starts, which can be configured with the command:
|
||||
|
||||
```bash
|
||||
./setup.sh
|
||||
./setup_time.sh genesis.json
|
||||
```
|
||||
|
||||
Note: The generated genesis validators are embedded into the genesis state as genesis validators and hence do not require manual deposits to activate.
|
||||
|
||||
Generate a bootnode ENR and start an EL and CL bootnode so that multiple nodes can find each other:
|
||||
```bash
|
||||
./bootnode.sh
|
||||
./el_bootnode.sh
|
||||
```
|
||||
|
||||
Start a geth node:
|
||||
```bash
|
||||
./geth.sh <DATADIR> <NETWORK-PORT> <HTTP-PORT> <AUTH-HTTP-PORT> <GENESIS_FILE>
|
||||
```
|
||||
e.g.
|
||||
```bash
|
||||
./geth.sh $HOME/.lighthouse/local-testnet/geth_1 7001 6001 5001 genesis.json
|
||||
```
|
||||
|
||||
Start a beacon node:
|
||||
|
||||
```bash
|
||||
./beacon_node.sh <DATADIR> <NETWORK-PORT> <QUIC-PORT> <HTTP-PORT> <EXECUTION-ENDPOINT> <EXECUTION-JWT-PATH> <OPTIONAL-DEBUG-LEVEL>
|
||||
```
|
||||
e.g.
|
||||
```bash
|
||||
./beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9001 9101 8001 http://localhost:5001 ~/.lighthouse/local-testnet/geth_1/geth/jwtsecret
|
||||
```
|
||||
|
||||
In a new terminal, start the validator client which will attach to the first
|
||||
beacon node:
|
||||
|
||||
```bash
|
||||
./validator_client.sh <DATADIR> <BEACON-NODE-HTTP> <OPTIONAL-DEBUG-LEVEL>
|
||||
```
|
||||
e.g. to attach to the above created beacon node
|
||||
```bash
|
||||
./validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8001
|
||||
```
|
||||
|
||||
You can create additional geth, beacon node and validator client instances by changing the ports, e.g., for a second geth, beacon node and validator client:
|
||||
|
||||
```bash
|
||||
./geth.sh $HOME/.lighthouse/local-testnet/geth_2 7002 6002 5002 genesis.json
|
||||
./beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9002 9102 8002 http://localhost:5002 ~/.lighthouse/local-testnet/geth_2/geth/jwtsecret
|
||||
./validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:8002
|
||||
```
|
||||
|
||||
## Additional Info
|
||||
|
||||
### Adjusting number and distribution of validators
|
||||
The `VALIDATOR_COUNT` parameter is used to specify the number of insecure validator keystores to generate and make deposits for.
|
||||
The `BN_COUNT` parameter is used to adjust the division of these generated keys among separate validator client instances.
|
||||
For example, with `VALIDATOR_COUNT=80` and `BN_COUNT=4`, the validator keys are distributed over 4 datadirs with 20 keystores per datadir. The datadirs are located in `$DATADIR/node_{i}` which can be passed to separate validator client
|
||||
instances using the `--datadir` parameter.
|
||||
|
||||
### Starting fresh
|
||||
|
||||
You can delete the current testnet and all related files using the following command. Alternatively, if you wish to start another testnet, doing the steps [Starting the testnet](./README.md#starting-the-testnet) will automatically delete the files and start a fresh local testnet.
|
||||
|
||||
```bash
|
||||
./clean.sh
|
||||
```
|
||||
|
||||
### Updating the genesis time of the beacon state
|
||||
|
||||
If it's been a while since you ran `./setup.sh` then the genesis time of the
|
||||
genesis state will be far in the future, causing lots of skip slots.
|
||||
|
||||
Update the genesis time to now using:
|
||||
|
||||
```bash
|
||||
./reset_genesis_time.sh
|
||||
```
|
||||
|
||||
> Note: you probably want to just rerun `./start_local_testnet.sh` to start over
|
||||
> but this is another option.
|
||||
|
||||
### Testing builder flow
|
||||
|
||||
1. Add builder URL to `BN_ARGS` in `./vars.env`, e.g. `--builder http://localhost:8650`. Some mock builder server options:
|
||||
- [`mock-relay`](https://github.com/realbigsean/mock-relay)
|
||||
- [`dummy-builder`](https://github.com/michaelsproul/dummy_builder)
|
||||
2. The above mock builders do not support non-mainnet presets as of now, and will require setting `SECONDS_PER_SLOT` and `SECONDS_PER_ETH1_BLOCK` to `12` in `./vars.env`.
|
||||
3. Start the testnet with the following command (the `-p` flag enables the validator client `--builder-proposals` flag):
|
||||
```bash
|
||||
./start_local_testnet.sh -p genesis.json
|
||||
```
|
||||
4. Block production using builder flow will start at epoch 4.
|
||||
|
||||
### Testing sending a transaction
|
||||
|
||||
Some addresses in the local testnet are seeded with testnet ETH, allowing users to carry out transactions. To send a transaction, we first add the address to a wallet, such as [Metamask](https://metamask.io/). The private keys for the addresses are listed [here](https://github.com/sigp/lighthouse/blob/441fc1691b69f9edc4bbdc6665f3efab16265c9b/testing/execution_engine_integration/src/execution_engine.rs#L13-L14).
|
||||
|
||||
Next, we add the local testnet to Metamask, a brief guide can be found [here](https://support.metamask.io/hc/en-us/articles/360043227612-How-to-add-a-custom-network-RPC). If you start the local testnet with default settings, the network RPC is: http://localhost:6001 and the `Chain ID` is `4242`, as defined in [`vars.env`](https://github.com/sigp/lighthouse/blob/441fc1691b69f9edc4bbdc6665f3efab16265c9b/scripts/local_testnet/vars.env#L42). Once the network and account are added, you should see that the account contains testnet ETH, which allows you to carry out transactions.
|
||||
./start_local_testnet.sh -b false
|
||||
```
|
||||
@@ -11,7 +11,7 @@ use unused_port::unused_tcp4_port;
|
||||
/// We've pinned the Nethermind version since our method of using the `master` branch to
|
||||
/// find the latest tag isn't working. It appears Nethermind don't always tag on `master`.
|
||||
/// We should fix this so we always pull the latest version of Nethermind.
|
||||
const NETHERMIND_BRANCH: &str = "release/1.21.0";
|
||||
const NETHERMIND_BRANCH: &str = "release/1.27.0";
|
||||
const NETHERMIND_REPO_URL: &str = "https://github.com/NethermindEth/nethermind";
|
||||
|
||||
fn build_result(repo_dir: &Path) -> Output {
|
||||
@@ -70,11 +70,10 @@ impl NethermindEngine {
|
||||
.join("nethermind")
|
||||
.join("src")
|
||||
.join("Nethermind")
|
||||
.join("Nethermind.Runner")
|
||||
.join("artifacts")
|
||||
.join("bin")
|
||||
.join("Release")
|
||||
.join("net7.0")
|
||||
.join("linux-x64")
|
||||
.join("Nethermind.Runner")
|
||||
.join("release")
|
||||
.join("nethermind")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -67,9 +67,9 @@ impl From<Hash256> for SigningRoot {
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<Hash256> for SigningRoot {
|
||||
fn into(self) -> Hash256 {
|
||||
self.0
|
||||
impl From<SigningRoot> for Hash256 {
|
||||
fn from(from: SigningRoot) -> Hash256 {
|
||||
from.0
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user