Mirror of https://github.com/sigp/lighthouse.git, synced 2026-03-03 00:31:50 +00:00
Altair validator client and HTTP API (#2404)
## Proposed Changes

* Implement the validator client and HTTP API changes necessary to support Altair.

Co-authored-by: realbigsean <seananderson33@gmail.com>
Co-authored-by: Michael Sproul <michael@sigmaprime.io>
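
As a rough illustration of the validator-client-facing surface added here, the sketch below queries sync committee duties using the `eth2` client calls exercised by the tests in this commit (`BeaconNodeHttpClient`, `Timeouts::set_all`, `post_validator_duties_sync`). It is a minimal, hypothetical example: the node URL, validator indices, and the `print_sync_duties` function name are placeholders, not values from this PR.

```rust
use eth2::{BeaconNodeHttpClient, Timeouts};
use sensitive_url::SensitiveUrl;
use std::time::Duration;
use types::Epoch;

// Hypothetical example: fetch sync committee duties for a handful of validators.
async fn print_sync_duties() -> Result<(), eth2::Error> {
    let client = BeaconNodeHttpClient::new(
        // Placeholder beacon node address.
        SensitiveUrl::parse("http://127.0.0.1:5052").unwrap(),
        Timeouts::set_all(Duration::from_secs(1)),
    );

    // Placeholder indices of the validators this client manages.
    let validator_indices: Vec<u64> = vec![0, 1, 2];

    // Same client method the fork tests below use to hit the sync duties endpoint.
    let duties = client
        .post_validator_duties_sync(Epoch::new(0), &validator_indices)
        .await?
        .data;

    println!("received {} sync committee duties", duties.len());
    Ok(())
}
```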
beacon_node/http_api/tests/common.rs (new file, 142 lines)
@@ -0,0 +1,142 @@
use beacon_chain::{
    test_utils::{BeaconChainHarness, EphemeralHarnessType},
    BeaconChain, BeaconChainTypes,
};
use eth2::{BeaconNodeHttpClient, Timeouts};
use eth2_libp2p::{
    discv5::enr::{CombinedKey, EnrBuilder},
    rpc::methods::{MetaData, MetaDataV2},
    types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState},
    Enr, NetworkGlobals, PeerId,
};
use http_api::{Config, Context};
use network::NetworkMessage;
use sensitive_url::SensitiveUrl;
use slog::Logger;
use std::future::Future;
use std::net::{Ipv4Addr, SocketAddr};
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::{mpsc, oneshot};
use types::{test_utils::generate_deterministic_keypairs, ChainSpec, EthSpec};

pub const TCP_PORT: u16 = 42;
pub const UDP_PORT: u16 = 42;
pub const SEQ_NUMBER: u64 = 0;
pub const EXTERNAL_ADDR: &str = "/ip4/0.0.0.0/tcp/9000";

/// HTTP API tester that allows interaction with the underlying beacon chain harness.
pub struct InteractiveTester<E: EthSpec> {
    pub harness: BeaconChainHarness<EphemeralHarnessType<E>>,
    pub client: BeaconNodeHttpClient,
    pub network_rx: mpsc::UnboundedReceiver<NetworkMessage<E>>,
    _server_shutdown: oneshot::Sender<()>,
}

/// The result of calling `create_api_server`.
///
/// Glue-type between `tests::ApiTester` and `InteractiveTester`.
pub struct ApiServer<E: EthSpec, SFut: Future<Output = ()>> {
    pub server: SFut,
    pub listening_socket: SocketAddr,
    pub shutdown_tx: oneshot::Sender<()>,
    pub network_rx: tokio::sync::mpsc::UnboundedReceiver<NetworkMessage<E>>,
    pub local_enr: Enr,
    pub external_peer_id: PeerId,
}

impl<E: EthSpec> InteractiveTester<E> {
    pub fn new(spec: Option<ChainSpec>, validator_count: usize) -> Self {
        let harness = BeaconChainHarness::new(
            E::default(),
            spec,
            generate_deterministic_keypairs(validator_count),
        );

        let ApiServer {
            server,
            listening_socket,
            shutdown_tx: _server_shutdown,
            network_rx,
            ..
        } = create_api_server(harness.chain.clone(), harness.logger().clone());

        tokio::spawn(server);

        let client = BeaconNodeHttpClient::new(
            SensitiveUrl::parse(&format!(
                "http://{}:{}",
                listening_socket.ip(),
                listening_socket.port()
            ))
            .unwrap(),
            Timeouts::set_all(Duration::from_secs(1)),
        );

        Self {
            harness,
            client,
            network_rx,
            _server_shutdown,
        }
    }
}

pub fn create_api_server<T: BeaconChainTypes>(
    chain: Arc<BeaconChain<T>>,
    log: Logger,
) -> ApiServer<T::EthSpec, impl Future<Output = ()>> {
    let (network_tx, network_rx) = mpsc::unbounded_channel();

    // Default metadata
    let meta_data = MetaData::V2(MetaDataV2 {
        seq_number: SEQ_NUMBER,
        attnets: EnrAttestationBitfield::<T::EthSpec>::default(),
        syncnets: EnrSyncCommitteeBitfield::<T::EthSpec>::default(),
    });
    let enr_key = CombinedKey::generate_secp256k1();
    let enr = EnrBuilder::new("v4").build(&enr_key).unwrap();
    let network_globals =
        NetworkGlobals::new(enr.clone(), TCP_PORT, UDP_PORT, meta_data, vec![], &log);

    let peer_id = PeerId::random();
    network_globals
        .peers
        .write()
        .connect_ingoing(&peer_id, EXTERNAL_ADDR.parse().unwrap(), None);

    *network_globals.sync_state.write() = SyncState::Synced;

    let eth1_service =
        eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone());

    let context = Arc::new(Context {
        config: Config {
            enabled: true,
            listen_addr: Ipv4Addr::new(127, 0, 0, 1),
            listen_port: 0,
            allow_origin: None,
            serve_legacy_spec: true,
        },
        chain: Some(chain.clone()),
        network_tx: Some(network_tx),
        network_globals: Some(Arc::new(network_globals)),
        eth1_service: Some(eth1_service),
        log,
    });
    let ctx = context.clone();
    let (shutdown_tx, shutdown_rx) = oneshot::channel();
    let server_shutdown = async {
        // It's not really interesting why this triggered, just that it happened.
        let _ = shutdown_rx.await;
    };
    let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap();

    ApiServer {
        server,
        listening_socket,
        shutdown_tx,
        network_rx,
        local_enr: enr,
        external_peer_id: peer_id,
    }
}
beacon_node/http_api/tests/fork_tests.rs (new file, 305 lines)
@@ -0,0 +1,305 @@
//! Tests for API behaviour across fork boundaries.
use crate::common::*;
use beacon_chain::{test_utils::RelativeSyncCommittee, StateSkipConfig};
use eth2::types::{StateId, SyncSubcommittee};
use types::{ChainSpec, Epoch, EthSpec, MinimalEthSpec, Slot};

type E = MinimalEthSpec;

fn altair_spec(altair_fork_epoch: Epoch) -> ChainSpec {
    let mut spec = E::default_spec();
    spec.altair_fork_epoch = Some(altair_fork_epoch);
    spec
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn sync_committee_duties_across_fork() {
    let validator_count = E::sync_committee_size();
    let fork_epoch = Epoch::new(8);
    let spec = altair_spec(fork_epoch);
    let tester = InteractiveTester::<E>::new(Some(spec.clone()), validator_count);
    let harness = &tester.harness;
    let client = &tester.client;

    let all_validators = harness.get_all_validators();
    let all_validators_u64 = all_validators.iter().map(|x| *x as u64).collect::<Vec<_>>();

    assert_eq!(harness.get_current_slot(), 0);

    // Prior to the fork the endpoint should return an empty vec.
    let early_duties = client
        .post_validator_duties_sync(fork_epoch - 1, &all_validators_u64)
        .await
        .unwrap()
        .data;
    assert!(early_duties.is_empty());

    // If there's a skip slot at the fork slot, the endpoint should return duties, even
    // though the head state hasn't transitioned yet.
    let fork_slot = fork_epoch.start_slot(E::slots_per_epoch());
    let (genesis_state, genesis_state_root) = harness.get_current_state_and_root();
    let (_, state) = harness
        .add_attested_block_at_slot(
            fork_slot - 1,
            genesis_state,
            genesis_state_root,
            &all_validators,
        )
        .unwrap();

    harness.advance_slot();
    assert_eq!(harness.get_current_slot(), fork_slot);

    let sync_duties = client
        .post_validator_duties_sync(fork_epoch, &all_validators_u64)
        .await
        .unwrap()
        .data;
    assert_eq!(sync_duties.len(), E::sync_committee_size());

    // After applying a block at the fork slot the duties should remain unchanged.
    let state_root = state.canonical_root();
    harness
        .add_attested_block_at_slot(fork_slot, state, state_root, &all_validators)
        .unwrap();

    assert_eq!(
        client
            .post_validator_duties_sync(fork_epoch, &all_validators_u64)
            .await
            .unwrap()
            .data,
        sync_duties
    );

    // Sync duties should also be available for the next period.
    let current_period = fork_epoch.sync_committee_period(&spec).unwrap();
    let next_period_epoch = spec.epochs_per_sync_committee_period * (current_period + 1);

    let next_period_duties = client
        .post_validator_duties_sync(next_period_epoch, &all_validators_u64)
        .await
        .unwrap()
        .data;
    assert_eq!(next_period_duties.len(), E::sync_committee_size());

    // Sync duties should *not* be available for the period after the next period.
    // We expect a 400 (bad request) response.
    let next_next_period_epoch = spec.epochs_per_sync_committee_period * (current_period + 2);
    assert_eq!(
        client
            .post_validator_duties_sync(next_next_period_epoch, &all_validators_u64)
            .await
            .unwrap_err()
            .status()
            .unwrap(),
        400
    );
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn attestations_across_fork_with_skip_slots() {
    let validator_count = E::sync_committee_size();
    let fork_epoch = Epoch::new(8);
    let spec = altair_spec(fork_epoch);
    let tester = InteractiveTester::<E>::new(Some(spec.clone()), validator_count);
    let harness = &tester.harness;
    let client = &tester.client;

    let all_validators = harness.get_all_validators();

    let fork_slot = fork_epoch.start_slot(E::slots_per_epoch());
    let fork_state = harness
        .chain
        .state_at_slot(fork_slot, StateSkipConfig::WithStateRoots)
        .unwrap();

    harness.set_current_slot(fork_slot);

    let attestations = harness.make_attestations(
        &all_validators,
        &fork_state,
        fork_state.canonical_root(),
        (*fork_state.get_block_root(fork_slot - 1).unwrap()).into(),
        fork_slot,
    );

    let unaggregated_attestations = attestations
        .iter()
        .flat_map(|(atts, _)| atts.iter().map(|(att, _)| att.clone()))
        .collect::<Vec<_>>();

    assert!(!unaggregated_attestations.is_empty());
    client
        .post_beacon_pool_attestations(&unaggregated_attestations)
        .await
        .unwrap();

    let signed_aggregates = attestations
        .into_iter()
        .filter_map(|(_, op_aggregate)| op_aggregate)
        .collect::<Vec<_>>();
    assert!(!signed_aggregates.is_empty());

    client
        .post_validator_aggregate_and_proof(&signed_aggregates)
        .await
        .unwrap();
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn sync_contributions_across_fork_with_skip_slots() {
    let validator_count = E::sync_committee_size();
    let fork_epoch = Epoch::new(8);
    let spec = altair_spec(fork_epoch);
    let tester = InteractiveTester::<E>::new(Some(spec.clone()), validator_count);
    let harness = &tester.harness;
    let client = &tester.client;

    let fork_slot = fork_epoch.start_slot(E::slots_per_epoch());
    let fork_state = harness
        .chain
        .state_at_slot(fork_slot, StateSkipConfig::WithStateRoots)
        .unwrap();

    harness.set_current_slot(fork_slot);

    let sync_messages = harness.make_sync_contributions(
        &fork_state,
        *fork_state.get_block_root(fork_slot - 1).unwrap(),
        fork_slot,
        RelativeSyncCommittee::Current,
    );

    let sync_committee_messages = sync_messages
        .iter()
        .flat_map(|(messages, _)| messages.iter().map(|(message, _subnet)| message.clone()))
        .collect::<Vec<_>>();
    assert!(!sync_committee_messages.is_empty());

    client
        .post_beacon_pool_sync_committee_signatures(&sync_committee_messages)
        .await
        .unwrap();

    let signed_contributions = sync_messages
        .into_iter()
        .filter_map(|(_, op_aggregate)| op_aggregate)
        .collect::<Vec<_>>();
    assert!(!signed_contributions.is_empty());

    client
        .post_validator_contribution_and_proofs(&signed_contributions)
        .await
        .unwrap();
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn sync_committee_indices_across_fork() {
    let validator_count = E::sync_committee_size();
    let fork_epoch = Epoch::new(8);
    let spec = altair_spec(fork_epoch);
    let tester = InteractiveTester::<E>::new(Some(spec.clone()), validator_count);
    let harness = &tester.harness;
    let client = &tester.client;

    let all_validators = harness.get_all_validators();

    // Flatten subcommittees into a single vec.
    let flatten = |subcommittees: &[SyncSubcommittee]| -> Vec<u64> {
        subcommittees
            .iter()
            .flat_map(|sub| sub.indices.iter().copied())
            .collect()
    };

    // Prior to the fork the `sync_committees` endpoint should return a 400 error.
    assert_eq!(
        client
            .get_beacon_states_sync_committees(StateId::Slot(Slot::new(0)), None)
            .await
            .unwrap_err()
            .status()
            .unwrap(),
        400
    );
    assert_eq!(
        client
            .get_beacon_states_sync_committees(StateId::Head, Some(Epoch::new(0)))
            .await
            .unwrap_err()
            .status()
            .unwrap(),
        400
    );

    // If there's a skip slot at the fork slot, the endpoint will return a 400 until a block is
    // applied.
    let fork_slot = fork_epoch.start_slot(E::slots_per_epoch());
    let (genesis_state, genesis_state_root) = harness.get_current_state_and_root();
    let (_, state) = harness
        .add_attested_block_at_slot(
            fork_slot - 1,
            genesis_state,
            genesis_state_root,
            &all_validators,
        )
        .unwrap();

    harness.advance_slot();
    assert_eq!(harness.get_current_slot(), fork_slot);

    // Using the head state must fail.
    assert_eq!(
        client
            .get_beacon_states_sync_committees(StateId::Head, Some(fork_epoch))
            .await
            .unwrap_err()
            .status()
            .unwrap(),
        400
    );

    // In theory we could do a state advance and make this work, but to keep things simple I've
    // avoided doing that for now.
    assert_eq!(
        client
            .get_beacon_states_sync_committees(StateId::Slot(fork_slot), None)
            .await
            .unwrap_err()
            .status()
            .unwrap(),
        400
    );

    // Once the head is updated it should be useable for requests, including in the next sync
    // committee period.
    let state_root = state.canonical_root();
    harness
        .add_attested_block_at_slot(fork_slot + 1, state, state_root, &all_validators)
        .unwrap();

    let current_period = fork_epoch.sync_committee_period(&spec).unwrap();
    let next_period_epoch = spec.epochs_per_sync_committee_period * (current_period + 1);
    assert!(next_period_epoch > fork_epoch);

    for epoch in [
        None,
        Some(fork_epoch),
        Some(fork_epoch + 1),
        Some(next_period_epoch),
        Some(next_period_epoch + 1),
    ] {
        let committee = client
            .get_beacon_states_sync_committees(StateId::Head, epoch)
            .await
            .unwrap()
            .data;
        assert_eq!(committee.validators.len(), E::sync_committee_size());

        assert_eq!(
            committee.validators,
            flatten(&committee.validator_aggregates)
        );
    }
}
beacon_node/http_api/tests/main.rs (new file, 6 lines)
@@ -0,0 +1,6 @@
#![cfg(not(debug_assertions))] // Tests are too slow in debug.
#![recursion_limit = "256"]

pub mod common;
pub mod fork_tests;
pub mod tests;
beacon_node/http_api/tests/tests.rs (modified)
@@ -1,6 +1,4 @@
#![cfg(not(debug_assertions))] // Tests are too slow in debug.
#![recursion_limit = "256"]

use crate::common::{create_api_server, ApiServer};
use beacon_chain::{
    test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
    BeaconChain, StateSkipConfig, WhenSlotSkipped, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
@@ -9,21 +7,14 @@ use environment::null_logger;
use eth2::Error;
use eth2::StatusCode;
use eth2::{types::*, BeaconNodeHttpClient, Timeouts};
use eth2_libp2p::discv5::enr::{CombinedKey, EnrBuilder};
use eth2_libp2p::{
    rpc::methods::{MetaData, MetaDataV2},
    types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState},
    Enr, EnrExt, NetworkGlobals, PeerId,
};
use eth2_libp2p::{Enr, EnrExt, PeerId};
use futures::stream::{Stream, StreamExt};
use futures::FutureExt;
use http_api::{Config, Context};
use network::NetworkMessage;
use sensitive_url::SensitiveUrl;
use slot_clock::SlotClock;
use state_processing::per_slot_processing;
use std::convert::TryInto;
use std::net::Ipv4Addr;
use std::sync::Arc;
use tokio::sync::{mpsc, oneshot};
use tokio::time::Duration;

@@ -41,9 +32,6 @@ const VALIDATOR_COUNT: usize = SLOTS_PER_EPOCH as usize;
const CHAIN_LENGTH: u64 = SLOTS_PER_EPOCH * 5 - 1; // Make `next_block` an epoch transition
const JUSTIFIED_EPOCH: u64 = 4;
const FINALIZED_EPOCH: u64 = 3;
const TCP_PORT: u16 = 42;
const UDP_PORT: u16 = 42;
const SEQ_NUMBER: u64 = 0;
const EXTERNAL_ADDR: &str = "/ip4/0.0.0.0/tcp/9000";

/// Skipping the slots around the epoch boundary allows us to check that we're obtaining states
@@ -74,9 +62,13 @@ struct ApiTester {

impl ApiTester {
    pub fn new() -> Self {
        let mut harness = BeaconChainHarness::new(
        // This allows for testing voluntary exits without building out a massive chain.
        let mut spec = E::default_spec();
        spec.shard_committee_period = 2;

        let harness = BeaconChainHarness::new(
            MainnetEthSpec,
            None,
            Some(spec),
            generate_deterministic_keypairs(VALIDATOR_COUNT),
        );

@@ -134,13 +126,7 @@ impl ApiTester {
        let proposer_slashing = harness.make_proposer_slashing(2);
        let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap());

        // Changing this *after* the chain has been initialized is a bit cheeky, but it shouldn't
        // cause issue.
        //
        // This allows for testing voluntary exits without building out a massive chain.
        harness.chain.spec.shard_committee_period = 2;

        let chain = Arc::new(harness.chain);
        let chain = harness.chain.clone();

        assert_eq!(
            chain.head_info().unwrap().finalized_checkpoint.epoch,
@@ -157,56 +143,18 @@ impl ApiTester {
            "precondition: justification"
        );

        let (network_tx, network_rx) = mpsc::unbounded_channel();

        let log = null_logger().unwrap();

        // Default metadata
        let meta_data = MetaData::V2(MetaDataV2 {
            seq_number: SEQ_NUMBER,
            attnets: EnrAttestationBitfield::<MainnetEthSpec>::default(),
            syncnets: EnrSyncCommitteeBitfield::<MainnetEthSpec>::default(),
        });
        let enr_key = CombinedKey::generate_secp256k1();
        let enr = EnrBuilder::new("v4").build(&enr_key).unwrap();
        let enr_clone = enr.clone();
        let network_globals = NetworkGlobals::new(enr, TCP_PORT, UDP_PORT, meta_data, vec![], &log);
        let ApiServer {
            server,
            listening_socket,
            shutdown_tx,
            network_rx,
            local_enr,
            external_peer_id,
        } = create_api_server(chain.clone(), log);

        let peer_id = PeerId::random();
        network_globals.peers.write().connect_ingoing(
            &peer_id,
            EXTERNAL_ADDR.parse().unwrap(),
            None,
        );

        *network_globals.sync_state.write() = SyncState::Synced;

        let eth1_service =
            eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone());

        let context = Arc::new(Context {
            config: Config {
                enabled: true,
                listen_addr: Ipv4Addr::new(127, 0, 0, 1),
                listen_port: 0,
                allow_origin: None,
                serve_legacy_spec: true,
            },
            chain: Some(chain.clone()),
            network_tx: Some(network_tx),
            network_globals: Some(Arc::new(network_globals)),
            eth1_service: Some(eth1_service),
            log,
        });
        let ctx = context.clone();
        let (shutdown_tx, shutdown_rx) = oneshot::channel();
        let server_shutdown = async {
            // It's not really interesting why this triggered, just that it happened.
            let _ = shutdown_rx.await;
        };
        let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap();

        tokio::spawn(async { server.await });
        tokio::spawn(server);

        let client = BeaconNodeHttpClient::new(
            SensitiveUrl::parse(&format!(
@@ -230,8 +178,8 @@ impl ApiTester {
            _server_shutdown: shutdown_tx,
            validator_keypairs: harness.validator_keypairs,
            network_rx,
            local_enr: enr_clone,
            external_peer_id: peer_id,
            local_enr,
            external_peer_id,
        }
    }

@@ -271,58 +219,20 @@ impl ApiTester {
        let proposer_slashing = harness.make_proposer_slashing(2);
        let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap());

        let chain = Arc::new(harness.chain);

        let (network_tx, network_rx) = mpsc::unbounded_channel();
        let chain = harness.chain.clone();

        let log = null_logger().unwrap();

        // Default metadata
        let meta_data = MetaData::V2(MetaDataV2 {
            seq_number: SEQ_NUMBER,
            attnets: EnrAttestationBitfield::<MainnetEthSpec>::default(),
            syncnets: EnrSyncCommitteeBitfield::<MainnetEthSpec>::default(),
        });
        let enr_key = CombinedKey::generate_secp256k1();
        let enr = EnrBuilder::new("v4").build(&enr_key).unwrap();
        let enr_clone = enr.clone();
        let network_globals = NetworkGlobals::new(enr, TCP_PORT, UDP_PORT, meta_data, vec![], &log);
        let ApiServer {
            server,
            listening_socket,
            shutdown_tx,
            network_rx,
            local_enr,
            external_peer_id,
        } = create_api_server(chain.clone(), log);

        let peer_id = PeerId::random();
        network_globals.peers.write().connect_ingoing(
            &peer_id,
            EXTERNAL_ADDR.parse().unwrap(),
            None,
        );

        *network_globals.sync_state.write() = SyncState::Synced;

        let eth1_service =
            eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone());

        let context = Arc::new(Context {
            config: Config {
                enabled: true,
                listen_addr: Ipv4Addr::new(127, 0, 0, 1),
                listen_port: 0,
                allow_origin: None,
                serve_legacy_spec: true,
            },
            chain: Some(chain.clone()),
            network_tx: Some(network_tx),
            network_globals: Some(Arc::new(network_globals)),
            eth1_service: Some(eth1_service),
            log,
        });
        let ctx = context.clone();
        let (shutdown_tx, shutdown_rx) = oneshot::channel();
        let server_shutdown = async {
            // It's not really interesting why this triggered, just that it happened.
            let _ = shutdown_rx.await;
        };
        let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap();

        tokio::spawn(async { server.await });
        tokio::spawn(server);

        let client = BeaconNodeHttpClient::new(
            SensitiveUrl::parse(&format!(
@@ -346,8 +256,8 @@ impl ApiTester {
            _server_shutdown: shutdown_tx,
            validator_keypairs: harness.validator_keypairs,
            network_rx,
            local_enr: enr_clone,
            external_peer_id: peer_id,
            local_enr,
            external_peer_id,
        }
    }

@@ -1011,13 +921,18 @@ impl ApiTester {
                }
            }

            let json_result = self
                .client
                .get_beacon_blocks(block_id)
                .await
                .unwrap()
                .map(|res| res.data);
            assert_eq!(json_result, expected, "{:?}", block_id);
            let json_result = self.client.get_beacon_blocks(block_id).await.unwrap();

            if let (Some(json), Some(expected)) = (&json_result, &expected) {
                assert_eq!(json.data, *expected, "{:?}", block_id);
                assert_eq!(
                    json.version,
                    Some(expected.fork_name(&self.chain.spec).unwrap())
                );
            } else {
                assert_eq!(json_result, None);
                assert_eq!(expected, None);
            }

            let ssz_result = self
                .client
@@ -1025,6 +940,16 @@ impl ApiTester {
                .await
                .unwrap();
            assert_eq!(ssz_result, expected, "{:?}", block_id);

            // Check that the legacy v1 API still works but doesn't return a version field.
            let v1_result = self.client.get_beacon_blocks_v1(block_id).await.unwrap();
            if let (Some(v1_result), Some(expected)) = (&v1_result, &expected) {
                assert_eq!(v1_result.version, None);
                assert_eq!(v1_result.data, *expected);
            } else {
                assert_eq!(v1_result, None);
                assert_eq!(expected, None);
            }
        }

        self
@@ -1443,23 +1368,44 @@ impl ApiTester {

    pub async fn test_get_debug_beacon_states(self) -> Self {
        for state_id in self.interesting_state_ids() {
            let result_json = self.client.get_debug_beacon_states(state_id).await.unwrap();

            let mut expected = self.get_state(state_id);
            expected.as_mut().map(|state| state.drop_all_caches());

            if let (Some(json), Some(expected)) = (&result_json, &expected) {
                assert_eq!(json.data, *expected, "{:?}", state_id);
                assert_eq!(
                    json.version,
                    Some(expected.fork_name(&self.chain.spec).unwrap())
                );
            } else {
                assert_eq!(result_json, None);
                assert_eq!(expected, None);
            }

            // Check SSZ API.
            let result_ssz = self
                .client
                .get_debug_beacon_states_ssz(state_id, &self.chain.spec)
                .await
                .unwrap();
            let result_json = self
                .client
                .get_debug_beacon_states(state_id)
                .await
                .unwrap()
                .map(|res| res.data);

            let mut expected = self.get_state(state_id);
            expected.as_mut().map(|state| state.drop_all_caches());

            assert_eq!(result_ssz, expected, "{:?}", state_id);
            assert_eq!(result_json, expected, "{:?}", state_id);

            // Check legacy v1 API.
            let result_v1 = self
                .client
                .get_debug_beacon_states_v1(state_id)
                .await
                .unwrap();

            if let (Some(json), Some(expected)) = (&result_v1, &expected) {
                assert_eq!(json.version, None);
                assert_eq!(json.data, *expected, "{:?}", state_id);
            } else {
                assert_eq!(result_v1, None);
                assert_eq!(expected, None);
            }
        }

        self