mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-06 10:11:44 +00:00
Engine API v1.0.0.alpha.6 + interop tests (#3024)
## Issue Addressed

NA

## Proposed Changes

This PR extends #3018 to address my review comments there and add automated integration tests with Geth (and other implementations, in the future). I've also de-duplicated the "unused port" logic by creating a `common/unused_port` crate.

## Additional Info

I'm not sure if we want to merge this PR, or update #3018 and merge that. I don't mind; I'm primarily opening this PR to make sure CI works.

Co-authored-by: Mark Mackey <mark@sigmaprime.io>
This commit is contained in:
131
testing/execution_engine_integration/src/execution_engine.rs
Normal file
131
testing/execution_engine_integration/src/execution_engine.rs
Normal file
@@ -0,0 +1,131 @@
|
||||
use crate::{genesis_json::geth_genesis_json, SUPPRESS_LOGS};
|
||||
use sensitive_url::SensitiveUrl;
|
||||
use std::path::PathBuf;
|
||||
use std::process::{Child, Command, Output, Stdio};
|
||||
use std::{env, fs::File};
|
||||
use tempfile::TempDir;
|
||||
use unused_port::unused_tcp_port;
|
||||
|
||||
/// Defined for each EE type (e.g., Geth, Nethermind, etc).
pub trait GenericExecutionEngine: Clone {
    /// Creates a fresh temporary datadir and performs any one-off initialisation
    /// (e.g. applying a genesis config) required before the client can start.
    fn init_datadir() -> TempDir;
    /// Spawns the EE client process, serving its HTTP API on `http_port`.
    ///
    /// The caller owns the returned `Child` and is responsible for killing it.
    fn start_client(datadir: &TempDir, http_port: u16) -> Child;
}
|
||||
|
||||
/// Holds handle to a running EE process, plus some other metadata.
pub struct ExecutionEngine<E> {
    /// The engine-specific value this handle was constructed from.
    #[allow(dead_code)]
    engine: E,
    /// Owns the temporary datadir; the directory is deleted when this struct is dropped.
    #[allow(dead_code)]
    datadir: TempDir,
    /// The port on which the EE serves its HTTP API.
    http_port: u16,
    /// Handle to the spawned EE process (killed by this struct's `Drop` impl).
    child: Child,
}
|
||||
|
||||
impl<E> Drop for ExecutionEngine<E> {
|
||||
fn drop(&mut self) {
|
||||
// Ensure the EE process is killed on drop.
|
||||
if let Err(e) = self.child.kill() {
|
||||
eprintln!("failed to kill child: {:?}", e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: GenericExecutionEngine> ExecutionEngine<E> {
|
||||
pub fn new(engine: E) -> Self {
|
||||
let datadir = E::init_datadir();
|
||||
let http_port = unused_tcp_port().unwrap();
|
||||
let child = E::start_client(&datadir, http_port);
|
||||
Self {
|
||||
engine,
|
||||
datadir,
|
||||
http_port,
|
||||
child,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn http_url(&self) -> SensitiveUrl {
|
||||
SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_port)).unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Geth-specific Implementation
 */

// Marker type implementing `GenericExecutionEngine` for the go-ethereum ("geth") client.
#[derive(Clone)]
pub struct Geth;
|
||||
|
||||
impl Geth {
|
||||
fn binary_path() -> PathBuf {
|
||||
let manifest_dir: PathBuf = env::var("CARGO_MANIFEST_DIR").unwrap().into();
|
||||
manifest_dir
|
||||
.join("execution_clients")
|
||||
.join("go-ethereum")
|
||||
.join("build")
|
||||
.join("bin")
|
||||
.join("geth")
|
||||
}
|
||||
}
|
||||
|
||||
impl GenericExecutionEngine for Geth {
|
||||
fn init_datadir() -> TempDir {
|
||||
let datadir = TempDir::new().unwrap();
|
||||
|
||||
let genesis_json_path = datadir.path().join("genesis.json");
|
||||
let mut file = File::create(&genesis_json_path).unwrap();
|
||||
let json = geth_genesis_json();
|
||||
serde_json::to_writer(&mut file, &json).unwrap();
|
||||
|
||||
let output = Command::new(Self::binary_path())
|
||||
.arg("--datadir")
|
||||
.arg(datadir.path().to_str().unwrap())
|
||||
.arg("init")
|
||||
.arg(genesis_json_path.to_str().unwrap())
|
||||
.output()
|
||||
.expect("failed to init geth");
|
||||
|
||||
check_command_output(output, "geth init failed");
|
||||
|
||||
datadir
|
||||
}
|
||||
|
||||
fn start_client(datadir: &TempDir, http_port: u16) -> Child {
|
||||
let network_port = unused_tcp_port().unwrap();
|
||||
|
||||
Command::new(Self::binary_path())
|
||||
.arg("--datadir")
|
||||
.arg(datadir.path().to_str().unwrap())
|
||||
.arg("--http")
|
||||
.arg("--http.api")
|
||||
.arg("engine,eth")
|
||||
.arg("--http.port")
|
||||
.arg(http_port.to_string())
|
||||
.arg("--port")
|
||||
.arg(network_port.to_string())
|
||||
.stdout(build_stdio())
|
||||
.stderr(build_stdio())
|
||||
.spawn()
|
||||
.expect("failed to start beacon node")
|
||||
}
|
||||
}
|
||||
|
||||
/// Panics with `failure_msg` if `output` indicates the command exited unsuccessfully,
/// embedding the command's stdout/stderr in the panic message so failures are
/// diagnosable from CI logs.
fn check_command_output(output: Output, failure_msg: &'static str) {
    if !output.status.success() {
        let stdout = String::from_utf8_lossy(&output.stdout);
        let stderr = String::from_utf8_lossy(&output.stderr);

        // Include the captured output in the panic itself rather than via `dbg!`,
        // which is a temporary debugging aid and keeps the diagnostics separate
        // from the failure message.
        panic!(
            "{}\nstdout: {}\nstderr: {}",
            failure_msg, stdout, stderr
        );
    }
}
|
||||
|
||||
/// Builds the stdout/stderr handler for commands which might output to the terminal.
|
||||
fn build_stdio() -> Stdio {
|
||||
if SUPPRESS_LOGS {
|
||||
Stdio::null()
|
||||
} else {
|
||||
Stdio::inherit()
|
||||
}
|
||||
}
|
||||
42
testing/execution_engine_integration/src/genesis_json.rs
Normal file
42
testing/execution_engine_integration/src/genesis_json.rs
Normal file
@@ -0,0 +1,42 @@
|
||||
use serde_json::{json, Value};
|
||||
|
||||
/// Sourced from:
///
/// https://notes.ethereum.org/rmVErCfCRPKGqGkUe89-Kg
pub fn geth_genesis_json() -> Value {
    // NOTE(review): the values below are copied verbatim from the interop notes
    // linked above; keep them in sync with that document rather than editing ad-hoc.
    json!({
        "config": {
            "chainId":1,
            "homesteadBlock":0,
            "eip150Block":0,
            "eip155Block":0,
            "eip158Block":0,
            "byzantiumBlock":0,
            "constantinopleBlock":0,
            "petersburgBlock":0,
            "istanbulBlock":0,
            "muirGlacierBlock":0,
            "berlinBlock":0,
            "londonBlock":0,
            // Clique (geth's proof-of-authority engine) settings.
            "clique": {
                "period": 5,
                "epoch": 30000
            },
            // Zero terminal total difficulty — presumably so the merge transition
            // is active from the very start; confirm against the interop notes.
            "terminalTotalDifficulty":0
        },
        "nonce":"0x42",
        "timestamp":"0x0",
        "extraData":"0x0000000000000000000000000000000000000000000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "gasLimit":"0x1C9C380",
        "difficulty":"0x400000000",
        "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
        "coinbase":"0x0000000000000000000000000000000000000000",
        // Pre-funded account (the same address appears in `extraData` above).
        "alloc":{
            "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b":{"balance":"0x6d6172697573766477000000"}
        },
        "number":"0x0",
        "gasUsed":"0x0",
        "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
        "baseFeePerGas":"0x7"
    })
}
|
||||
12
testing/execution_engine_integration/src/lib.rs
Normal file
12
testing/execution_engine_integration/src/lib.rs
Normal file
@@ -0,0 +1,12 @@
|
||||
/// This library provides integration testing between Lighthouse and other execution engines.
|
||||
///
|
||||
/// See the `tests/tests.rs` file to run tests.
|
||||
mod execution_engine;
|
||||
mod genesis_json;
|
||||
mod test_rig;
|
||||
|
||||
pub use execution_engine::Geth;
|
||||
pub use test_rig::TestRig;
|
||||
|
||||
/// Set to `false` to send logs to the console during tests. Logs are useful when debugging.
|
||||
const SUPPRESS_LOGS: bool = true;
|
||||
363
testing/execution_engine_integration/src/test_rig.rs
Normal file
363
testing/execution_engine_integration/src/test_rig.rs
Normal file
@@ -0,0 +1,363 @@
|
||||
use crate::execution_engine::{ExecutionEngine, GenericExecutionEngine};
|
||||
use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatusV1Status};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
||||
use task_executor::TaskExecutor;
|
||||
use tokio::time::sleep;
|
||||
use types::{Address, ChainSpec, EthSpec, Hash256, MainnetEthSpec, Uint256};
|
||||
|
||||
const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(10);
|
||||
|
||||
/// A Lighthouse `ExecutionLayer` together with the EE process it is connected to.
struct ExecutionPair<E> {
    /// The Lighthouse `ExecutionLayer` struct, connected to the `execution_engine` via HTTP.
    execution_layer: ExecutionLayer,
    /// A handle to external EE process, once this is dropped the process will be killed.
    #[allow(dead_code)]
    execution_engine: ExecutionEngine<E>,
}
|
||||
|
||||
/// A rig that holds two EE processes for testing.
///
/// There are two EEs held here so that we can test out-of-order application of payloads, and other
/// edge-cases.
pub struct TestRig<E> {
    /// Kept alive so the tokio runtime (and tasks spawned on it) outlive the test.
    #[allow(dead_code)]
    runtime: Arc<tokio::runtime::Runtime>,
    /// First EE/execution-layer pair.
    ee_a: ExecutionPair<E>,
    /// Second EE/execution-layer pair.
    ee_b: ExecutionPair<E>,
    /// Chain spec used by the tests (terminal total difficulty overridden to zero).
    spec: ChainSpec,
    /// Held (not used) so the executor's exit future is not signalled until the rig drops.
    _runtime_shutdown: exit_future::Signal,
}
|
||||
|
||||
impl<E: GenericExecutionEngine> TestRig<E> {
    /// Creates a new rig: spins up a dedicated multi-threaded tokio runtime and two
    /// independent EE processes (`ee_a`, `ee_b`), each paired with its own
    /// Lighthouse `ExecutionLayer` connected over HTTP.
    pub fn new(generic_engine: E) -> Self {
        let log = environment::null_logger().unwrap();
        let runtime = Arc::new(
            tokio::runtime::Builder::new_multi_thread()
                .enable_all()
                .build()
                .unwrap(),
        );
        let (runtime_shutdown, exit) = exit_future::signal();
        let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
        let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx);

        let fee_recipient = None;

        let ee_a = {
            let execution_engine = ExecutionEngine::new(generic_engine.clone());
            let urls = vec![execution_engine.http_url()];
            let execution_layer =
                ExecutionLayer::from_urls(urls, fee_recipient, executor.clone(), log.clone())
                    .unwrap();
            ExecutionPair {
                execution_engine,
                execution_layer,
            }
        };

        let ee_b = {
            let execution_engine = ExecutionEngine::new(generic_engine);
            let urls = vec![execution_engine.http_url()];
            let execution_layer =
                ExecutionLayer::from_urls(urls, fee_recipient, executor, log).unwrap();
            ExecutionPair {
                execution_engine,
                execution_layer,
            }
        };

        // Override the TTD to zero so the terminal PoW block check succeeds against
        // the interop genesis (which also declares `terminalTotalDifficulty: 0`).
        let mut spec = MainnetEthSpec::default_spec();
        spec.terminal_total_difficulty = Uint256::zero();

        Self {
            runtime,
            ee_a,
            ee_b,
            spec,
            _runtime_shutdown: runtime_shutdown,
        }
    }

    /// Runs `perform_tests` to completion, blocking the calling thread.
    pub fn perform_tests_blocking(&self) {
        self.ee_a
            .execution_layer
            .block_on_generic(|_| async { self.perform_tests().await })
            .unwrap()
    }

    /// Polls each EE in turn until it reports as synced, panicking once
    /// `EXECUTION_ENGINE_START_TIMEOUT` has elapsed (the timeout window is shared
    /// across both engines, measured from the first poll).
    pub async fn wait_until_synced(&self) {
        let start_instant = Instant::now();

        for pair in [&self.ee_a, &self.ee_b] {
            loop {
                // Run the routine to check for online nodes.
                pair.execution_layer.watchdog_task().await;

                if pair.execution_layer.is_synced().await {
                    break;
                } else if start_instant + EXECUTION_ENGINE_START_TIMEOUT > Instant::now() {
                    // Still inside the timeout window; back off briefly and retry.
                    sleep(Duration::from_millis(500)).await;
                } else {
                    panic!("timeout waiting for execution engines to come online")
                }
            }
        }
    }

    /// Drives both EEs through an engine-API scenario: payload production and
    /// application on EE A (including an invalidated payload), then out-of-order
    /// payload application on EE B.
    pub async fn perform_tests(&self) {
        self.wait_until_synced().await;

        /*
         * Read the terminal block hash from both pairs, check it's equal.
         */

        let terminal_pow_block_hash = self
            .ee_a
            .execution_layer
            .get_terminal_pow_block_hash(&self.spec)
            .await
            .unwrap()
            .unwrap();

        assert_eq!(
            terminal_pow_block_hash,
            self.ee_b
                .execution_layer
                .get_terminal_pow_block_hash(&self.spec)
                .await
                .unwrap()
                .unwrap()
        );

        /*
         * Execution Engine A:
         *
         * Produce a valid payload atop the terminal block.
         */

        let parent_hash = terminal_pow_block_hash;
        let timestamp = timestamp_now();
        let random = Hash256::zero();
        let finalized_block_hash = Hash256::zero();
        let proposer_index = 0;
        let valid_payload = self
            .ee_a
            .execution_layer
            .get_payload::<MainnetEthSpec>(
                parent_hash,
                timestamp,
                random,
                finalized_block_hash,
                proposer_index,
            )
            .await
            .unwrap();

        /*
         * Execution Engine A:
         *
         * Indicate that the payload is the head of the chain, before submitting a
         * `notify_new_payload`.
         */
        let head_block_hash = valid_payload.block_hash;
        let finalized_block_hash = Hash256::zero();
        let payload_attributes = None;
        let (status, _) = self
            .ee_a
            .execution_layer
            .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes)
            .await
            .unwrap();
        // The EE hasn't seen the payload yet, so it reports `Syncing`.
        assert_eq!(status, PayloadStatusV1Status::Syncing);

        /*
         * Execution Engine A:
         *
         * Provide the valid payload back to the EE again.
         */

        let (status, _) = self
            .ee_a
            .execution_layer
            .notify_new_payload(&valid_payload)
            .await
            .unwrap();
        assert_eq!(status, PayloadStatusV1Status::Valid);

        /*
         * Execution Engine A:
         *
         * Indicate that the payload is the head of the chain.
         *
         * Do not provide payload attributes (we'll test that later).
         */
        let head_block_hash = valid_payload.block_hash;
        let finalized_block_hash = Hash256::zero();
        let payload_attributes = None;
        let (status, _) = self
            .ee_a
            .execution_layer
            .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes)
            .await
            .unwrap();
        assert_eq!(status, PayloadStatusV1Status::Valid);

        /*
         * Execution Engine A:
         *
         * Provide an invalidated payload to the EE.
         */

        // Corrupting `random` invalidates the payload (and its block hash).
        let mut invalid_payload = valid_payload.clone();
        invalid_payload.random = Hash256::from_low_u64_be(42);
        let (status, _) = self
            .ee_a
            .execution_layer
            .notify_new_payload(&invalid_payload)
            .await
            .unwrap();
        assert!(matches!(
            status,
            PayloadStatusV1Status::Invalid | PayloadStatusV1Status::InvalidBlockHash
        ));

        /*
         * Execution Engine A:
         *
         * Produce another payload atop the previous one.
         */

        let parent_hash = valid_payload.block_hash;
        let timestamp = valid_payload.timestamp + 1;
        let random = Hash256::zero();
        let finalized_block_hash = Hash256::zero();
        let proposer_index = 0;
        let second_payload = self
            .ee_a
            .execution_layer
            .get_payload::<MainnetEthSpec>(
                parent_hash,
                timestamp,
                random,
                finalized_block_hash,
                proposer_index,
            )
            .await
            .unwrap();

        /*
         * Execution Engine A:
         *
         * Provide the second payload back to the EE again.
         */

        let (status, _) = self
            .ee_a
            .execution_layer
            .notify_new_payload(&second_payload)
            .await
            .unwrap();
        assert_eq!(status, PayloadStatusV1Status::Valid);

        /*
         * Execution Engine A:
         *
         * Indicate that the payload is the head of the chain, providing payload attributes.
         */
        // NOTE(review): the head here is the *first* payload even though
        // `second_payload` was just produced — confirm this is intentional.
        let head_block_hash = valid_payload.block_hash;
        let finalized_block_hash = Hash256::zero();
        let payload_attributes = Some(PayloadAttributes {
            timestamp: second_payload.timestamp + 1,
            random: Hash256::zero(),
            suggested_fee_recipient: Address::zero(),
        });
        let (status, _) = self
            .ee_a
            .execution_layer
            .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes)
            .await
            .unwrap();
        assert_eq!(status, PayloadStatusV1Status::Valid);

        /*
         * Execution Engine B:
         *
         * Provide the second payload, without providing the first.
         */
        let (status, _) = self
            .ee_b
            .execution_layer
            .notify_new_payload(&second_payload)
            .await
            .unwrap();
        // EE B is missing the parent payload, so it can only report `Syncing`.
        assert_eq!(status, PayloadStatusV1Status::Syncing);

        /*
         * Execution Engine B:
         *
         * Set the second payload as the head, without providing payload attributes.
         */
        let head_block_hash = second_payload.block_hash;
        let finalized_block_hash = Hash256::zero();
        let payload_attributes = None;
        let (status, _) = self
            .ee_b
            .execution_layer
            .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes)
            .await
            .unwrap();
        assert_eq!(status, PayloadStatusV1Status::Syncing);

        /*
         * Execution Engine B:
         *
         * Provide the first payload to the EE.
         */

        let (status, _) = self
            .ee_b
            .execution_layer
            .notify_new_payload(&valid_payload)
            .await
            .unwrap();
        assert_eq!(status, PayloadStatusV1Status::Valid);

        /*
         * Execution Engine B:
         *
         * Provide the second payload, now the first has been provided.
         */
        let (status, _) = self
            .ee_b
            .execution_layer
            .notify_new_payload(&second_payload)
            .await
            .unwrap();
        assert_eq!(status, PayloadStatusV1Status::Valid);

        /*
         * Execution Engine B:
         *
         * Set the second payload as the head, without providing payload attributes.
         */
        let head_block_hash = second_payload.block_hash;
        let finalized_block_hash = Hash256::zero();
        let payload_attributes = None;
        let (status, _) = self
            .ee_b
            .execution_layer
            .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes)
            .await
            .unwrap();
        assert_eq!(status, PayloadStatusV1Status::Valid);
    }
}
|
||||
|
||||
/// Returns the duration since the unix epoch.
pub fn timestamp_now() -> u64 {
    // A pre-epoch system clock yields zero rather than panicking.
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(duration) => duration.as_secs(),
        Err(_) => 0,
    }
}
|
||||
Reference in New Issue
Block a user