* Initial commit. web3 api working.
* Tidied up lib. Add function for fetching logs.
* Refactor with `Eth1DataFetcher` trait
* Add parsing for deposit contract logs and get_eth1_data function
* Add `get_eth1_votes` function
* Refactor to cache Eth1Data
* Fix merge conflicts and minor refactorings
* Rename Eth1Cache to Eth1DataCache
* Refactor events subscription
* Add deposits module to interface with BeaconChain deposits
* Remove utils
* Rename to types.rs and add trait constraints to Eth1DataFetcher
* Conform to trait constraints. Make Web3DataFetcher cloneable
* Make fetcher object member of deposit and eth1_data cache and other fixes
* Fix update_cache function
* Move fetch_eth1_data to impl block
* Fix deposit tests
* Create Eth1 object for interfacing with Beacon chain
* Add `run` function for running update_cache and subscribe_deposit_logs tasks
* Add logging
* Run `cargo fmt` and make tests pass
* Convert sync functions to async
* Add timeouts to web3 functions
* Return futures from cache functions
* Add failed chaining of futures
* Working cache updating
* Clean up tests and `update_cache` function
* Refactor `get_eth1_data` functions to work with future returning functions
* Refactor eth1 `run` function to work with modified `update_cache` api
* Minor changes
* Add distance parameter to `update_cache`
* Fix tests and other minor fixes
* Working integration with cache and deposits
* Add merkle_tree construction, proof generation and verification code
* Add function to construct and fetch Deposits for BeaconNode
* Add error handling
* Import ssz
* Add error handling to eth1 cache and fix minor errors
* Run rustfmt
* Fix minor bug
* Rename Eth1Error and change to Result<T>
* Change deposit fetching mechanism from notification based to poll based
* Add deposits from eth1 chain in a given range every `x` blocks
* Modify `run` function to accommodate changes
* Minor fixes
* Fix formatting
* Fix merge issue
* Refactor with `Config` struct. Remove `ContractConfig`
* Rename eth1_chain crate to eth1
* Rename files and read abi file using `fs::read`
* Move eth1 to lib
* Remove unnecessary mutability constraint
* Add `Web3Backend` for returning actual eth1 data
* Refactor `get_eth1_votes` to return a Result
* Delete `eth1_chain` crate
* Return `Result` from `get_deposits`
* Fix range of deposits to return to beacon chain
* Add `get_block_height_by_hash` trait function
* Add naive method for getting `previous_eth1_distance`
* Add eth1 config params to main config
* Add instructions for setting up eth1 testing environment
* Add build script to fetch deposit contract abi
* Contract ABI is part of compiled binary
* Fix minor bugs
* Move docs to lib
* Add timeout to config
* Remove print statements
* Change warn to error
* Fix typos
* Removed prints in test and get timeout value from config
* Fixed error types
* Added logging to web3_fetcher
* Refactor for modified web3 api
* Fix minor stuff
* Add build script
* Tidy, hide eth1 integration tests behind flag
* Add http crate
* Add first stages of eth1_test_rig
* Fix deposits on test rig
* Fix bug with deposit count method
* Add block hash getter to http eth1
* Clean eth1 http crate and tests
* Add script to start ganache
* Adds deposit tree to eth1-http
* Extend deposit tree tests
* Tidy tests in eth1-http
* Add more detail to get block request
* Add block cache to eth1-http
* Rename deposit tree to deposit cache
* Add initial updating to eth1-http
* Tidy updater
* Fix compile bugs in tests
* Adds an Eth1DataCache builder
* Reorg eth1-http files
* Add (failing) tests for eth1 updater
* Rename files, fix bug in eth1-http
* Ensure that ganache timestamps are increasing
* Fix bugs with getting eth1data ancestors
* Improve eth1 testing, fix bugs
* Add truncate method to block cache
* Add pruning to block cache update process
* Add tests for block pruning
* Allow for dropping an expired cache.
* Add more comments
* Add first compiling version of deposit updater
* Add common fn for getting range of required blocks
* Add passing deposit update test
* Improve tests
* Fix block pruning bug
* Add tests for running two updates at once
* Add updater services to eth1
* Add deposit collection to beacon chain
* Add incomplete builder experiments
* Add first working version of beacon chain builder
* Update test harness to new beacon chain type
* Rename builder file, tidy
* Add first working client builder
* Progress further on client builder
* Update beacon node binary to use client builder
* Ensure release tests compile
* Remove old eth1 crate
* Add first pass of new lighthouse binary
* Fix websocket server startup
* Remove old binary code from beacon_node crate
* Add first working beacon node tests
* Add genesis crate, new eth1 cache_2
* Add Service to Eth1Cache
* Refactor with general eth1 improvements
* Add passing genesis test
* Tidy, add comments
* Add more comments to eth1 service
* Add further eth1 progress
* Fix some bugs with genesis
* Fix eth1 bugs, make eth1 linking more efficient
* Shift logic in genesis service
* Add more comments to genesis service
* Add gzip, max request values, timeouts to http
* Update testnet parameters to suit goerli testnet
* Add ability to vary Fork, fix custom spec
* Be more explicit about deposit fork version
* Start adding beacon chain eth1 option
* Add more flexibility to prod client
* Further runtime refactoring
* Allow for starting from store
* Add bootstrapping to client config
* Add remote_beacon_node crate
* Update eth1 service for more configurability
* Update eth1 tests to use less runtimes
* Patch issues with tests using too many files
* Move dummy eth1 backend flag
* Ensure all tests pass
* Add ganache-cli to Dockerfile
* Use a special docker hub image for testing
* Appease clippy
* Move validator client into lighthouse binary
* Allow starting with dummy eth1 backend
* Improve logging
* Fix dummy eth1 backend from cli
* Add extra testnet command
* Ensure consistent spec in beacon node
* Update eth1 rig to work on goerli
* Tidy lcli, start adding support for yaml config
* Add incomplete YamlConfig struct
* Remove efforts at YamlConfig
* Add incomplete eth1 voting. Blocked on spec issues
* Add (untested) first pass at eth1 vote algo
* Add tests for winning vote
* Add more tests for eth1 chain
* Add more eth1 voting tests
* Added more eth1 voting testing
* Change test name
* Add more tests to eth1 chain
* Tidy eth1 generics, add more tests
* Improve comments
* Tidy beacon_node tests
* Tidy, rename JsonRpc.. to Caching..
* Tidy voting logic
* Tidy builder docs
* Add comments, tidy eth1
* Add more comments to eth1
* Fix bug with winning_vote
* Add doc comments to the `ClientBuilder`
* Remove commented-out code
* Improve `ClientBuilder` docs
* Add comments to client config
* Add decoding test for `ClientConfig`
* Remove unused `DepositSet` struct
* Tidy `block_cache`
* Remove commented out lines
* Remove unused code in `eth1` crate
* Remove old validator binary `main.rs`
* Tidy, fix tests compile error
* Add initial tests for get_deposits
* Remove dead code in eth1_test_rig
* Update TestingDepositBuilder
* Add testing for getting eth1 deposits
* Fix duplicate rand dep
* Remove dead code
* Remove accidentally-added files
* Fix comment in eth1_genesis_service
* Add .gitignore for eth1_test_rig
* Fix bug in eth1_genesis_service
* Remove dead code from eth2_config
* Fix tabs/spaces in root Cargo.toml
* Tidy eth1 crate
* Allow for re-use of eth1 service after genesis
* Update docs for new CLI
* Change README gif
* Tidy eth1 http module
* Tidy eth1 service
* Tidy environment crate
* Remove unused file
* Tidy, add comments
* Remove commented-out code
* Address majority of Michael's comments
* Address other PR comments
* Add link to issue alongside TODO
//! The Validator Client service.
//!
//! Connects to a beacon node and negotiates the correct network id.
//!
//! Once connected, the service loads known validator keypairs from disk. Every slot,
//! the service pings the beacon node, asking for new duties for each of the validators.
//!
//! When a validator needs to either produce a block or sign an attestation, it requests the
//! data from the beacon node and performs the signing before publishing the block to the beacon
//! node.
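//!
//! A minimal sketch of how a caller might drive this service; the `client_config`,
//! `eth2_config` and `log` values are assumed to be built elsewhere, and
//! `MainnetEthSpec` stands in for whichever `EthSpec` the binary selects:
//!
//! ```ignore
//! let service = Service::<ValidatorServiceClient, Keypair, MainnetEthSpec>::initialize_service(
//!     client_config,
//!     eth2_config,
//!     log.clone(),
//! )?;
//!
//! // Drive the service once per slot. A real caller would sleep until the
//! // next slot boundary rather than spinning.
//! loop {
//!     let _ = service.per_slot_execution();
//! }
//! ```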

use crate::attestation_producer::AttestationProducer;
use crate::block_producer::{BeaconBlockGrpcClient, BlockProducer};
use crate::config::Config as ValidatorConfig;
use crate::duties::{BeaconNodeDuties, DutiesManager, EpochDutiesMap};
use crate::error as error_chain;
use crate::signer::Signer;
use bls::Keypair;
use eth2_config::Eth2Config;
use grpcio::{ChannelBuilder, EnvBuilder};
use parking_lot::RwLock;
use protos::services::Empty;
use protos::services_grpc::{
    AttestationServiceClient, BeaconBlockServiceClient, BeaconNodeServiceClient,
    ValidatorServiceClient,
};
use slog::{crit, error, info, trace, warn};
use slot_clock::{SlotClock, SystemTimeSlotClock};
use std::marker::PhantomData;
use std::sync::Arc;
use std::time::Duration;
use types::{ChainSpec, Epoch, EthSpec, Fork, Slot};

/// The validator service. This is the main thread that executes and maintains validator
/// duties.
// TODO: Generalize the BeaconNode types to use testing
pub struct Service<B: BeaconNodeDuties + 'static, S: Signer + 'static, E: EthSpec> {
    /// The fork version the node is currently processing on.
    fork: Fork,
    /// The slot clock for this service.
    pub slot_clock: SystemTimeSlotClock,
    /// The slot that is currently, or was most recently, processed by the service.
    current_slot: RwLock<Option<Slot>>,
    /// The number of slots per epoch for the current spec.
    slots_per_epoch: u64,
    /// The chain specification for this client instance.
    pub spec: Arc<ChainSpec>,
    /// The duties manager which maintains the state of when to perform actions.
    duties_manager: Arc<DutiesManager<B, S>>,
    // GRPC Clients
    /// The beacon block GRPC client.
    beacon_block_client: Arc<BeaconBlockGrpcClient>,
    /// The attester GRPC client.
    attestation_client: Arc<AttestationServiceClient>,
    /// The validator client logger.
    log: slog::Logger,
    _phantom: PhantomData<E>,
}

impl<E: EthSpec> Service<ValidatorServiceClient, Keypair, E> {
    /// Initial connection to the beacon node to determine its properties.
    ///
    /// This tries to connect to a beacon node. Once connected, it initialises the gRPC clients
    /// and returns an instance of the service.
    pub fn initialize_service(
        client_config: ValidatorConfig,
        eth2_config: Eth2Config,
        log: slog::Logger,
    ) -> error_chain::Result<Service<ValidatorServiceClient, Keypair, E>> {
        let server_url = format!(
            "{}:{}",
            client_config.server, client_config.server_grpc_port
        );

        let env = Arc::new(EnvBuilder::new().build());
        // Beacon node gRPC beacon node endpoints.
        let beacon_node_client = {
            let ch = ChannelBuilder::new(env.clone()).connect(&server_url);
            BeaconNodeServiceClient::new(ch)
        };

        // Retrieve node information and validate the beacon node.
        let node_info = loop {
            match beacon_node_client.info(&Empty::new()) {
                Err(e) => {
                    let retry_seconds = 5;
                    warn!(
                        log,
                        "Could not connect to beacon node";
                        "error" => format!("{:?}", e),
                        "retry_in" => format!("{} seconds", retry_seconds),
                    );
                    std::thread::sleep(Duration::from_secs(retry_seconds));
                    continue;
                }
                Ok(info) => {
                    // Verify the node's network id.
                    if eth2_config.spec.network_id != info.network_id as u8 {
                        error!(
                            log,
                            "Beacon node has a different network id to the validator client. Exiting."
                        );
                        return Err(format!("Beacon node has the wrong network id. Expected network id: {}, node's network id: {}", eth2_config.spec.network_id, info.network_id).into());
                    }
                    break info;
                }
            }
        };

        // Build requisite objects to form Self.
        let genesis_time = node_info.get_genesis_time();
        let genesis_slot = Slot::from(node_info.get_genesis_slot());

        info!(
            log,
            "Beacon node connected";
            "version" => node_info.version.clone(),
            "network_id" => node_info.network_id,
            "genesis_time" => genesis_time
        );

        let proto_fork = node_info.get_fork();
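        // The proto fork versions are raw byte vectors. `copy_from_slice`
        // panics if source and destination lengths differ, so take exactly
        // the first 4 bytes of each.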
        let mut previous_version: [u8; 4] = [0; 4];
        let mut current_version: [u8; 4] = [0; 4];
        previous_version.copy_from_slice(&proto_fork.get_previous_version()[..4]);
        current_version.copy_from_slice(&proto_fork.get_current_version()[..4]);
        let fork = Fork {
            previous_version,
            current_version,
            epoch: Epoch::from(proto_fork.get_epoch()),
        };

        // Initialize the RPC clients.

        // Beacon node gRPC beacon block endpoints.
        let beacon_block_client = {
            let ch = ChannelBuilder::new(env.clone()).connect(&server_url);
            let beacon_block_service_client = Arc::new(BeaconBlockServiceClient::new(ch));
            // A wrapper around the service client to implement the beacon block node trait.
            Arc::new(BeaconBlockGrpcClient::new(beacon_block_service_client))
        };

        // Beacon node gRPC validator endpoints.
        let validator_client = {
            let ch = ChannelBuilder::new(env.clone()).connect(&server_url);
            Arc::new(ValidatorServiceClient::new(ch))
        };

        // Beacon node gRPC attester endpoints.
        let attestation_client = {
            let ch = ChannelBuilder::new(env.clone()).connect(&server_url);
            Arc::new(AttestationServiceClient::new(ch))
        };

        // Build the validator slot clock.
        let slot_clock = SystemTimeSlotClock::new(
            genesis_slot,
            Duration::from_secs(genesis_time),
            Duration::from_millis(eth2_config.spec.milliseconds_per_slot),
        );
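        // The clock derives the current slot as (a sketch of the arithmetic,
        // not the exact `SystemTimeSlotClock` internals):
        //   slot = genesis_slot + (now - genesis_time) / slot_duration
        // and `now()` yields `None` when wall-clock time precedes genesis.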

        /* Generate the duties manager */

        // Load generated keypairs
        let keypairs = Arc::new(client_config.fetch_keys(&log)?);

        let slots_per_epoch = E::slots_per_epoch();

        // TODO: keypairs are randomly generated; they should be loaded from a file or generated.
        // https://github.com/sigp/lighthouse/issues/160
        //let keypairs = Arc::new(generate_deterministic_keypairs(8));

        // Builds a mapping of Epoch -> Map(PublicKey, EpochDuty)
        // where EpochDuty contains slot numbers and attestation data that each validator needs to
        // produce work on.
        let duties_map = RwLock::new(EpochDutiesMap::new(slots_per_epoch));

        // Builds a manager which maintains the list of current duties for all known validators
        // and can check when a validator needs to perform a task.
        let duties_manager = Arc::new(DutiesManager {
            duties_map,
            // These are abstract objects capable of signing.
            signers: keypairs,
            beacon_node: validator_client,
        });

        let spec = Arc::new(eth2_config.spec);

        Ok(Service {
            fork,
            slot_clock,
            current_slot: RwLock::new(None),
            slots_per_epoch,
            spec,
            duties_manager,
            beacon_block_client,
            attestation_client,
            log,
            _phantom: PhantomData,
        })
    }
}

impl<B: BeaconNodeDuties + 'static, S: Signer + 'static, E: EthSpec> Service<B, S, E> {
    /// The execution logic that runs every slot.
    // Errors are logged to output, and core execution continues unless fatal errors occur.
    pub fn per_slot_execution(&self) -> error_chain::Result<()> {
        /* get the new current slot and epoch */
        self.update_current_slot()?;

        /* check for new duties */
        self.check_for_duties();

        /* process any required duties for validators */
        self.process_duties();

        trace!(
            self.log,
            "Per slot execution finished";
        );

        Ok(())
    }

    /// Updates the known current slot and epoch.
    fn update_current_slot(&self) -> error_chain::Result<()> {
        let wall_clock_slot = self
            .slot_clock
            .now()
            .ok_or_else::<error_chain::Error, _>(|| {
                "Genesis is not in the past. Exiting.".into()
            })?;
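
        // `Slot::epoch` is integer division: epoch = slot / slots_per_epoch,
        // e.g. with 32 slots per epoch, slot 70 falls in epoch 2.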
        let wall_clock_epoch = wall_clock_slot.epoch(self.slots_per_epoch);
        let mut current_slot = self.current_slot.write();

        // This is a non-fatal error. If the slot clock repeats, the node could
        // have been slow to process the previous slot and is now duplicating tasks.
        // We skip the duplicate slot but raise a critical error.
        if let Some(current_slot) = *current_slot {
            if wall_clock_slot <= current_slot {
                crit!(
                    self.log,
                    "The validator tried to duplicate a slot. Likely missed the previous slot"
                );
                return Err("Duplicate slot".into());
            }
        }
        *current_slot = Some(wall_clock_slot);
        info!(self.log, "Processing"; "slot" => wall_clock_slot.as_u64(), "epoch" => wall_clock_epoch.as_u64());
        Ok(())
    }

    /// For all known validator keypairs, update any known duties from the beacon node.
    fn check_for_duties(&self) {
        let cloned_manager = self.duties_manager.clone();
        let cloned_log = self.log.clone();
        let current_epoch = self
            .current_slot
            .read()
            .expect("The current slot must be updated before checking for duties")
            .epoch(self.slots_per_epoch);

        trace!(
            self.log,
            "Checking for duties";
            "epoch" => current_epoch
        );

        // Spawn a new thread separate from the runtime.
        // TODO: Handle thread termination/timeout
        // TODO: Add duties thread back in, with channel to process duties in duty change.
        // Leave sequential for now.
        //std::thread::spawn(move || {
        // The return value is a future which returns ready,
        // built to be compatible with the tokio runtime.
        let _empty = cloned_manager.run_update(current_epoch, cloned_log.clone());
        //});
    }

    /// If there are any duties to process, spawn a separate thread and perform the required actions.
    fn process_duties(&self) {
        if let Some(work) = self.duties_manager.get_current_work(
            self.current_slot
                .read()
                .expect("The current slot must be updated before processing duties"),
        ) {
            trace!(
                self.log,
                "Processing duties";
                "work_items" => work.len()
            );

            for (signer_index, work_type) in work {
                if work_type.produce_block {
                    // We need to produce a block;
                    // spawn a thread to produce a beacon block.
                    let signers = self.duties_manager.signers.clone(); // this is an Arc
                    let fork = self.fork.clone();
                    let slot = self
                        .current_slot
                        .read()
                        .expect("The current slot must be updated before processing duties");
                    let spec = self.spec.clone();
                    let beacon_node = self.beacon_block_client.clone();
                    let log = self.log.clone();
                    let slots_per_epoch = self.slots_per_epoch;
                    std::thread::spawn(move || {
                        info!(
                            log,
                            "Producing a block";
                            "validator" => format!("{}", signers[signer_index]),
                            "slot" => slot
                        );
                        let signer = &signers[signer_index];
                        let mut block_producer = BlockProducer {
                            fork,
                            slot,
                            spec,
                            beacon_node,
                            signer,
                            slots_per_epoch,
                            _phantom: PhantomData::<E>,
                            log,
                        };
                        block_producer.handle_produce_block();
                    });
                }
                if work_type.attestation_duty.is_some() {
                    // We need to produce an attestation;
                    // spawn a thread to produce and sign an attestation.
                    let slot = self
                        .current_slot
                        .read()
                        .expect("The current slot must be updated before processing duties");
                    let signers = self.duties_manager.signers.clone(); // this is an Arc
                    let fork = self.fork.clone();
                    let spec = self.spec.clone();
                    let beacon_node = self.attestation_client.clone();
                    let log = self.log.clone();
                    let slots_per_epoch = self.slots_per_epoch;
                    std::thread::spawn(move || {
                        info!(
                            log,
                            "Producing an attestation";
                            "validator" => format!("{}", signers[signer_index]),
                            "slot" => slot
                        );
                        let signer = &signers[signer_index];
                        let mut attestation_producer = AttestationProducer {
                            fork,
                            duty: work_type.attestation_duty.expect("Should never be none"),
                            spec,
                            beacon_node,
                            signer,
                            slots_per_epoch,
                            _phantom: PhantomData::<E>,
                        };
                        attestation_producer.handle_produce_attestation(log);
                    });
                }
            }
        }
    }
}