mirror of https://github.com/sigp/lighthouse.git (synced 2026-03-19 21:04:41 +00:00)
Stable futures (#879)
* Port eth1 lib to use stable futures
* Port eth1_test_rig to stable futures
* Port eth1 tests to stable futures
* Port genesis service to stable futures
* Port genesis tests to stable futures
* Port beacon_chain to stable futures
* Port lcli to stable futures
* Fix eth1_test_rig (#1014)
* Fix lcli
* Port timer to stable futures
* Fix timer
* Port websocket_server to stable futures
* Port notifier to stable futures
* Add TODOs
* Update hashmap hashset to stable futures
* Adds panic test to hashset delay
* Port remote_beacon_node to stable futures
* Fix lcli merge conflicts
* Non-RPC stuff compiles
* protocol.rs compiles
* Port websockets, timer and notifier to stable futures (#1035)
* Fix lcli
* Port timer to stable futures
* Fix timer
* Port websocket_server to stable futures
* Port notifier to stable futures
* Add TODOs
* Port remote_beacon_node to stable futures
* Partial eth2-libp2p stable future upgrade
* Finished first round of fighting RPC types
* Further progress towards porting eth2-libp2p; adds caching to discovery
* Update behaviour
* RPC handler to stable futures
* Update RPC to master libp2p
* Network service additions
* Fix the fallback transport construction (#1102)
* Correct warning
* Remove hashmap delay
* Compiling version of eth2-libp2p
* Update all crate versions
* Fix conversion function and add tests (#1113)
* Port validator_client to stable futures (#1114)
* Add PH & MS slot clock changes
* Account for genesis time
* Add progress on duties refactor
* Add simple is_aggregator bool to val subscription
* Start work on attestation_verification.rs
* Add progress on ObservedAttestations
* Progress with ObservedAttestations
* Fix tests
* Add observed attestations to the beacon chain
* Add attestation observation to processing code
* Add progress on attestation verification
* Add first draft of ObservedAttesters
* Add more tests
* Add observed attesters to beacon chain
* Add observers to attestation processing
* Add more attestation verification
* Create ObservedAggregators map
* Remove commented-out code
* Add observed aggregators into chain
* Add progress
* Finish adding features to attestation verification
* Ensure beacon chain compiles
* Link attn verification into chain
* Integrate new attn verification in chain
* Remove old attestation processing code
* Start trying to fix beacon_chain tests
* Split adding into pools into two functions
* Add aggregation to harness
* Get test harness working again
* Adjust the number of aggregators for test harness
* Fix edge-case in harness
* Integrate new attn processing in network
* Fix compile bug in validator_client
* Update validator API endpoints
* Fix aggregation in test harness
* Fix enum thing
* Fix attestation observation bug
* Patch failing API tests
* Start adding comments to attestation verification
* Remove unused attestation field
* Unify "is block known" logic
* Update comments
* Suppress fork choice errors for network processing
* Add todos
* Tidy
* Add gossip attn tests
* Disallow test harness to produce old attns
* Comment out in-progress tests
* Partially address pruning tests
* Fix failing store test
* Add aggregate tests
* Add comments about which spec conditions we check
* Don't re-aggregate
* Split apart test harness attn production
* Fix compile error in network
* Make progress on commented-out test
* Fix skipping attestation test
* Add fork choice verification tests
* Tidy attn tests, remove dead code
* Remove some accidentally added code
* Fix clippy lint
* Rename test file
* Add block tests, add cheap block proposer check
* Rename block testing file
* Add observed_block_producers
* Tidy
* Switch around block signature verification
* Finish block testing
* Remove gossip from signature tests
* First pass of self review
* Fix deviation in spec
* Update test spec tags
* Start moving over to hashset
* Finish moving observed attesters to hashmap
* Move aggregation pool over to hashmap
* Make fc attn borrow again
* Fix rest_api compile error
* Fix missing comments
* Fix monster test
* Uncomment increasing slots test
* Address remaining comments
* Remove unsafe, use cfg test
* Remove cfg test flag
* Fix dodgy comment
* Revert "Update hashmap hashset to stable futures". This reverts commit d432378a3c.
* Revert "Adds panic test to hashset delay". This reverts commit 281502396f.
* Ported attestation_service
* Ported duties_service
* Ported fork_service
* More ports
* Port block_service
* Minor fixes
* VC compiles
* Update TODOs
* Borrow self where possible
* Ignore aggregates that are already known.
* Unify aggregator modulo logic
* Fix typo in logs
* Refactor validator subscription logic
* Avoid reproducing selection proof
* Skip HTTP call if no subscriptions
* Rename DutyAndState -> DutyAndProof
* Tidy logs
* Print root as dbg
* Fix compile errors in tests
* Fix compile error in test
* Re-fix attestation and duties service
* Minor fixes

Co-authored-by: Paul Hauner <paul@paulhauner.com>

* Network crate update to stable futures
* Port account_manager to stable futures (#1121)
* Port account_manager to stable futures
* Run async fns in tokio environment
* Port rest_api crate to stable futures (#1118)
* Port rest_api lib to stable futures
* Reduce tokio features
* Update notifier to stable futures
* Builder update
* Further updates
* Convert self-referential async functions
* Stable futures fixes (#1124)
* Fix eth1 update functions
* Fix genesis and client
* Fix beacon node lib
* Return appropriate runtimes from environment
* Fix test rig
* Refactor eth1 service update
* Upgrade simulator to stable futures
* Lighthouse compiles on stable futures
* Remove println debugging statement
* Update libp2p service, start RPC test upgrade
* Update network crate for new libp2p
* Update tokio::codec to futures_codec (#1128)
* Further work towards RPC corrections
* Correct HTTP timeout and network service select
* Use tokio runtime for libp2p
* Revert "Update tokio::codec to futures_codec (#1128)". This reverts commit e57aea924a.
* Upgrade RPC libp2p tests
* Upgrade secio fallback test
* Upgrade gossipsub examples
* Clean up RPC protocol
* Test fixes (#1133)
* Correct websocket timeout and run on OS thread
* Fix network test
* Clean up PR
* Correct tokio TCP, move attestation service tests
* Upgrade attestation service tests
* Correct network test
* Correct genesis test
* Test corrections
* Log info when block is received
* Modify logs and update attester service events
* Stable futures: fixes to VC, eth1 and account manager (#1142)
* Add local testnet scripts
* Remove whiteblock script
* Rename local testnet script
* Move spawns onto handle
* Fix VC panic
* Initial fix to block production issue
* Tidy block producer fix
* Tidy further
* Add local testnet clean script
* Run cargo fmt
* Tidy duties service
* Tidy fork service
* Tidy ForkService
* Tidy AttestationService
* Tidy notifier
* Ensure await is not suppressed in eth1
* Ensure await is not suppressed in account_manager
* Use .ok() instead of .unwrap_or(())
* RPC decoding test for proto
* Update discv5 and eth2-libp2p deps
* Fix lcli double runtime issue (#1144)
* Handle stream termination and dialing peer errors
* Correct peer_info variant types
* Remove unnecessary warnings
* Handle subnet unsubscription removal and improve logging
* Add logs around ping
* Upgrade discv5 and improve logging
* Handle peer connection status for multiple connections
* Improve network service logging
* Improve logging around peer manager
* Upgrade swarm poll; centralise peer management
* Identify clients on error
* Fix `remove_peer` in sync (#1150)
* remove_peer removes from all chains
* Remove logs
* Fix early return from loop
* Improved logging, fix panic
* Partially correct tests
* Stable futures: VC sync (#1149)
* Improve syncing heuristic
* Add comments
* Use safer method for tolerance
* Fix tests
* Stable futures: fix VC bug, update agg pool, add more metrics (#1151)
* Expose epoch processing summary
* Expose participation metrics to Prometheus
* Switch to f64
* Reduce precision
* Change precision
* Expose observed attesters metrics
* Add metrics for agg/unagg attn counts
* Add metrics for gossip rx
* Add metrics for gossip tx
* Adds ignored attns to prom
* Add attestation timing
* Add timer for aggregation pool sig agg
* Add write lock timer for agg pool
* Add more metrics to agg pool
* Change map lock code
* Add extra metric to agg pool
* Change lock handling in agg pool
* Change .write() to .read()
* Add another agg pool timer
* Fix for is_aggregator
* Fix pruning bug

Co-authored-by: pawan <pawandhananjay@gmail.com>
Co-authored-by: Paul Hauner <paul@paulhauner.com>
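The whole series follows one mechanical migration: functions that returned `impl Future<Item = T, Error = E>` under futures 0.1 become `async fn` returning `Result<T, E>`, and `into_future()`/`and_then` combinator chains flatten into `?` and `.await`. A minimal before/after sketch of that shape (the `fetch_version` name and `Client` type are illustrative, not from this diff):

    // futures 0.1: lazy combinator chain
    fn fetch_version(client: Client) -> impl Future<Item = String, Error = Error> {
        client
            .version_url()
            .into_future()
            .and_then(move |url| client.json_get(url))
    }

    // stable futures (0.3): the same logic as straight-line async code
    async fn fetch_version(client: Client) -> Result<String, Error> {
        let url = client.version_url()?;
        client.json_get(url).await
    }

The diff below applies this transformation crate by crate, alongside the dependency bumps it requires (tokio 0.2, reqwest 0.10, futures 0.3).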
@@ -7,15 +7,15 @@ edition = "2018"

 [dependencies]
 milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.0.1" }
 eth2_hashing = "0.1.0"
-hex = "0.3"
-rand = "0.7.2"
-serde = "1.0.102"
-serde_derive = "1.0.102"
+hex = "0.4.2"
+rand = "0.7.3"
+serde = "1.0.110"
+serde_derive = "1.0.110"
 serde_hex = { path = "../serde_hex" }
 eth2_ssz = "0.1.2"
 eth2_ssz_types = { path = "../ssz_types" }
 tree_hash = "0.1.0"
-arbitrary = { version = "0.4", features = ["derive"], optional = true }
+arbitrary = { version = "0.4.4", features = ["derive"], optional = true }

 [features]
 fake_crypto = []
@@ -5,17 +5,17 @@ authors = ["Michael Sproul <michael@sigmaprime.io>"]
 edition = "2018"

 [dependencies]
-ethereum-types = "0.9"
+ethereum-types = "0.9.1"
 eth2_ssz_types = { path = "../ssz_types" }
-eth2_hashing = "0.1"
+eth2_hashing = "0.1.0"
 eth2_ssz_derive = "0.1.0"
 eth2_ssz = "0.1.2"
-tree_hash = "0.1"
-smallvec = "1.2.0"
+tree_hash = "0.1.0"
+smallvec = "1.4.0"

 [dev-dependencies]
-quickcheck = "0.9"
-quickcheck_macros = "0.8"
+quickcheck = "0.9.2"
+quickcheck_macros = "0.9.1"

 [features]
 arbitrary = ["ethereum-types/arbitrary"]
@@ -8,8 +8,8 @@ edition = "2018"

 [dependencies]
 clap = "2.33.0"
-hex = "0.3"
-dirs = "2.0"
+hex = "0.4.2"
+dirs = "2.0.2"
 types = { path = "../../types" }
 eth2_testnet_config = { path = "../eth2_testnet_config" }
-eth2_ssz = { path = "../ssz" }
+eth2_ssz = "0.1.2"
@@ -8,5 +8,5 @@ edition = "2018"
 proc-macro = true

 [dependencies]
-syn = "0.15"
-quote = "0.6"
+syn = "1.0.18"
+quote = "1.0.4"
@@ -8,7 +8,7 @@ use syn::{parse_macro_input, DeriveInput};
 fn is_slice(field: &syn::Field) -> bool {
     field.attrs.iter().any(|attr| {
         attr.path.is_ident("compare_fields")
-            && attr.tts.to_string().replace(" ", "") == "(as_slice)"
+            && attr.tokens.to_string().replace(" ", "") == "(as_slice)"
     })
 }
@@ -7,11 +7,11 @@ edition = "2018"
 build = "build.rs"

 [build-dependencies]
-reqwest = "0.9.20"
-serde_json = "1.0"
+reqwest = { version = "0.10.4", features = ["blocking", "json"] }
+serde_json = "1.0.52"

 [dependencies]
 types = { path = "../../types"}
-eth2_ssz = { path = "../ssz"}
-tree_hash = { path = "../tree_hash"}
-ethabi = "12.0"
+eth2_ssz = "0.1.2"
+tree_hash = "0.1.0"
+ethabi = "12.0.0"

@@ -56,8 +56,8 @@ pub fn download_deposit_contract(
     if abi_file.exists() {
         // Nothing to do.
     } else {
-        match reqwest::get(url) {
-            Ok(mut response) => {
+        match reqwest::blocking::get(url) {
+            Ok(response) => {
                 let mut abi_file = File::create(abi_file)
                     .map_err(|e| format!("Failed to create local abi file: {:?}", e))?;
                 let mut bytecode_file = File::create(bytecode_file)
@@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
 edition = "2018"

 [dependencies]
-serde = "1.0.102"
-serde_derive = "1.0.102"
-toml = "0.5.4"
+serde = "1.0.110"
+serde_derive = "1.0.110"
+toml = "0.5.6"
 types = { path = "../../types" }
@@ -13,13 +13,13 @@ lazy_static = { version = "1.4.0", optional = true }
 ring = "0.16.9"

 [target.'cfg(target_arch = "wasm32")'.dependencies]
-sha2 = "0.8.0"
+sha2 = "0.8.1"

 [dev-dependencies]
-rustc-hex = "2.0.1"
+rustc-hex = "2.1.0"

 [target.'cfg(target_arch = "wasm32")'.dev-dependencies]
-wasm-bindgen-test = "0.3.2"
+wasm-bindgen-test = "0.3.12"

 [features]
 default = ["zero_hash_cache"]
@@ -8,13 +8,13 @@ edition = "2018"

 [dependencies]
 lazy_static = "1.4.0"
-num-bigint = "0.2.3"
+num-bigint = "0.2.6"
 eth2_hashing = "0.1.0"
-hex = "0.3"
+hex = "0.4.2"
 milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.0.1" }
 serde_yaml = "0.8.11"
-serde = "1.0.102"
-serde_derive = "1.0.102"
+serde = "1.0.110"
+serde_derive = "1.0.110"

 [dev-dependencies]
-base64 = "0.11.0"
+base64 = "0.12.1"
@@ -11,7 +11,7 @@ rand = "0.7.2"
 rust-crypto = "0.2.36"
 uuid = { version = "0.8", features = ["serde", "v4"] }
 zeroize = { version = "1.0.0", features = ["zeroize_derive"] }
-serde = "1.0.102"
+serde = "1.0.110"
 serde_repr = "0.1"
 hex = "0.3"
 bls = { path = "../bls" }
@@ -7,15 +7,14 @@ edition = "2018"
 build = "build.rs"

 [build-dependencies]
-reqwest = "0.9.20"
+reqwest = { version = "0.10.4", features = ["blocking"] }

 [dev-dependencies]
-tempdir = "0.3"
-reqwest = "0.9.20"
+tempdir = "0.3.7"

 [dependencies]
-serde = "1.0"
-serde_yaml = "0.8"
+serde = "1.0.110"
+serde_yaml = "0.8.11"
 types = { path = "../../types"}
 eth2-libp2p = { path = "../../../beacon_node/eth2-libp2p"}
-eth2_ssz = { path = "../ssz"}
+eth2_ssz = "0.1.2"
@@ -46,13 +46,18 @@ pub fn get_file(filename: &str) -> Result<(), String> {
     let mut file =
         File::create(path).map_err(|e| format!("Failed to create {}: {:?}", filename, e))?;

-    let mut response = reqwest::get(&url)
-        .map_err(|e| format!("Failed to download {}: {}", filename, e))?
-        .error_for_status()
-        .map_err(|e| format!("Error downloading {}: {}", filename, e))?;
-    let mut contents: Vec<u8> = vec![];
-    response
-        .copy_to(&mut contents)
-        .map_err(|e| format!("Error downloading {}: {}", filename, e))?;
+    let request = reqwest::blocking::Client::builder()
+        .build()
+        .map_err(|_| "Could not build request client".to_string())?
+        .get(&url)
+        .timeout(std::time::Duration::from_secs(120));
+
+    let contents = request
+        .send()
+        .map_err(|e| format!("Failed to download {}: {}", filename, e))?
+        .error_for_status()
+        .map_err(|e| format!("Error downloading {}: {}", filename, e))?
+        .bytes()
+        .map_err(|e| format!("Failed to read {} response bytes: {}", filename, e))?;

     file.write(&contents)
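Both downloads above move to `reqwest::blocking` because reqwest 0.10 made the async client the default: a `build.rs` script runs outside any tokio runtime, so the synchronous client (enabled through the `blocking` cargo feature seen in the `[build-dependencies]` hunks) is the right tool. A stripped-down sketch of the same pattern in isolation, assuming the reqwest 0.10 blocking API (the `fetch` helper is hypothetical):

    use std::time::Duration;

    /// Fetch a URL synchronously; suitable for build scripts with no async runtime.
    fn fetch(url: &str) -> Result<Vec<u8>, String> {
        let client = reqwest::blocking::Client::builder()
            .build()
            .map_err(|e| format!("client error: {}", e))?;
        let bytes = client
            .get(url)
            .timeout(Duration::from_secs(120)) // per-request timeout
            .send()
            .map_err(|e| format!("send error: {}", e))?
            .error_for_status() // keep HTTP error codes from looking like success
            .map_err(|e| format!("status error: {}", e))?
            .bytes()
            .map_err(|e| format!("read error: {}", e))?;
        Ok(bytes.to_vec())
    }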
@@ -7,7 +7,7 @@ edition = "2018"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-serde = "1.0.102"
+serde = "1.0.110"
 serde_json = "1.0.41"
 serde_repr = "0.1"
 uuid = { version = "0.8", features = ["serde", "v4"] }
@@ -1,9 +0,0 @@
-[package]
-name = "hashmap_delay"
-version = "0.2.0"
-authors = ["Age Manning <Age@AgeManning.com>"]
-edition = "2018"
-
-[dependencies]
-tokio-timer = "0.2.12"
-futures = "0.1.29"
@@ -1,161 +0,0 @@
-//! A simple hashmap object coupled with a `delay_queue` which has entries that expire after a
-//! fixed time.
-//!
-//! A `HashMapDelay` implements `Stream` which removes expired items from the map.
-
-/// The default delay for entries, in seconds. This is only used when `insert()` is used to add
-/// entries.
-const DEFAULT_DELAY: u64 = 30;
-
-use futures::prelude::*;
-use std::collections::HashMap;
-use std::time::Duration;
-use tokio_timer::delay_queue::{self, DelayQueue};
-
-pub struct HashMapDelay<K, V>
-where
-    K: std::cmp::Eq + std::hash::Hash + std::clone::Clone,
-{
-    /// The given entries.
-    entries: HashMap<K, MapEntry<V>>,
-    /// A queue holding the timeouts of each entry.
-    expirations: DelayQueue<K>,
-    /// The default expiration timeout of an entry.
-    default_entry_timeout: Duration,
-}
-
-/// A wrapping around entries that adds the link to the entry's expiration, via a `delay_queue` key.
-struct MapEntry<V> {
-    /// The expiration key for the entry.
-    key: delay_queue::Key,
-    /// The actual entry.
-    value: V,
-}
-
-impl<K, V> Default for HashMapDelay<K, V>
-where
-    K: std::cmp::Eq + std::hash::Hash + std::clone::Clone,
-{
-    fn default() -> Self {
-        HashMapDelay::new(Duration::from_secs(DEFAULT_DELAY))
-    }
-}
-
-impl<K, V> HashMapDelay<K, V>
-where
-    K: std::cmp::Eq + std::hash::Hash + std::clone::Clone,
-{
-    /// Creates a new instance of `HashMapDelay`.
-    pub fn new(default_entry_timeout: Duration) -> Self {
-        HashMapDelay {
-            entries: HashMap::new(),
-            expirations: DelayQueue::new(),
-            default_entry_timeout,
-        }
-    }
-
-    /// Insert an entry into the mapping. Entries will expire after the `default_entry_timeout`.
-    pub fn insert(&mut self, key: K, value: V) {
-        self.insert_at(key, value, self.default_entry_timeout);
-    }
-
-    /// Inserts an entry that will expire at a given instant.
-    pub fn insert_at(&mut self, key: K, value: V, entry_duration: Duration) {
-        let delay_key = self.expirations.insert(key.clone(), entry_duration);
-        let entry = MapEntry {
-            key: delay_key,
-            value,
-        };
-        self.entries.insert(key, entry);
-    }
-
-    /// Gets a reference to an entry if it exists.
-    ///
-    /// Returns None if the entry does not exist.
-    pub fn get(&self, key: &K) -> Option<&V> {
-        self.entries.get(key).map(|entry| &entry.value)
-    }
-
-    /// Gets a mutable reference to an entry if it exists.
-    ///
-    /// Returns None if the entry does not exist.
-    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
-        self.entries.get_mut(key).map(|entry| &mut entry.value)
-    }
-
-    /// Returns true if the key exists, false otherwise.
-    pub fn contains_key(&self, key: &K) -> bool {
-        self.entries.contains_key(key)
-    }
-
-    /// Returns the length of the mapping.
-    pub fn len(&self) -> usize {
-        self.entries.len()
-    }
-
-    /// Updates the timeout for a given key. Returns true if the key existed, false otherwise.
-    ///
-    /// Panics if the duration is too far in the future.
-    pub fn update_timeout(&mut self, key: &K, timeout: Duration) -> bool {
-        if let Some(entry) = self.entries.get(key) {
-            self.expirations.reset(&entry.key, timeout);
-            true
-        } else {
-            false
-        }
-    }
-
-    /// Removes a key from the map returning the value associated with the key that was in the map.
-    ///
-    /// Return None if the key was not in the map.
-    pub fn remove(&mut self, key: &K) -> Option<V> {
-        if let Some(entry) = self.entries.remove(key) {
-            self.expirations.remove(&entry.key);
-            return Some(entry.value);
-        }
-        return None;
-    }
-
-    /// Retains only the elements specified by the predicate.
-    ///
-    /// In other words, remove all pairs `(k, v)` such that `f(&k,&mut v)` returns false.
-    pub fn retain<F: FnMut(&K, &mut V) -> bool>(&mut self, mut f: F) {
-        let expiration = &mut self.expirations;
-        self.entries.retain(|key, entry| {
-            let result = f(key, &mut entry.value);
-            if !result {
-                expiration.remove(&entry.key);
-            }
-            result
-        })
-    }
-
-    /// Removes all entries from the map.
-    pub fn clear(&mut self) {
-        self.entries.clear();
-        self.expirations.clear();
-    }
-}
-
-impl<K, V> Stream for HashMapDelay<K, V>
-where
-    K: std::cmp::Eq + std::hash::Hash + std::clone::Clone,
-{
-    type Item = (K, V);
-    type Error = &'static str;
-
-    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
-        match self.expirations.poll() {
-            Ok(Async::Ready(Some(key))) => {
-                let key = key.into_inner();
-                match self.entries.remove(&key) {
-                    Some(entry) => Ok(Async::Ready(Some((key, entry.value)))),
-                    None => Err("Value no longer exists in expirations"),
-                }
-            }
-            Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
-            Ok(Async::NotReady) => Ok(Async::NotReady),
-            Err(_) => Err("Error polling HashMapDelay"),
-        }
-    }
-}
@@ -1,21 +0,0 @@
-//! This crate provides two objects:
-//! - `HashMapDelay`
-//! - `HashSetDelay`
-//!
-//! # HashMapDelay
-//!
-//! This provides a `HashMap` coupled with a `DelayQueue`. Objects that are inserted into
-//! the map are inserted with an expiry. `Stream` is implemented on the `HashMapDelay`
-//! which return objects that have expired. These objects are removed from the mapping.
-//!
-//! # HashSetDelay
-//!
-//! This is similar to a `HashMapDelay` except the mapping maps to the expiry time. This
-//! allows users to add objects and check their expiry deadlines before the `Stream`
-//! consumes them.
-
-mod hashmap_delay;
-mod hashset_delay;
-
-pub use crate::hashmap_delay::HashMapDelay;
-pub use crate::hashset_delay::HashSetDelay;
eth2/utils/hashset_delay/Cargo.toml (new file, 12 lines)
@@ -0,0 +1,12 @@
+[package]
+name = "hashset_delay"
+version = "0.2.0"
+authors = ["Age Manning <Age@AgeManning.com>"]
+edition = "2018"
+
+[dependencies]
+futures = "0.3.5"
+tokio = { version = "0.2.20", features = ["time"] }
+
+[dev-dependencies]
+tokio = { version = "0.2.20", features = ["time", "rt-threaded", "macros"] }
@@ -6,13 +6,17 @@
 const DEFAULT_DELAY: u64 = 30;

 use futures::prelude::*;
-use std::collections::HashMap;
-use std::time::{Duration, Instant};
-use tokio_timer::delay_queue::{self, DelayQueue};
+use std::{
+    collections::HashMap,
+    pin::Pin,
+    task::{Context, Poll},
+    time::{Duration, Instant},
+};
+use tokio::time::delay_queue::{self, DelayQueue};

 pub struct HashSetDelay<K>
 where
-    K: std::cmp::Eq + std::hash::Hash + std::clone::Clone,
+    K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin,
 {
     /// The given entries.
     entries: HashMap<K, MapEntry>,
@@ -32,7 +36,7 @@ struct MapEntry {

 impl<K> Default for HashSetDelay<K>
 where
-    K: std::cmp::Eq + std::hash::Hash + std::clone::Clone,
+    K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin,
 {
     fn default() -> Self {
         HashSetDelay::new(Duration::from_secs(DEFAULT_DELAY))
@@ -41,7 +45,7 @@ where

 impl<K> HashSetDelay<K>
 where
-    K: std::cmp::Eq + std::hash::Hash + std::clone::Clone,
+    K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin,
 {
     /// Creates a new instance of `HashSetDelay`.
     pub fn new(default_entry_timeout: Duration) -> Self {
@@ -134,30 +138,55 @@ where
     }

     /// Returns a vector of referencing all keys in the map.
-    pub fn keys_vec(&self) -> Vec<&K> {
-        self.entries.keys().collect()
+    pub fn keys(&self) -> impl Iterator<Item = &K> {
+        self.entries.keys()
     }
 }

 impl<K> Stream for HashSetDelay<K>
 where
-    K: std::cmp::Eq + std::hash::Hash + std::clone::Clone,
+    K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin,
 {
-    type Item = K;
-    type Error = &'static str;
+    type Item = Result<K, String>;

-    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
-        match self.expirations.poll() {
-            Ok(Async::Ready(Some(key))) => {
-                let key = key.into_inner();
-                match self.entries.remove(&key) {
-                    Some(_) => Ok(Async::Ready(Some(key))),
-                    None => Err("Value no longer exists in expirations"),
-                }
-            }
-            Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
-            Ok(Async::NotReady) => Ok(Async::NotReady),
-            Err(_) => Err("Error polling HashSetDelay"),
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
+        match self.expirations.poll_expired(cx) {
+            Poll::Ready(Some(Ok(key))) => match self.entries.remove(key.get_ref()) {
+                Some(_) => Poll::Ready(Some(Ok(key.into_inner()))),
+                None => Poll::Ready(Some(Err("Value no longer exists in expirations".into()))),
+            },
+            Poll::Ready(Some(Err(e))) => {
+                Poll::Ready(Some(Err(format!("delay queue error: {:?}", e))))
+            }
+            Poll::Ready(None) => Poll::Ready(None),
+            Poll::Pending => Poll::Pending,
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[tokio::test]
+    async fn should_not_panic() {
+        let key = 2u8;
+
+        let mut map = HashSetDelay::default();
+
+        map.insert(key);
+        map.update_timeout(&key, Duration::from_secs(100));
+
+        let fut = |cx: &mut Context| {
+            let _ = map.poll_next_unpin(cx);
+            let _ = map.poll_next_unpin(cx);
+            Poll::Ready(())
+        };
+
+        future::poll_fn(fut).await;
+
+        map.insert(key);
+        map.update_timeout(&key, Duration::from_secs(100));
+    }
+}
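The `Stream` rewrite above tracks the trait's change between futures 0.1 and futures 0.3: the receiver becomes pinned, a task `Context` is passed explicitly, and the `Error` associated type is gone, which is why fallibility moves into `Item = Result<K, String>`. For reference, the two signatures side by side (types elided):

    // futures 0.1
    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error>;

    // futures 0.3: pinned receiver, explicit waker context, no Error channel
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>>;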
eth2/utils/hashset_delay/src/lib.rs (new file, 12 lines)
@@ -0,0 +1,12 @@
+//! This crate provides a single type (its counterpart `HashMapDelay` has been removed as it
+//! is not currently in use in Lighthouse):
+//! - `HashSetDelay`
+//!
+//! # HashSetDelay
+//!
+//! This is similar to a `HashMapDelay` except the mapping maps to the expiry time. This
+//! allows users to add objects and check their expiry deadlines before the `Stream`
+//! consumes them.
+
+mod hashset_delay;
+pub use crate::hashset_delay::HashSetDelay;
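A minimal usage sketch for the crate, assuming a tokio 0.2 runtime and using only the API shown in this diff (`new`, `insert`, `update_timeout`, and the `Stream` impl); the timings are illustrative:

    use futures::StreamExt;
    use hashset_delay::HashSetDelay;
    use std::time::Duration;

    #[tokio::main]
    async fn main() {
        let mut peers: HashSetDelay<u64> = HashSetDelay::new(Duration::from_secs(1));
        peers.insert(42);
        // Push the deadline back before the entry expires.
        peers.update_timeout(&42, Duration::from_secs(2));

        // The stream yields each key as its timer fires and removes it from the set.
        while let Some(Ok(expired)) = peers.next().await {
            println!("expired: {}", expired);
        }
    }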
@@ -5,8 +5,8 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
 edition = "2018"

 [dependencies]
-bytes = "0.4.12"
+bytes = "0.5.4"

 [dev-dependencies]
 yaml-rust = "0.4.3"
-hex = "0.3"
+hex = "0.4.2"
@@ -8,4 +8,4 @@ edition = "2018"

 [dependencies]
 lazy_static = "1.4.0"
-prometheus = "0.7.0"
+prometheus = "0.8.0"
@@ -56,7 +56,7 @@

 use prometheus::{HistogramOpts, HistogramTimer, Opts};

-pub use prometheus::{Encoder, Histogram, IntCounter, IntGauge, Result, TextEncoder};
+pub use prometheus::{Encoder, Gauge, Histogram, IntCounter, IntGauge, Result, TextEncoder};

 /// Collect all the metrics for reporting.
 pub fn gather() -> Vec<prometheus::proto::MetricFamily> {

@@ -81,6 +81,15 @@ pub fn try_create_int_gauge(name: &str, help: &str) -> Result<IntGauge> {
     Ok(gauge)
 }

+/// Attempts to create a `Gauge`, returning `Err` if the registry does not accept the gauge
+/// (potentially due to naming conflict).
+pub fn try_create_float_gauge(name: &str, help: &str) -> Result<Gauge> {
+    let opts = Opts::new(name, help);
+    let gauge = Gauge::with_opts(opts)?;
+    prometheus::register(Box::new(gauge.clone()))?;
+    Ok(gauge)
+}
+
 /// Attempts to create a `Histogram`, returning `Err` if the registry does not accept the counter
 /// (potentially due to naming conflict).
 pub fn try_create_histogram(name: &str, help: &str) -> Result<Histogram> {

@@ -124,6 +133,24 @@ pub fn set_gauge(gauge: &Result<IntGauge>, value: i64) {
     }
 }

+pub fn maybe_set_gauge(gauge: &Result<IntGauge>, value_opt: Option<i64>) {
+    if let Some(value) = value_opt {
+        set_gauge(gauge, value)
+    }
+}
+
+pub fn set_float_gauge(gauge: &Result<Gauge>, value: f64) {
+    if let Ok(gauge) = gauge {
+        gauge.set(value);
+    }
+}
+
+pub fn maybe_set_float_gauge(gauge: &Result<Gauge>, value_opt: Option<f64>) {
+    if let Some(value) = value_opt {
+        set_float_gauge(gauge, value)
+    }
+}
+
 /// Sets the value of a `Histogram` manually.
 pub fn observe(histogram: &Result<Histogram>, value: f64) {
     if let Ok(histogram) = histogram {
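The new float-gauge helpers fit the register-once pattern that this crate's `Result`-wrapped setters suggest: create the metric behind `lazy_static`, keep the `Result`, and hand it to the tolerant setter added above. A short sketch (the metric name and `report` function are illustrative):

    use lazy_static::lazy_static;
    use lighthouse_metrics::{set_float_gauge, try_create_float_gauge, Gauge, Result};

    lazy_static! {
        // Registration can fail (e.g. on a naming conflict), so the Result is stored.
        pub static ref PARTICIPATION_RATIO: Result<Gauge> = try_create_float_gauge(
            "validator_participation_ratio", // hypothetical metric name
            "Ratio of attesting balance to total balance",
        );
    }

    fn report(ratio: f64) {
        // A no-op if registration failed, mirroring set_gauge for IntGauge.
        set_float_gauge(&PARTICIPATION_RATIO, ratio);
    }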
@@ -6,6 +6,6 @@ edition = "2018"

 [dependencies]
 slog = "2.5.2"
-slog-term = "2.4.2"
+slog-term = "2.5.0"
 lighthouse_metrics = { path = "../lighthouse_metrics" }
 lazy_static = "1.4.0"
@@ -5,14 +5,14 @@ authors = ["Michael Sproul <michael@sigmaprime.io>"]
 edition = "2018"

 [dependencies]
-ethereum-types = "0.9"
+ethereum-types = "0.9.1"
 eth2_hashing = "0.1.0"
 lazy_static = "1.4.0"
 safe_arith = { path = "../safe_arith" }

 [dev-dependencies]
-quickcheck = "0.9.0"
-quickcheck_macros = "0.8.0"
+quickcheck = "0.9.2"
+quickcheck_macros = "0.9.1"

 [features]
 arbitrary = ["ethereum-types/arbitrary"]
@@ -7,15 +7,15 @@ edition = "2018"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-reqwest = "0.9"
-url = "1.2"
-serde = "1.0"
-futures = "0.1.25"
+reqwest = { version = "0.10.4", features = ["json"] }
+url = "2.1.1"
+serde = "1.0.110"
+futures = "0.3.5"
 types = { path = "../../../eth2/types" }
 rest_types = { path = "../rest_types" }
-hex = "0.3"
-eth2_ssz = { path = "../../../eth2/utils/ssz" }
-serde_json = "^1.0"
+hex = "0.4.2"
+eth2_ssz = "0.1.2"
+serde_json = "1.0.52"
 eth2_config = { path = "../../../eth2/utils/eth2_config" }
 proto_array_fork_choice = { path = "../../../eth2/proto_array_fork_choice" }
 operation_pool = { path = "../../../eth2/operation_pool" }
@@ -4,11 +4,7 @@
 //! Presently, this is only used for testing but it _could_ become a user-facing library.

 use eth2_config::Eth2Config;
-use futures::{future, Future, IntoFuture};
-use reqwest::{
-    r#async::{Client, ClientBuilder, Response},
-    StatusCode,
-};
+use reqwest::{Client, ClientBuilder, Response, StatusCode};
 use serde::{de::DeserializeOwned, Deserialize, Serialize};
 use ssz::Encode;
 use std::marker::PhantomData;
@@ -119,33 +115,33 @@ impl<E: EthSpec> HttpClient<E> {
         self.url.join(path).map_err(|e| e.into())
     }

-    pub fn json_post<T: Serialize>(
-        &self,
-        url: Url,
-        body: T,
-    ) -> impl Future<Item = Response, Error = Error> {
+    pub async fn json_post<T: Serialize>(&self, url: Url, body: T) -> Result<Response, Error> {
         self.client
             .post(&url.to_string())
             .json(&body)
             .send()
+            .await
             .map_err(Error::from)
     }

-    pub fn json_get<T: DeserializeOwned>(
+    pub async fn json_get<T: DeserializeOwned>(
         &self,
         mut url: Url,
         query_pairs: Vec<(String, String)>,
-    ) -> impl Future<Item = T, Error = Error> {
+    ) -> Result<T, Error> {
         query_pairs.into_iter().for_each(|(key, param)| {
             url.query_pairs_mut().append_pair(&key, &param);
         });

-        self.client
-            .get(&url.to_string())
-            .send()
-            .map_err(Error::from)
-            .and_then(|response| error_for_status(response).map_err(Error::from))
-            .and_then(|mut success| success.json::<T>().map_err(Error::from))
+        let response = self
+            .client
+            .get(&url.to_string())
+            .send()
+            .await
+            .map_err(Error::from)?;
+
+        let success = error_for_status(response).await.map_err(Error::from)?;
+        success.json::<T>().await.map_err(Error::from)
     }
 }
@@ -153,18 +149,17 @@ impl<E: EthSpec> HttpClient<E> {
 ///
 /// Distinct from `Response::error_for_status` because it includes the body of the response as
 /// text. This ensures the error message from the server is not discarded.
-fn error_for_status(
-    mut response: Response,
-) -> Box<dyn Future<Item = Response, Error = Error> + Send> {
+async fn error_for_status(response: Response) -> Result<Response, Error> {
     let status = response.status();

     if status.is_success() {
-        Box::new(future::ok(response))
+        return Ok(response);
     } else {
-        Box::new(response.text().then(move |text_result| match text_result {
+        let text_result = response.text().await;
+        match text_result {
             Err(e) => Err(Error::ReqwestError(e)),
             Ok(body) => Err(Error::DidNotSucceed { status, body }),
-        }))
+        }
     }
 }
@@ -199,94 +194,86 @@ impl<E: EthSpec> Validator<E> {
     }

     /// Produces an unsigned attestation.
-    pub fn produce_attestation(
+    pub async fn produce_attestation(
         &self,
         slot: Slot,
         committee_index: CommitteeIndex,
-    ) -> impl Future<Item = Attestation<E>, Error = Error> {
+    ) -> Result<Attestation<E>, Error> {
         let query_params = vec![
             ("slot".into(), format!("{}", slot)),
             ("committee_index".into(), format!("{}", committee_index)),
         ];

         let client = self.0.clone();
-        self.url("attestation")
-            .into_future()
-            .and_then(move |url| client.json_get(url, query_params))
+        let url = self.url("attestation")?;
+        client.json_get(url, query_params).await
     }

     /// Produces an aggregate attestation.
-    pub fn produce_aggregate_attestation(
+    pub async fn produce_aggregate_attestation(
         &self,
         attestation_data: &AttestationData,
-    ) -> impl Future<Item = Attestation<E>, Error = Error> {
+    ) -> Result<Attestation<E>, Error> {
         let query_params = vec![(
             "attestation_data".into(),
             as_ssz_hex_string(attestation_data),
         )];

         let client = self.0.clone();
-        self.url("aggregate_attestation")
-            .into_future()
-            .and_then(move |url| client.json_get(url, query_params))
+        let url = self.url("aggregate_attestation")?;
+        client.json_get(url, query_params).await
     }

     /// Posts a list of attestations to the beacon node, expecting it to verify it and publish it to the network.
-    pub fn publish_attestations(
+    pub async fn publish_attestations(
         &self,
         attestation: Vec<Attestation<E>>,
-    ) -> impl Future<Item = PublishStatus, Error = Error> {
+    ) -> Result<PublishStatus, Error> {
         let client = self.0.clone();
-        self.url("attestations")
-            .into_future()
-            .and_then(move |url| client.json_post::<_>(url, attestation))
-            .and_then(|mut response| {
-                response
-                    .text()
-                    .map(|text| (response, text))
-                    .map_err(Error::from)
-            })
-            .and_then(|(response, text)| match response.status() {
-                StatusCode::OK => Ok(PublishStatus::Valid),
-                StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(text)),
-                _ => response
-                    .error_for_status()
-                    .map_err(Error::from)
-                    .map(|_| PublishStatus::Unknown),
-            })
+        let url = self.url("attestations")?;
+        let response = client.json_post::<_>(url, attestation).await?;
+
+        match response.status() {
+            StatusCode::OK => Ok(PublishStatus::Valid),
+            StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(
+                response.text().await.map_err(Error::from)?,
+            )),
+            _ => response
+                .error_for_status()
+                .map_err(Error::from)
+                .map(|_| PublishStatus::Unknown),
+        }
     }

     /// Posts a list of signed aggregates and proofs to the beacon node, expecting it to verify it and publish it to the network.
-    pub fn publish_aggregate_and_proof(
+    pub async fn publish_aggregate_and_proof(
         &self,
         signed_aggregate_and_proofs: Vec<SignedAggregateAndProof<E>>,
-    ) -> impl Future<Item = PublishStatus, Error = Error> {
+    ) -> Result<PublishStatus, Error> {
         let client = self.0.clone();
-        self.url("aggregate_and_proofs")
-            .into_future()
-            .and_then(move |url| client.json_post::<_>(url, signed_aggregate_and_proofs))
-            .and_then(|mut response| {
-                response
-                    .text()
-                    .map(|text| (response, text))
-                    .map_err(Error::from)
-            })
-            .and_then(|(response, text)| match response.status() {
-                StatusCode::OK => Ok(PublishStatus::Valid),
-                StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(text)),
-                _ => response
-                    .error_for_status()
-                    .map_err(Error::from)
-                    .map(|_| PublishStatus::Unknown),
-            })
+        let url = self.url("aggregate_and_proofs")?;
+        let response = client
+            .json_post::<_>(url, signed_aggregate_and_proofs)
+            .await?;
+
+        match response.status() {
+            StatusCode::OK => Ok(PublishStatus::Valid),
+            StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(
+                response.text().await.map_err(Error::from)?,
+            )),
+            _ => response
+                .error_for_status()
+                .map_err(Error::from)
+                .map(|_| PublishStatus::Unknown),
+        }
     }

     /// Returns the duties required of the given validator pubkeys in the given epoch.
-    pub fn get_duties(
+    pub async fn get_duties(
         &self,
         epoch: Epoch,
         validator_pubkeys: &[PublicKey],
-    ) -> impl Future<Item = Vec<ValidatorDutyBytes>, Error = Error> {
+    ) -> Result<Vec<ValidatorDutyBytes>, Error> {
         let client = self.0.clone();

         let bulk_request = ValidatorDutiesRequest {
@@ -297,79 +284,68 @@ impl<E: EthSpec> Validator<E> {
                 .collect(),
         };

-        self.url("duties")
-            .into_future()
-            .and_then(move |url| client.json_post::<_>(url, bulk_request))
-            .and_then(|response| error_for_status(response).map_err(Error::from))
-            .and_then(|mut success| success.json().map_err(Error::from))
+        let url = self.url("duties")?;
+        let response = client.json_post::<_>(url, bulk_request).await?;
+        let success = error_for_status(response).await.map_err(Error::from)?;
+        success.json().await.map_err(Error::from)
     }

     /// Posts a block to the beacon node, expecting it to verify it and publish it to the network.
-    pub fn publish_block(
-        &self,
-        block: SignedBeaconBlock<E>,
-    ) -> impl Future<Item = PublishStatus, Error = Error> {
+    pub async fn publish_block(&self, block: SignedBeaconBlock<E>) -> Result<PublishStatus, Error> {
         let client = self.0.clone();
-        self.url("block")
-            .into_future()
-            .and_then(move |url| client.json_post::<_>(url, block))
-            .and_then(|mut response| {
-                response
-                    .text()
-                    .map(|text| (response, text))
-                    .map_err(Error::from)
-            })
-            .and_then(|(response, text)| match response.status() {
-                StatusCode::OK => Ok(PublishStatus::Valid),
-                StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(text)),
-                _ => response
-                    .error_for_status()
-                    .map_err(Error::from)
-                    .map(|_| PublishStatus::Unknown),
-            })
+        let url = self.url("block")?;
+        let response = client.json_post::<_>(url, block).await?;
+
+        match response.status() {
+            StatusCode::OK => Ok(PublishStatus::Valid),
+            StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(
+                response.text().await.map_err(Error::from)?,
+            )),
+            _ => response
+                .error_for_status()
+                .map_err(Error::from)
+                .map(|_| PublishStatus::Unknown),
+        }
     }

     /// Requests a new (unsigned) block from the beacon node.
-    pub fn produce_block(
+    pub async fn produce_block(
         &self,
         slot: Slot,
         randao_reveal: Signature,
-    ) -> impl Future<Item = BeaconBlock<E>, Error = Error> {
+    ) -> Result<BeaconBlock<E>, Error> {
         let client = self.0.clone();
-        self.url("block").into_future().and_then(move |url| {
-            client.json_get::<BeaconBlock<E>>(
+        let url = self.url("block")?;
+        client
+            .json_get::<BeaconBlock<E>>(
                 url,
                 vec![
                     ("slot".into(), format!("{}", slot.as_u64())),
                     ("randao_reveal".into(), as_ssz_hex_string(&randao_reveal)),
                 ],
             )
-        })
+            .await
     }

     /// Subscribes a list of validators to particular slots for attestation production/publication.
-    pub fn subscribe(
+    pub async fn subscribe(
         &self,
         subscriptions: Vec<ValidatorSubscription>,
-    ) -> impl Future<Item = PublishStatus, Error = Error> {
+    ) -> Result<PublishStatus, Error> {
         let client = self.0.clone();
-        self.url("subscribe")
-            .into_future()
-            .and_then(move |url| client.json_post::<_>(url, subscriptions))
-            .and_then(|mut response| {
-                response
-                    .text()
-                    .map(|text| (response, text))
-                    .map_err(Error::from)
-            })
-            .and_then(|(response, text)| match response.status() {
-                StatusCode::OK => Ok(PublishStatus::Valid),
-                StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(text)),
-                _ => response
-                    .error_for_status()
-                    .map_err(Error::from)
-                    .map(|_| PublishStatus::Unknown),
-            })
+        let url = self.url("subscribe")?;
+        let response = client.json_post::<_>(url, subscriptions).await?;
+
+        match response.status() {
+            StatusCode::OK => Ok(PublishStatus::Valid),
+            StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(
+                response.text().await.map_err(Error::from)?,
+            )),
+            _ => response
+                .error_for_status()
+                .map_err(Error::from)
+                .map(|_| PublishStatus::Unknown),
+        }
     }
 }
@@ -386,120 +362,116 @@ impl<E: EthSpec> Beacon<E> {
     }

     /// Returns the genesis time.
-    pub fn get_genesis_time(&self) -> impl Future<Item = u64, Error = Error> {
+    pub async fn get_genesis_time(&self) -> Result<u64, Error> {
         let client = self.0.clone();
-        self.url("genesis_time")
-            .into_future()
-            .and_then(move |url| client.json_get(url, vec![]))
+        let url = self.url("genesis_time")?;
+        client.json_get(url, vec![]).await
     }

     /// Returns the genesis validators root.
-    pub fn get_genesis_validators_root(&self) -> impl Future<Item = Hash256, Error = Error> {
+    pub async fn get_genesis_validators_root(&self) -> Result<Hash256, Error> {
         let client = self.0.clone();
-        self.url("genesis_validators_root")
-            .into_future()
-            .and_then(move |url| client.json_get(url, vec![]))
+        let url = self.url("genesis_validators_root")?;
+        client.json_get(url, vec![]).await
     }

     /// Returns the fork at the head of the beacon chain.
-    pub fn get_fork(&self) -> impl Future<Item = Fork, Error = Error> {
+    pub async fn get_fork(&self) -> Result<Fork, Error> {
         let client = self.0.clone();
-        self.url("fork")
-            .into_future()
-            .and_then(move |url| client.json_get(url, vec![]))
+        let url = self.url("fork")?;
+        client.json_get(url, vec![]).await
     }

     /// Returns info about the head of the canonical beacon chain.
-    pub fn get_head(&self) -> impl Future<Item = CanonicalHeadResponse, Error = Error> {
+    pub async fn get_head(&self) -> Result<CanonicalHeadResponse, Error> {
         let client = self.0.clone();
-        self.url("head")
-            .into_future()
-            .and_then(move |url| client.json_get::<CanonicalHeadResponse>(url, vec![]))
+        let url = self.url("head")?;
+        client.json_get::<CanonicalHeadResponse>(url, vec![]).await
     }

     /// Returns the set of known beacon chain head blocks. One of these will be the canonical head.
-    pub fn get_heads(&self) -> impl Future<Item = Vec<HeadBeaconBlock>, Error = Error> {
+    pub async fn get_heads(&self) -> Result<Vec<HeadBeaconBlock>, Error> {
         let client = self.0.clone();
-        self.url("heads")
-            .into_future()
-            .and_then(move |url| client.json_get(url, vec![]))
+        let url = self.url("heads")?;
+        client.json_get(url, vec![]).await
     }

     /// Returns the block and block root at the given slot.
-    pub fn get_block_by_slot(
+    pub async fn get_block_by_slot(
         &self,
         slot: Slot,
-    ) -> impl Future<Item = (SignedBeaconBlock<E>, Hash256), Error = Error> {
+    ) -> Result<(SignedBeaconBlock<E>, Hash256), Error> {
         self.get_block("slot".to_string(), format!("{}", slot.as_u64()))
+            .await
     }

     /// Returns the block and block root at the given root.
-    pub fn get_block_by_root(
+    pub async fn get_block_by_root(
         &self,
         root: Hash256,
-    ) -> impl Future<Item = (SignedBeaconBlock<E>, Hash256), Error = Error> {
+    ) -> Result<(SignedBeaconBlock<E>, Hash256), Error> {
         self.get_block("root".to_string(), root_as_string(root))
+            .await
     }

     /// Returns the block and block root at the given slot.
-    fn get_block(
+    async fn get_block(
         &self,
         query_key: String,
         query_param: String,
-    ) -> impl Future<Item = (SignedBeaconBlock<E>, Hash256), Error = Error> {
+    ) -> Result<(SignedBeaconBlock<E>, Hash256), Error> {
         let client = self.0.clone();
-        self.url("block")
-            .into_future()
-            .and_then(move |url| {
-                client.json_get::<BlockResponse<E>>(url, vec![(query_key, query_param)])
-            })
+        let url = self.url("block")?;
+        client
+            .json_get::<BlockResponse<E>>(url, vec![(query_key, query_param)])
+            .await
             .map(|response| (response.beacon_block, response.root))
     }

     /// Returns the state and state root at the given slot.
-    pub fn get_state_by_slot(
-        &self,
-        slot: Slot,
-    ) -> impl Future<Item = (BeaconState<E>, Hash256), Error = Error> {
+    pub async fn get_state_by_slot(&self, slot: Slot) -> Result<(BeaconState<E>, Hash256), Error> {
         self.get_state("slot".to_string(), format!("{}", slot.as_u64()))
+            .await
     }

     /// Returns the state and state root at the given root.
-    pub fn get_state_by_root(
+    pub async fn get_state_by_root(
         &self,
         root: Hash256,
-    ) -> impl Future<Item = (BeaconState<E>, Hash256), Error = Error> {
+    ) -> Result<(BeaconState<E>, Hash256), Error> {
         self.get_state("root".to_string(), root_as_string(root))
+            .await
     }

     /// Returns the root of the state at the given slot.
-    pub fn get_state_root(&self, slot: Slot) -> impl Future<Item = Hash256, Error = Error> {
+    pub async fn get_state_root(&self, slot: Slot) -> Result<Hash256, Error> {
         let client = self.0.clone();
-        self.url("state_root").into_future().and_then(move |url| {
-            client.json_get(url, vec![("slot".into(), format!("{}", slot.as_u64()))])
-        })
+        let url = self.url("state_root")?;
+        client
+            .json_get(url, vec![("slot".into(), format!("{}", slot.as_u64()))])
+            .await
     }

     /// Returns the root of the block at the given slot.
-    pub fn get_block_root(&self, slot: Slot) -> impl Future<Item = Hash256, Error = Error> {
+    pub async fn get_block_root(&self, slot: Slot) -> Result<Hash256, Error> {
        let client = self.0.clone();
-        self.url("block_root").into_future().and_then(move |url| {
-            client.json_get(url, vec![("slot".into(), format!("{}", slot.as_u64()))])
-        })
+        let url = self.url("block_root")?;
+        client
+            .json_get(url, vec![("slot".into(), format!("{}", slot.as_u64()))])
+            .await
     }

     /// Returns the state and state root at the given slot.
-    fn get_state(
+    async fn get_state(
         &self,
         query_key: String,
         query_param: String,
-    ) -> impl Future<Item = (BeaconState<E>, Hash256), Error = Error> {
+    ) -> Result<(BeaconState<E>, Hash256), Error> {
         let client = self.0.clone();
-        self.url("state")
-            .into_future()
-            .and_then(move |url| {
-                client.json_get::<StateResponse<E>>(url, vec![(query_key, query_param)])
-            })
+        let url = self.url("state")?;
+        client
+            .json_get::<StateResponse<E>>(url, vec![(query_key, query_param)])
+            .await
            .map(|response| (response.beacon_state, response.root))
     }
@@ -507,11 +479,11 @@ impl<E: EthSpec> Beacon<E> {
     ///
     /// If `state_root` is `Some`, the query will use the given state instead of the default
     /// canonical head state.
-    pub fn get_validators(
+    pub async fn get_validators(
         &self,
         validator_pubkeys: Vec<PublicKey>,
         state_root: Option<Hash256>,
-    ) -> impl Future<Item = Vec<ValidatorResponse>, Error = Error> {
+    ) -> Result<Vec<ValidatorResponse>, Error> {
         let client = self.0.clone();

         let bulk_request = ValidatorRequest {
@@ -522,21 +494,20 @@ impl<E: EthSpec> Beacon<E> {
                 .collect(),
         };

-        self.url("validators")
-            .into_future()
-            .and_then(move |url| client.json_post::<_>(url, bulk_request))
-            .and_then(|response| error_for_status(response).map_err(Error::from))
-            .and_then(|mut success| success.json().map_err(Error::from))
+        let url = self.url("validators")?;
+        let response = client.json_post::<_>(url, bulk_request).await?;
+        let success = error_for_status(response).await.map_err(Error::from)?;
+        success.json().await.map_err(Error::from)
     }

     /// Returns all validators.
     ///
     /// If `state_root` is `Some`, the query will use the given state instead of the default
     /// canonical head state.
-    pub fn get_all_validators(
+    pub async fn get_all_validators(
         &self,
         state_root: Option<Hash256>,
-    ) -> impl Future<Item = Vec<ValidatorResponse>, Error = Error> {
+    ) -> Result<Vec<ValidatorResponse>, Error> {
         let client = self.0.clone();

         let query_params = if let Some(state_root) = state_root {
@@ -545,19 +516,18 @@ impl<E: EthSpec> Beacon<E> {
             vec![]
         };

-        self.url("validators/all")
-            .into_future()
-            .and_then(move |url| client.json_get(url, query_params))
+        let url = self.url("validators/all")?;
+        client.json_get(url, query_params).await
     }

     /// Returns the active validators.
     ///
     /// If `state_root` is `Some`, the query will use the given state instead of the default
     /// canonical head state.
-    pub fn get_active_validators(
+    pub async fn get_active_validators(
         &self,
         state_root: Option<Hash256>,
-    ) -> impl Future<Item = Vec<ValidatorResponse>, Error = Error> {
+    ) -> Result<Vec<ValidatorResponse>, Error> {
         let client = self.0.clone();

         let query_params = if let Some(state_root) = state_root {
@@ -566,53 +536,42 @@ impl<E: EthSpec> Beacon<E> {
             vec![]
         };

-        self.url("validators/active")
-            .into_future()
-            .and_then(move |url| client.json_get(url, query_params))
+        let url = self.url("validators/active")?;
+        client.json_get(url, query_params).await
     }

     /// Returns committees at the given epoch.
-    pub fn get_committees(
-        &self,
-        epoch: Epoch,
-    ) -> impl Future<Item = Vec<Committee>, Error = Error> {
+    pub async fn get_committees(&self, epoch: Epoch) -> Result<Vec<Committee>, Error> {
         let client = self.0.clone();

-        self.url("committees").into_future().and_then(move |url| {
-            client.json_get(url, vec![("epoch".into(), format!("{}", epoch.as_u64()))])
-        })
+        let url = self.url("committees")?;
+        client
+            .json_get(url, vec![("epoch".into(), format!("{}", epoch.as_u64()))])
+            .await
     }

-    pub fn proposer_slashing(
+    pub async fn proposer_slashing(
         &self,
         proposer_slashing: ProposerSlashing,
-    ) -> impl Future<Item = bool, Error = Error> {
+    ) -> Result<bool, Error> {
         let client = self.0.clone();

-        self.url("proposer_slashing")
-            .into_future()
-            .and_then(move |url| {
-                client
-                    .json_post::<_>(url, proposer_slashing)
-                    .and_then(|response| error_for_status(response).map_err(Error::from))
-                    .and_then(|mut success| success.json().map_err(Error::from))
-            })
+        let url = self.url("proposer_slashing")?;
+        let response = client.json_post::<_>(url, proposer_slashing).await?;
+        let success = error_for_status(response).await.map_err(Error::from)?;
+        success.json().await.map_err(Error::from)
     }

-    pub fn attester_slashing(
+    pub async fn attester_slashing(
         &self,
         attester_slashing: AttesterSlashing<E>,
-    ) -> impl Future<Item = bool, Error = Error> {
+    ) -> Result<bool, Error> {
         let client = self.0.clone();

-        self.url("attester_slashing")
-            .into_future()
-            .and_then(move |url| {
-                client
-                    .json_post::<_>(url, attester_slashing)
-                    .and_then(|response| error_for_status(response).map_err(Error::from))
-                    .and_then(|mut success| success.json().map_err(Error::from))
-            })
+        let url = self.url("attester_slashing")?;
+        let response = client.json_post::<_>(url, attester_slashing).await?;
+        let success = error_for_status(response).await.map_err(Error::from)?;
+        success.json().await.map_err(Error::from)
     }
 }
@@ -628,11 +587,10 @@ impl<E: EthSpec> Spec<E> {
             .map_err(Into::into)
     }

-    pub fn get_eth2_config(&self) -> impl Future<Item = Eth2Config, Error = Error> {
+    pub async fn get_eth2_config(&self) -> Result<Eth2Config, Error> {
         let client = self.0.clone();
-        self.url("eth2_config")
-            .into_future()
-            .and_then(move |url| client.json_get(url, vec![]))
+        let url = self.url("eth2_config")?;
+        client.json_get(url, vec![]).await
     }
 }
@@ -648,18 +606,16 @@ impl<E: EthSpec> Node<E> {
             .map_err(Into::into)
     }

-    pub fn get_version(&self) -> impl Future<Item = String, Error = Error> {
+    pub async fn get_version(&self) -> Result<String, Error> {
         let client = self.0.clone();
-        self.url("version")
-            .into_future()
-            .and_then(move |url| client.json_get(url, vec![]))
+        let url = self.url("version")?;
+        client.json_get(url, vec![]).await
     }

-    pub fn syncing_status(&self) -> impl Future<Item = SyncingResponse, Error = Error> {
+    pub async fn syncing_status(&self) -> Result<SyncingResponse, Error> {
         let client = self.0.clone();
-        self.url("syncing")
-            .into_future()
-            .and_then(move |url| client.json_get(url, vec![]))
+        let url = self.url("syncing")?;
+        client.json_get(url, vec![]).await
     }
 }
@@ -676,21 +632,17 @@ impl<E: EthSpec> Advanced<E> {
     }

     /// Gets the core `ProtoArray` struct from the node.
-    pub fn get_fork_choice(&self) -> impl Future<Item = ProtoArray, Error = Error> {
+    pub async fn get_fork_choice(&self) -> Result<ProtoArray, Error> {
         let client = self.0.clone();
-        self.url("fork_choice")
-            .into_future()
-            .and_then(move |url| client.json_get(url, vec![]))
+        let url = self.url("fork_choice")?;
+        client.json_get(url, vec![]).await
     }

     /// Gets the core `PersistedOperationPool` struct from the node.
-    pub fn get_operation_pool(
-        &self,
-    ) -> impl Future<Item = PersistedOperationPool<E>, Error = Error> {
+    pub async fn get_operation_pool(&self) -> Result<PersistedOperationPool<E>, Error> {
         let client = self.0.clone();
-        self.url("operation_pool")
-            .into_future()
-            .and_then(move |url| client.json_get(url, vec![]))
+        let url = self.url("operation_pool")?;
+        client.json_get(url, vec![]).await
     }
 }
@@ -707,31 +659,26 @@ impl<E: EthSpec> Consensus<E> {
     }

     /// Gets a `IndividualVote` for each of the given `pubkeys`.
-    pub fn get_individual_votes(
+    pub async fn get_individual_votes(
         &self,
         epoch: Epoch,
         pubkeys: Vec<PublicKeyBytes>,
-    ) -> impl Future<Item = IndividualVotesResponse, Error = Error> {
+    ) -> Result<IndividualVotesResponse, Error> {
         let client = self.0.clone();
         let req_body = IndividualVotesRequest { epoch, pubkeys };

-        self.url("individual_votes")
-            .into_future()
-            .and_then(move |url| client.json_post::<_>(url, req_body))
-            .and_then(|response| error_for_status(response).map_err(Error::from))
-            .and_then(|mut success| success.json().map_err(Error::from))
+        let url = self.url("individual_votes")?;
+        let response = client.json_post::<_>(url, req_body).await?;
+        let success = error_for_status(response).await.map_err(Error::from)?;
+        success.json().await.map_err(Error::from)
     }

     /// Gets a `VoteCount` for the given `epoch`.
-    pub fn get_vote_count(
-        &self,
-        epoch: Epoch,
-    ) -> impl Future<Item = IndividualVotesResponse, Error = Error> {
+    pub async fn get_vote_count(&self, epoch: Epoch) -> Result<IndividualVotesResponse, Error> {
         let client = self.0.clone();
         let query_params = vec![("epoch".into(), format!("{}", epoch.as_u64()))];
-        self.url("vote_count")
-            .into_future()
-            .and_then(move |url| client.json_get(url, query_params))
+        let url = self.url("vote_count")?;
+        client.json_get(url, query_params).await
     }
 }
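
With every accessor now an `async fn`, call sites drive the request with an executor rather than spawning a futures-0.1 future onto `tokio::run`. A sketch of what a synchronous caller looks like after the change — the `syncing_status` stub below is a placeholder for the real client method, and `block_on` comes from `futures = "0.3"`:

    use futures::executor::block_on;

    // Placeholder for `Node::syncing_status`, which now returns a
    // std future instead of `impl Future<Item = ..., Error = ...>`.
    async fn syncing_status() -> Result<bool, String> {
        Ok(false)
    }

    fn main() {
        // was (futures 0.1): tokio::run(node.syncing_status().map(..).map_err(..))
        let is_syncing = block_on(syncing_status()).expect("request should succeed");
        println!("syncing: {}", is_syncing);
    }
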
@@ -6,11 +6,11 @@ edition = "2018"

 [dependencies]
 types = { path = "../../types" }
-eth2_ssz_derive = { path = "../ssz_derive" }
-eth2_ssz = { path = "../ssz" }
-eth2_hashing = { path = "../eth2_hashing" }
-tree_hash = { path = "../tree_hash" }
+eth2_ssz_derive = "0.1.0"
+eth2_ssz = "0.1.2"
+eth2_hashing = "0.1.0"
+tree_hash = "0.1.0"
 state_processing = { path = "../../state_processing" }
 bls = { path = "../bls" }
-serde = { version = "1.0.102", features = ["derive"] }
+serde = { version = "1.0.110", features = ["derive"] }
 rayon = "1.3.0"

@@ -5,5 +5,5 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
 edition = "2018"

 [dependencies]
-serde = "1.0.102"
-hex = "0.3"
+serde = "1.0.110"
+hex = "0.4.2"
@@ -1,17 +1,10 @@
-use hex::ToHex;
 use serde::de::{self, Visitor};
 use std::fmt;

 pub fn encode<T: AsRef<[u8]>>(data: T) -> String {
-    let mut hex = String::with_capacity(data.as_ref().len() * 2);
-
-    // Writing to a string never errors, so we can unwrap here.
-    data.write_hex(&mut hex).unwrap();
-
+    let hex = hex::encode(data);
     let mut s = "0x".to_string();

     s.push_str(hex.as_str());
-
     s
 }
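
The `use hex::ToHex;` import goes away because hex 0.4 reworked the `ToHex` trait (the `write_hex` method of 0.3 is gone); the free function `hex::encode` covers the same ground. A behaviourally equivalent one-liner plus a quick check, assuming `hex = "0.4"`:

    fn encode<T: AsRef<[u8]>>(data: T) -> String {
        // `hex::encode` allocates the lowercase hex string; we only add the prefix.
        format!("0x{}", hex::encode(data))
    }

    fn main() {
        assert_eq!(encode([0xdeu8, 0xad, 0xbe, 0xef]), "0xdeadbeef");
        assert_eq!(encode([0u8; 0]), "0x");
    }
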
@@ -8,4 +8,4 @@ edition = "2018"
 types = { path = "../../types" }
 lazy_static = "1.4.0"
 lighthouse_metrics = { path = "../lighthouse_metrics" }
-parking_lot = "0.9.0"
+parking_lot = "0.10.2"

@@ -14,7 +14,7 @@ eth2_ssz_derive = "0.1.0"

 [dependencies]
 ethereum-types = "0.9.1"
-smallvec = "1.2.0"
+smallvec = "1.4.0"

 [features]
 arbitrary = ["ethereum-types/arbitrary"]

@@ -11,5 +11,5 @@ name = "ssz_derive"
 proc-macro = true

 [dependencies]
-syn = "0.15"
-quote = "0.6"
+syn = "1.0.18"
+quote = "1.0.4"

@@ -54,7 +54,8 @@ fn get_serializable_field_types<'a>(struct_data: &'a syn::DataStruct) -> Vec<&'a
 /// The field attribute is: `#[ssz(skip_serializing)]`
 fn should_skip_serializing(field: &syn::Field) -> bool {
     field.attrs.iter().any(|attr| {
-        attr.path.is_ident("ssz") && attr.tts.to_string().replace(" ", "") == "(skip_serializing)"
+        attr.path.is_ident("ssz")
+            && attr.tokens.to_string().replace(" ", "") == "(skip_serializing)"
     })
 }

@@ -148,7 +149,8 @@ pub fn ssz_encode_derive(input: TokenStream) -> TokenStream {
 /// The field attribute is: `#[ssz(skip_deserializing)]`
 fn should_skip_deserializing(field: &syn::Field) -> bool {
     field.attrs.iter().any(|attr| {
-        attr.path.is_ident("ssz") && attr.tts.to_string().replace(" ", "") == "(skip_deserializing)"
+        attr.path.is_ident("ssz")
+            && attr.tokens.to_string().replace(" ", "") == "(skip_deserializing)"
     })
 }
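
Both hunks are mechanical fallout from the syn 0.15 -> 1.0 bump in this commit: `Attribute::tts` was renamed to `Attribute::tokens`, with the matching logic unchanged. A self-contained sketch of the same check against syn 1.x, assuming `syn = "1.0"` with default features; the struct is just test input:

    use syn::{parse_quote, Data, DeriveInput};

    // Same shape as the derive-macro helper: does the field carry
    // `#[ssz(skip_serializing)]`?
    fn should_skip_serializing(field: &syn::Field) -> bool {
        field.attrs.iter().any(|attr| {
            attr.path.is_ident("ssz")
                && attr.tokens.to_string().replace(" ", "") == "(skip_serializing)"
        })
    }

    fn main() {
        let input: DeriveInput = parse_quote! {
            struct Foo {
                #[ssz(skip_serializing)]
                cache: u64,
                data: u64,
            }
        };
        if let Data::Struct(s) = &input.data {
            let mut fields = s.fields.iter();
            assert!(should_skip_serializing(fields.next().unwrap()));
            assert!(!should_skip_serializing(fields.next().unwrap()));
        }
    }
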
@@ -9,13 +9,13 @@ name = "ssz_types"

 [dependencies]
 tree_hash = "0.1.0"
-serde = "1.0.102"
-serde_derive = "1.0.102"
+serde = "1.0.110"
+serde_derive = "1.0.110"
 serde_hex = { path = "../serde_hex" }
 eth2_ssz = "0.1.2"
-typenum = "1.11.2"
-arbitrary = { version = "0.4", features = ["derive"], optional = true }
+typenum = "1.12.0"
+arbitrary = { version = "0.4.4", features = ["derive"], optional = true }

 [dev-dependencies]
 serde_yaml = "0.8.11"
-tree_hash_derive = "0.2"
+tree_hash_derive = "0.2.0"

@@ -9,9 +9,9 @@ name = "benches"
 harness = false

 [dev-dependencies]
-criterion = "0.3.0"
+criterion = "0.3.2"
 yaml-rust = "0.4.3"
-hex = "0.3"
+hex = "0.4.2"

 [dependencies]
 eth2_hashing = "0.1.0"

@@ -9,5 +9,5 @@ description = "Procedural derive macros for implementation of TestRandom trait"
 proc-macro = true

 [dependencies]
-syn = "0.15"
-quote = "0.6"
+syn = "1.0.18"
+quote = "1.0.4"

@@ -10,7 +10,7 @@ use syn::{parse_macro_input, DeriveInput};
 /// The field attribute is: `#[test_random(default)]`
 fn should_use_default(field: &syn::Field) -> bool {
     field.attrs.iter().any(|attr| {
-        attr.path.is_ident("test_random") && attr.tts.to_string().replace(" ", "") == "(default)"
+        attr.path.is_ident("test_random") && attr.tokens.to_string().replace(" ", "") == "(default)"
     })
 }

@@ -11,16 +11,16 @@ name = "benches"
 harness = false

 [dev-dependencies]
-criterion = "0.3.0"
-rand = "0.7.2"
-tree_hash_derive = "0.2"
+criterion = "0.3.2"
+rand = "0.7.3"
+tree_hash_derive = "0.2.0"
 types = { path = "../../types" }
 lazy_static = "1.4.0"

 [dependencies]
-ethereum-types = "0.9"
+ethereum-types = "0.9.1"
 eth2_hashing = "0.1.0"
-smallvec = "1.2.0"
+smallvec = "1.4.0"

 [features]
 arbitrary = ["ethereum-types/arbitrary"]

@@ -10,5 +10,5 @@ license = "Apache-2.0"
 proc-macro = true

 [dependencies]
-syn = "0.15"
-quote = "0.6"
+syn = "1.0.18"
+quote = "1.0.4"

@@ -51,7 +51,7 @@ fn get_cache_field_for(field: &syn::Field) -> Option<syn::Ident> {
     let parsed_attrs = cached_tree_hash_attr_metas(&field.attrs);
     if let [Meta::List(MetaList { nested, .. })] = &parsed_attrs[..] {
         nested.iter().find_map(|x| match x {
-            NestedMeta::Meta(Meta::Word(cache_field_ident)) => Some(cache_field_ident.clone()),
+            NestedMeta::Meta(Meta::Path(path)) => path.get_ident().cloned(),
             _ => None,
         })
     } else {
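
This hunk is the other notable syn 1.0 change: a bare identifier inside an attribute list is no longer `Meta::Word(Ident)` but a `Meta::Path`, and a path only collapses to an `Ident` when it has exactly one segment, hence `path.get_ident().cloned()`. A small sketch of the lookup, again assuming `syn = "1.0"` defaults; the attribute name mirrors the `cached_tree_hash` one used above:

    use syn::{parse_quote, Attribute, Meta, NestedMeta};

    fn main() {
        // An outer attribute naming the cache field, as on a struct field.
        let attr: Attribute = parse_quote!(#[cached_tree_hash(cache_a)]);

        // `parse_meta` structures the attribute tokens into `Meta` items.
        let ident = match attr.parse_meta() {
            Ok(Meta::List(list)) => list.nested.iter().find_map(|x| match x {
                // syn 0.15 matched `NestedMeta::Meta(Meta::Word(ident))` here.
                NestedMeta::Meta(Meta::Path(path)) => path.get_ident().cloned(),
                _ => None,
            }),
            _ => None,
        };

        assert_eq!(ident.unwrap().to_string(), "cache_a");
    }
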
@@ -73,7 +73,8 @@ fn cached_tree_hash_attr_metas(attrs: &[Attribute]) -> Vec<Meta> {
 /// The field attribute is: `#[tree_hash(skip_hashing)]`
 fn should_skip_hashing(field: &syn::Field) -> bool {
     field.attrs.iter().any(|attr| {
-        attr.path.is_ident("tree_hash") && attr.tts.to_string().replace(" ", "") == "(skip_hashing)"
+        attr.path.is_ident("tree_hash")
+            && attr.tokens.to_string().replace(" ", "") == "(skip_hashing)"
     })
 }