Merge branch 'master' into proto-array + more changes
@@ -25,9 +25,17 @@ pub enum Error {
    InvalidNodeDelta(usize),
    DeltaOverflow(usize),
    IndexOverflow(&'static str),
    InvalidDeltaLen { deltas: usize, indices: usize },
    InvalidDeltaLen {
        deltas: usize,
        indices: usize,
    },
    RevertedFinalizedEpoch,
    InvalidFindHeadStartRoot,
    InvalidFindHeadStartRoot {
        justified_epoch: Epoch,
        finalized_epoch: Epoch,
        node_justified_epoch: Epoch,
        node_finalized_epoch: Epoch,
    },
}

#[derive(Default, PartialEq, Clone, Encode, Decode)]
@@ -159,10 +167,6 @@ impl ProtoArrayForkChoice {

        let new_balances = justified_state_balances;

        proto_array
            .maybe_prune(finalized_epoch, finalized_root)
            .map_err(|e| format!("find_head maybe_prune failed: {:?}", e))?;

        let deltas = compute_deltas(
            &proto_array.indices,
            &mut votes,
@@ -172,7 +176,7 @@ impl ProtoArrayForkChoice {
        .map_err(|e| format!("find_head compute_deltas failed: {:?}", e))?;

        proto_array
            .apply_score_changes(deltas, justified_epoch)
            .apply_score_changes(deltas, justified_epoch, finalized_epoch)
            .map_err(|e| format!("find_head apply_score_changes failed: {:?}", e))?;

        *old_balances = new_balances.to_vec();

@@ -61,6 +61,7 @@ impl ProtoArray {
        &mut self,
        mut deltas: Vec<i64>,
        justified_epoch: Epoch,
        finalized_epoch: Epoch,
    ) -> Result<(), Error> {
        if deltas.len() != self.indices.len() {
            return Err(Error::InvalidDeltaLen {
@@ -73,9 +74,11 @@ impl ProtoArray {
        // finalized/justified epoch of all nodes against the epochs in `self`.
        //
        // This behaviour is equivalent to the `filter_block_tree` function in the spec.
        self.ffg_update_required = justified_epoch != self.justified_epoch;
        self.ffg_update_required =
            justified_epoch != self.justified_epoch || finalized_epoch != self.finalized_epoch;
        if self.ffg_update_required {
            self.justified_epoch = justified_epoch;
            self.finalized_epoch = finalized_epoch;
        }

        // Iterate backwards through all indices in `self.nodes`.
@@ -238,7 +241,7 @@ impl ProtoArray {

        // If the blocks justified and finalized epochs match our values, then try and see if it
        // becomes the best child.
        if justified_epoch == self.justified_epoch && finalized_epoch == self.finalized_epoch {
        if self.node_is_viable_for_head(&node) {
            if let Some(parent_index) = node.parent {
                let parent = self
                    .nodes
@@ -285,10 +288,13 @@ impl ProtoArray {

        // It is a logic error to try and find the head starting from a block that does not match
        // the filter.
        if justified_node.justified_epoch != self.justified_epoch
            || justified_node.finalized_epoch != self.finalized_epoch
        {
            return Err(Error::InvalidFindHeadStartRoot);
        if !self.node_is_viable_for_head(&justified_node) {
            return Err(Error::InvalidFindHeadStartRoot {
                justified_epoch: self.justified_epoch,
                finalized_epoch: self.finalized_epoch,
                node_justified_epoch: justified_node.justified_epoch,
                node_finalized_epoch: justified_node.finalized_epoch,
            });
        }

        let best_descendant_index = justified_node
@@ -422,6 +428,8 @@ impl ProtoArray {
    /// Any node that has a different finalized or justified epoch should not be viable for the
    /// head.
    fn node_is_viable_for_head(&self, node: &ProtoNode) -> bool {
        node.justified_epoch == self.justified_epoch && node.finalized_epoch == self.finalized_epoch
        (node.justified_epoch == self.justified_epoch || self.justified_epoch == Epoch::new(0))
            && (node.finalized_epoch == self.finalized_epoch
                || self.finalized_epoch == Epoch::new(0))
    }
}

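Note: the relaxed check above lets any node pass the filter while the store is still at epoch 0, i.e. before any real justification or finalization has occurred. Below is a minimal standalone sketch of that filter; `Epoch`, `Node` and `ForkChoiceView` here are simplified stand-ins for illustration, not the actual Lighthouse types.

// Sketch only: stand-in types, not the real `ProtoNode`/`ProtoArray`.
#[derive(Clone, Copy, PartialEq, Debug)]
struct Epoch(u64);

impl Epoch {
    fn new(e: u64) -> Self {
        Epoch(e)
    }
}

struct Node {
    justified_epoch: Epoch,
    finalized_epoch: Epoch,
}

struct ForkChoiceView {
    justified_epoch: Epoch,
    finalized_epoch: Epoch,
}

impl ForkChoiceView {
    /// A node is viable for the head only if its justified/finalized epochs
    /// match the store's, with an escape hatch while the store is still at
    /// epoch 0 (nothing justified/finalized yet).
    fn node_is_viable_for_head(&self, node: &Node) -> bool {
        (node.justified_epoch == self.justified_epoch || self.justified_epoch == Epoch::new(0))
            && (node.finalized_epoch == self.finalized_epoch
                || self.finalized_epoch == Epoch::new(0))
    }
}

fn main() {
    let store = ForkChoiceView {
        justified_epoch: Epoch::new(3),
        finalized_epoch: Epoch::new(2),
    };
    let stale = Node {
        justified_epoch: Epoch::new(1),
        finalized_epoch: Epoch::new(0),
    };
    // A node carrying stale FFG information is filtered out of head selection.
    assert!(!store.node_is_viable_for_head(&stale));
}
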
@@ -18,19 +18,22 @@ pub fn initiate_validator_exit<T: EthSpec>(
        return Ok(());
    }

    // Ensure the exit cache is built.
    state.exit_cache.build(&state.validators, spec)?;

    // Compute exit queue epoch
    let delayed_epoch = state.compute_activation_exit_epoch(state.current_epoch(), spec);
    let mut exit_queue_epoch = state
        .exit_cache
        .max_epoch()
        .max_epoch()?
        .map_or(delayed_epoch, |epoch| max(epoch, delayed_epoch));
    let exit_queue_churn = state.exit_cache.get_churn_at(exit_queue_epoch);
    let exit_queue_churn = state.exit_cache.get_churn_at(exit_queue_epoch)?;

    if exit_queue_churn >= state.get_churn_limit(spec)? {
        exit_queue_epoch += 1;
    }

    state.exit_cache.record_validator_exit(exit_queue_epoch);
    state.exit_cache.record_validator_exit(exit_queue_epoch)?;
    state.validators[index].exit_epoch = exit_queue_epoch;
    state.validators[index].withdrawable_epoch =
        exit_queue_epoch + spec.min_validator_withdrawability_delay;

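For reference, the exit-queue placement above boils down to: start at the later of the delayed epoch and the latest already-scheduled exit epoch, and push one epoch further out if that epoch is already at the churn limit. A small standalone sketch, with plain `u64` epochs and a `HashMap` standing in for the real `Epoch` and `ExitCache` types:

// Illustrative only; the real code goes through `ExitCache` and `ChainSpec`.
use std::cmp::max;
use std::collections::HashMap;

fn exit_queue_epoch(
    exits_per_epoch: &HashMap<u64, u64>, // epoch -> validators already exiting there
    delayed_epoch: u64,                  // earliest epoch an exit may be scheduled
    churn_limit: u64,                    // max exits allowed per epoch
) -> u64 {
    // Start from the later of the delayed epoch and the latest known exit epoch.
    let mut epoch = exits_per_epoch
        .keys()
        .max()
        .cloned()
        .map_or(delayed_epoch, |e| max(e, delayed_epoch));

    // If that epoch is already full, the exit lands one epoch later.
    if exits_per_epoch.get(&epoch).cloned().unwrap_or(0) >= churn_limit {
        epoch += 1;
    }
    epoch
}

fn main() {
    let mut exits = HashMap::new();
    exits.insert(10u64, 4u64);
    // With a churn limit of 4, epoch 10 is full, so the next exit is scheduled for 11.
    assert_eq!(exit_queue_epoch(&exits, 10, 4), 11);
}
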
@@ -7,7 +7,7 @@ use types::*;
/// Any of the `...Error` variants indicate that at some point during block (and block operation)
/// verification, there was an error. There is no indication as to _where_ that error happened
/// (e.g., when processing attestations instead of when processing deposits).
#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Clone)]
pub enum BlockProcessingError {
    RandaoSignatureInvalid,
    BulkSignatureVerificationFailed,

@@ -122,7 +122,7 @@ pub type AttestationValidationError = BlockOperationError<AttestationInvalid>;
pub type DepositValidationError = BlockOperationError<DepositInvalid>;
pub type ExitValidationError = BlockOperationError<ExitInvalid>;

#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Clone)]
pub enum BlockOperationError<T> {
    Invalid(T),
    BeaconStateError(BeaconStateError),

@@ -153,7 +153,7 @@ impl<T> From<ssz_types::Error> for BlockOperationError<T> {
    }
}

#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Clone)]
pub enum HeaderInvalid {
    ProposalSignatureInvalid,
    StateSlotMismatch,

@@ -161,7 +161,7 @@ pub enum HeaderInvalid {
    ProposerSlashed(usize),
}

#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Clone)]
pub enum ProposerSlashingInvalid {
    /// The proposer index is not a known validator.
    ProposerUnknown(u64),

@@ -179,7 +179,7 @@ pub enum ProposerSlashingInvalid {
    BadProposal2Signature,
}

#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Clone)]
pub enum AttesterSlashingInvalid {
    /// The attestations were not in conflict.
    NotSlashable,

@@ -196,7 +196,7 @@ pub enum AttesterSlashingInvalid {
}

/// Describes why an object is invalid.
#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Clone)]
pub enum AttestationInvalid {
    /// Commmittee index exceeds number of committees in that slot.
    BadCommitteeIndex,

@@ -251,7 +251,7 @@ impl From<BlockOperationError<IndexedAttestationInvalid>>
    }
}

#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Clone)]
pub enum IndexedAttestationInvalid {
    /// The number of indices exceeds the global maximum.
    ///

@@ -270,7 +270,7 @@ pub enum IndexedAttestationInvalid {
    SignatureSetError(SignatureSetError),
}

#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Clone)]
pub enum DepositInvalid {
    /// The signature (proof-of-possession) does not match the given pubkey.
    BadSignature,

@@ -281,7 +281,7 @@ pub enum DepositInvalid {
    BadMerkleProof,
}

#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Clone)]
pub enum ExitInvalid {
    /// The specified validator is not active.
    NotActive(u64),

@@ -14,7 +14,7 @@ use types::{

pub type Result<T> = std::result::Result<T, Error>;

#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Clone)]
pub enum Error {
    /// Signature verification failed. The block is invalid.
    SignatureInvalid,
@@ -248,10 +248,14 @@ fn validator_pubkey<'a, T: EthSpec>(
        .ok_or_else(|| Error::ValidatorUnknown(validator_index as u64))?
        .pubkey;

    pubkey_bytes
        .try_into()
        .map(|pubkey: PublicKey| Cow::Owned(pubkey.as_raw().point.clone()))
        .map_err(|_| Error::BadBlsBytes {
            validator_index: validator_index as u64,
        })
    if let Some(pubkey) = pubkey_bytes.decompressed() {
        Ok(Cow::Borrowed(&pubkey.as_raw().point))
    } else {
        pubkey_bytes
            .try_into()
            .map(|pubkey: PublicKey| Cow::Owned(pubkey.as_raw().point.clone()))
            .map_err(|_| Error::BadBlsBytes {
                validator_index: validator_index as u64,
            })
    }
}

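The change above avoids re-deserializing a validator's BLS public key on every signature check when a decompressed copy has already been cached on the bytes wrapper. A rough sketch of the pattern with stand-in types; `Point`, `PubkeyBytes` and `expensive_decompress` are illustrative only, not the real `bls` crate API:

// Sketch of "borrow the cached point if present, otherwise decompress on demand".
use std::borrow::Cow;

#[derive(Clone)]
struct Point(u64); // stand-in for the decompressed BLS curve point

struct PubkeyBytes {
    bytes: [u8; 48],
    decompressed: Option<Point>,
}

fn expensive_decompress(_bytes: &[u8; 48]) -> Result<Point, String> {
    // In the real code this is the costly BLS point decompression.
    Ok(Point(0))
}

/// Borrow the cached point if it exists, otherwise decompress and return an owned copy.
fn point_of(pk: &PubkeyBytes) -> Result<Cow<'_, Point>, String> {
    if let Some(point) = &pk.decompressed {
        Ok(Cow::Borrowed(point))
    } else {
        expensive_decompress(&pk.bytes).map(Cow::Owned)
    }
}

fn main() {
    let pk = PubkeyBytes { bytes: [0u8; 48], decompressed: None };
    // No cache yet, so the slow path produces an owned point.
    assert!(matches!(point_of(&pk), Ok(Cow::Owned(_))));
}
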
@@ -28,7 +28,7 @@ mod tests;
pub const CACHED_EPOCHS: usize = 3;
const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1;

#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Clone)]
pub enum Error {
    EpochOutOfBounds,
    SlotOutOfBounds,
@@ -58,9 +58,11 @@ pub enum Error {
    PreviousCommitteeCacheUninitialized,
    CurrentCommitteeCacheUninitialized,
    RelativeEpochError(RelativeEpochError),
    ExitCacheUninitialized,
    CommitteeCacheUninitialized(Option<RelativeEpoch>),
    SszTypesError(ssz_types::Error),
    CachedTreeHashError(cached_tree_hash::Error),
    InvalidValidatorPubkey(ssz::DecodeError),
}

/// Control whether an epoch-indexed field can be indexed at the next epoch or not.
@@ -464,6 +466,21 @@ impl<T: EthSpec> BeaconState<T> {
        Ok(hash(&preimage))
    }

    /// Get the canonical root of the `latest_block_header`, filling in its state root if necessary.
    ///
    /// It needs filling in on all slots where there isn't a skip.
    ///
    /// Spec v0.9.1
    pub fn get_latest_block_root(&self, current_state_root: Hash256) -> Hash256 {
        if self.latest_block_header.state_root.is_zero() {
            let mut latest_block_header = self.latest_block_header.clone();
            latest_block_header.state_root = current_state_root;
            latest_block_header.canonical_root()
        } else {
            self.latest_block_header.canonical_root()
        }
    }

    /// Safely obtains the index for latest block roots, given some `slot`.
    ///
    /// Spec v0.9.1
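A note on why the state root can be zero here: block processing stores the latest block header with a zeroed state root (the post-state root is not yet known), and it is only back-filled once the next state root is available. A simplified sketch of that back-fill, with `[u8; 32]` standing in for `Hash256` and a placeholder root function instead of the real SSZ tree hash:

// Sketch only: `Root` stands in for `Hash256`; `canonical_root` is a placeholder.
type Root = [u8; 32];

#[derive(Clone)]
struct Header {
    state_root: Root,
    parent_root: Root,
}

fn is_zero(root: &Root) -> bool {
    root.iter().all(|b| *b == 0)
}

fn canonical_root(header: &Header) -> Root {
    // Placeholder: XOR of the fields. The real code hashes the whole header (SSZ tree hash).
    let mut out = [0u8; 32];
    for i in 0..32 {
        out[i] = header.state_root[i] ^ header.parent_root[i];
    }
    out
}

/// Mirrors `get_latest_block_root`: back-fill the zeroed state root before hashing.
fn latest_block_root(latest_block_header: &Header, current_state_root: Root) -> Root {
    if is_zero(&latest_block_header.state_root) {
        let mut header = latest_block_header.clone();
        header.state_root = current_state_root;
        canonical_root(&header)
    } else {
        canonical_root(latest_block_header)
    }
}
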
@@ -764,13 +781,20 @@ impl<T: EthSpec> BeaconState<T> {

    /// Build all the caches, if they need to be built.
    pub fn build_all_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> {
        self.build_all_committee_caches(spec)?;
        self.update_pubkey_cache()?;
        self.build_tree_hash_cache()?;
        self.exit_cache.build(&self.validators, spec)?;
        self.decompress_validator_pubkeys()?;

        Ok(())
    }

    /// Build all committee caches, if they need to be built.
    pub fn build_all_committee_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> {
        self.build_committee_cache(RelativeEpoch::Previous, spec)?;
        self.build_committee_cache(RelativeEpoch::Current, spec)?;
        self.build_committee_cache(RelativeEpoch::Next, spec)?;
        self.update_pubkey_cache()?;
        self.build_tree_hash_cache()?;
        self.exit_cache.build_from_registry(&self.validators, spec);

        Ok(())
    }

@@ -923,6 +947,23 @@ impl<T: EthSpec> BeaconState<T> {
        self.tree_hash_cache = BeaconTreeHashCache::default();
    }

    /// Iterate through all validators and decompress their public key, unless it has already been
    /// decompressed.
    ///
    /// Does not check the validity of already decompressed keys.
    pub fn decompress_validator_pubkeys(&mut self) -> Result<(), Error> {
        self.validators.iter_mut().try_for_each(|validator| {
            if validator.pubkey.decompressed().is_none() {
                validator
                    .pubkey
                    .decompress()
                    .map_err(|e| Error::InvalidValidatorPubkey(e))
            } else {
                Ok(())
            }
        })
    }

    pub fn clone_without_caches(&self) -> Self {
        BeaconState {
            genesis_time: self.genesis_time,

@@ -1,35 +1,68 @@
use super::{ChainSpec, Epoch, Validator};
use super::{BeaconStateError, ChainSpec, Epoch, Validator};
use serde_derive::{Deserialize, Serialize};
use std::collections::HashMap;

/// Map from exit epoch to the number of validators known to be exiting/exited at that epoch.
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
pub struct ExitCache(HashMap<Epoch, u64>);
pub struct ExitCache {
    initialized: bool,
    exits_per_epoch: HashMap<Epoch, u64>,
}

impl ExitCache {
    /// Ensure the cache is built, and do nothing if it's already initialized.
    pub fn build(
        &mut self,
        validators: &[Validator],
        spec: &ChainSpec,
    ) -> Result<(), BeaconStateError> {
        if self.initialized {
            Ok(())
        } else {
            self.force_build(validators, spec)
        }
    }

    /// Add all validators with a non-trivial exit epoch to the cache.
    pub fn build_from_registry(&mut self, validators: &[Validator], spec: &ChainSpec) {
    pub fn force_build(
        &mut self,
        validators: &[Validator],
        spec: &ChainSpec,
    ) -> Result<(), BeaconStateError> {
        self.initialized = true;
        validators
            .iter()
            .filter(|validator| validator.exit_epoch != spec.far_future_epoch)
            .for_each(|validator| self.record_validator_exit(validator.exit_epoch));
            .try_for_each(|validator| self.record_validator_exit(validator.exit_epoch))
    }

    /// Check that the cache is initialized and return an error if it isn't.
    pub fn check_initialized(&self) -> Result<(), BeaconStateError> {
        if self.initialized {
            Ok(())
        } else {
            Err(BeaconStateError::ExitCacheUninitialized)
        }
    }

    /// Record the exit of a single validator in the cache.
    ///
    /// Must only be called once per exiting validator.
    pub fn record_validator_exit(&mut self, exit_epoch: Epoch) {
        *self.0.entry(exit_epoch).or_insert(0) += 1;
    pub fn record_validator_exit(&mut self, exit_epoch: Epoch) -> Result<(), BeaconStateError> {
        self.check_initialized()?;
        *self.exits_per_epoch.entry(exit_epoch).or_insert(0) += 1;
        Ok(())
    }

    /// Get the greatest epoch for which validator exits are known.
    pub fn max_epoch(&self) -> Option<Epoch> {
        // This could probably be made even faster by caching the maximum.
        self.0.keys().max().cloned()
    pub fn max_epoch(&self) -> Result<Option<Epoch>, BeaconStateError> {
        self.check_initialized()?;
        Ok(self.exits_per_epoch.keys().max().cloned())
    }

    /// Get the number of validators exiting/exited at a given epoch, or zero if not known.
    pub fn get_churn_at(&self, epoch: Epoch) -> u64 {
        self.0.get(&epoch).cloned().unwrap_or(0)
    pub fn get_churn_at(&self, epoch: Epoch) -> Result<u64, BeaconStateError> {
        self.check_initialized()?;
        Ok(self.exits_per_epoch.get(&epoch).cloned().unwrap_or(0))
    }
}

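The refactor above replaces the tuple-struct `ExitCache` with an explicitly initialized one, so reads before the cache is built surface an error instead of silently returning empty results. A usage sketch with simplified stand-in types; plain `u64` epochs and a local `CacheError` take the place of `Epoch` and `BeaconStateError`:

// Sketch only: simplified cache mirroring the initialized-checked API above.
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum CacheError {
    Uninitialized,
}

#[derive(Default)]
struct ExitCache {
    initialized: bool,
    exits_per_epoch: HashMap<u64, u64>,
}

impl ExitCache {
    fn force_build(&mut self, exit_epochs: &[u64]) -> Result<(), CacheError> {
        self.initialized = true;
        exit_epochs
            .iter()
            .try_for_each(|epoch| self.record_validator_exit(*epoch))
    }

    fn check_initialized(&self) -> Result<(), CacheError> {
        if self.initialized {
            Ok(())
        } else {
            Err(CacheError::Uninitialized)
        }
    }

    fn record_validator_exit(&mut self, epoch: u64) -> Result<(), CacheError> {
        self.check_initialized()?;
        *self.exits_per_epoch.entry(epoch).or_insert(0) += 1;
        Ok(())
    }

    fn get_churn_at(&self, epoch: u64) -> Result<u64, CacheError> {
        self.check_initialized()?;
        Ok(self.exits_per_epoch.get(&epoch).cloned().unwrap_or(0))
    }
}

fn main() {
    let mut cache = ExitCache::default();
    // Reading before the cache is built now yields an explicit error.
    assert_eq!(cache.get_churn_at(5), Err(CacheError::Uninitialized));
    cache.force_build(&[5, 5, 7]).unwrap();
    assert_eq!(cache.get_churn_at(5), Ok(2));
}
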
@@ -1,5 +1,4 @@
use crate::{test_utils::TestRandom, Epoch, Hash256, PublicKeyBytes};

use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use test_random_derive::TestRandom;

@@ -83,7 +83,10 @@ macro_rules! bytes_struct {
        #[doc = $small_name]
        #[doc = " (e.g., from the deposit contract)."]
        #[derive(Clone)]
        pub struct $name([u8; $byte_size]);
        pub struct $name {
            bytes: [u8; $byte_size],
            decompressed: Option<$type>
        }
    };
    ($name: ident, $type: ty, $byte_size: expr, $small_name: expr) => {
        bytes_struct!($name, $type, $byte_size, $small_name, stringify!($type),
@@ -91,15 +94,21 @@ macro_rules! bytes_struct {

        impl $name {
            pub fn from_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
                Ok(Self(Self::get_bytes(bytes)?))
                Ok(Self {
                    bytes: Self::get_bytes(bytes)?,
                    decompressed: None
                })
            }

            pub fn empty() -> Self {
                Self([0; $byte_size])
                Self {
                    bytes: [0; $byte_size],
                    decompressed: None
                }
            }

            pub fn as_bytes(&self) -> Vec<u8> {
                self.0.to_vec()
                self.bytes.to_vec()
            }

            fn get_bytes(bytes: &[u8]) -> Result<[u8; $byte_size], ssz::DecodeError> {
@@ -114,23 +123,32 @@ macro_rules! bytes_struct {
                Ok(result)
            }
        }

            pub fn decompress(&mut self) -> Result<(), ssz::DecodeError> {
                self.decompressed = Some(<&Self as std::convert::TryInto<$type>>::try_into(self)?);
                Ok(())
            }

            pub fn decompressed(&self) -> &Option<$type> {
                &self.decompressed
            }
        }

        impl std::fmt::Debug for $name {
            fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
                self.0[..].fmt(formatter)
                self.bytes[..].fmt(formatter)
            }
        }

        impl PartialEq for $name {
            fn eq(&self, other: &Self) -> bool {
                &self.0[..] == &other.0[..]
                &self.bytes[..] == &other.bytes[..]
            }
        }

        impl std::hash::Hash for $name {
            fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
                self.0.hash(state)
                self.bytes.hash(state)
            }
        }

@@ -140,7 +158,7 @@ macro_rules! bytes_struct {
            type Error = ssz::DecodeError;

            fn try_into(self) -> Result<$type, Self::Error> {
                <$type>::from_bytes(&self.0[..])
                <$type>::from_bytes(&self.bytes[..])
            }
        }

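For clarity, here is a non-macro sketch of the caching pattern the `bytes_struct!` changes introduce: a fixed-size byte wrapper that can memoize its decoded form, while equality and hashing stay defined over the raw bytes only so the cache never affects comparisons. `Decoded`, `Bytes48` and the decode step are illustrative stand-ins, not the real BLS types.

// Sketch only: simplified, non-macro version of the bytes wrapper with a decode cache.
#[derive(Clone, Debug, PartialEq)]
struct Decoded(Vec<u8>);

#[derive(Clone)]
struct Bytes48 {
    bytes: [u8; 48],
    decompressed: Option<Decoded>,
}

impl Bytes48 {
    fn empty() -> Self {
        Self { bytes: [0; 48], decompressed: None }
    }

    /// Decode once and memoize the result; later reads reuse the cached value.
    fn decompress(&mut self) -> Result<(), String> {
        // Stand-in for the real (expensive) BLS point decompression.
        self.decompressed = Some(Decoded(self.bytes.to_vec()));
        Ok(())
    }

    fn decompressed(&self) -> &Option<Decoded> {
        &self.decompressed
    }
}

// Equality is defined over the raw bytes only, mirroring the manual
// `PartialEq`/`Hash` impls in the macro above, so the cache field can never
// make two otherwise-identical keys compare unequal.
impl PartialEq for Bytes48 {
    fn eq(&self, other: &Self) -> bool {
        self.bytes[..] == other.bytes[..]
    }
}

fn main() {
    let mut a = Bytes48::empty();
    let b = Bytes48::empty();
    a.decompress().unwrap();
    assert!(a == b); // caching does not change equality
    assert!(a.decompressed().is_some());
}
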
@@ -81,7 +81,7 @@ impl fmt::Display for PublicKey {

impl fmt::Debug for PublicKey {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "0x{}", self.as_hex_string())
        write!(f, "{}", self.as_hex_string())
    }
}

@@ -10,7 +10,7 @@ pub use crate::multi_cache::MultiTreeHashCache;
use ethereum_types::H256 as Hash256;
use tree_hash::TreeHash;

#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Clone)]
pub enum Error {
    /// Attempting to provide more than 2^depth leaves to a Merkle tree is disallowed.
    TooManyLeaves,

@@ -1,2 +1,4 @@
- "enr:-Iu4QPONEndy6aWOJLWBaCLS1KRg7YPeK0qptnxJzuBW8OcFP9tLgA_ewmAvHBzn9zPG6XIgdH83Mq_5cyLF5yWRYmYBgmlkgnY0gmlwhDaZ6cGJc2VjcDI1NmsxoQK-9tWOso2Kco7L5L-zKoj-MwPfeBbEP12bxr9bqzwZV4N0Y3CCIyiDdWRwgiMo"
- "enr:-Iu4QGVXt2bKzkITBsPKqFOhxPMmZhMJvEzPdk_zhfvoWHxBX4oGrtiup1ReLVJijfEazL8Iv-0t7ZQnZy9NvqI4F0YBgmlkgnY0gmlwhDQrTeaJc2VjcDI1NmsxoQOb5IvXo9O253FD1AYoPwQpNM79-mLg8_HV1NevjZnTt4N0Y3CCIyiDdWRwgiMo"
- -Iu4QMK9kcJ246666k9FlE_8IXy8NGP_zO-M8hN4di66eZNjBhoipKSoZaJnHZERnb-0aZ4U4UMPT-We7yx1sfMdbeEDgmlkgnY0gmlwhDbOLfeJc2VjcDI1NmsxoQLVqNEoCVTC74VmUx25USyFe7lL0TgpXHaCX9CDy9H6boN0Y3CCIyiDdWRwgiMo
- -Iu4QB2V3Y21rbjnId2ZqpssdcQlKB49prr-ggq2smRJLg0xc5_7ar6GfuQkbnlj1LBHKY3l__Lkk8aIM29VgpLCDBgBgmlkgnY0gmlwhCOhiGqJc2VjcDI1NmsxoQMrmBYg_yR_ZKZKoLiChvlpNqdwXwodXmgw_TRow7RVwYN0Y3CCIyiDdWRwgiMo
- -Iu4QCCuoEoHoOyYO728lOOiMqVZsNs_jd-uhj6_kiuBUySwYtHQLVkycNu8o-4oCzr-ql1u_n8g6tCw3ZXAEf0U008BgmlkgnY0gmlwhA01ZgSJc2VjcDI1NmsxoQPk2OMW7stSjbdcMgrKEdFOLsRkIuxgBFryA3tIJM0YxYN0Y3CCIyiDdWRwgiMo
- -Iu4QF5vgIx1BDlqFKAiAeejWPphhc_OeHDzM5feKhibcyWHLlDlov8E8L7MDJ8dQvCyaCynJGf0eWUfo5pzLhM_e8QBgmlkgnY0gmlwhDRCMUyJc2VjcDI1NmsxoQJZ8jY1HYauxirnJkVI32FoN7_7KrE05asCkZb7nj_b-YN0Y3CCIyiDdWRwgiMo
@@ -18,6 +18,7 @@ BLS_WITHDRAWAL_PREFIX: 0x00
SECONDS_PER_SLOT: 12
MIN_ATTESTATION_INCLUSION_DELAY: 1
MIN_SEED_LOOKAHEAD: 1
MAX_SEED_LOOKAHEAD: 4
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
PERSISTENT_COMMITTEE_PERIOD: 2048
MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4

@@ -1 +1 @@
1773705
1857277

@@ -1 +1 @@
0x13e4d66c7215d7b63fec7b52fc65e6655093d906
0x74a03685a1cbc279efe4ea88b5a86d6cb0c6cedb

Binary file not shown.
@@ -3,7 +3,7 @@ use super::*;
pub mod impls;

/// Returned when SSZ decoding fails.
#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Clone)]
pub enum DecodeError {
    /// The bytes supplied were too short to be decoded into the specified type.
    InvalidByteLength { len: usize, expected: usize },

@@ -53,7 +53,7 @@ pub mod length {
}

/// Returned when an item encounters an error.
#[derive(PartialEq, Debug)]
#[derive(PartialEq, Debug, Clone)]
pub enum Error {
    OutOfBounds {
        i: usize,