mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-22 22:34:45 +00:00
* some blob reprocessing work
* remove ForceBlockLookup
* reorder enum match arms in sync manager
* a lot more reprocessing work
* impl logic for triggering blob lookups along with block lookups
* deal with rpc blobs in groups per block in the da checker. don't cache missing blob ids in the da checker.
* make single block lookup generic
* more work
* add delayed processing logic and combine some requests
* start fixing some compile errors
* fix compilation in main block lookup mod
* much work
* get things compiling
* parent blob lookups
* fix compile
* revert red/stevie changes
* fix up sync manager delay message logic
* add peer usefulness enum
* should remove lookup refactor
* consolidate retry error handling
* improve peer scoring during certain failures in parent lookups
* improve retry code
* drop parent lookup if either req has a peer disconnect during download
* refactor single block processed method
* processing peer refactor
* smol bugfix
* fix some todos
* fix lints
* fix lints
* fix compile in lookup tests
* fix lints
* fix lints
* fix existing block lookup tests
* renamings
* fix after merge
* cargo fmt
* compilation fix in beacon chain tests
* fix
* refactor lookup tests to work with multiple forks and response types
* make tests into macros
* wrap availability check error
* fix compile after merge
* add random blobs
* start fixing up lookup verify error handling
* some bug fixes and the start of deneb only tests
* make tests work for all forks
* track information about peer source
* error refactoring
* improve peer scoring
* fix test compilation
* make sure blobs are sent for processing after stream termination, delete copied tests
* add some tests and fix a bug
* smol bugfixes and moar tests
* add tests and fix some things
* compile after merge
* lots of refactoring
* retry on invalid block/blob
* merge unknown parent messages before current slot lookup
* get tests compiling
* penalize blob peer on invalid blobs
* Check disk on in-memory cache miss
* Update beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs
* Update beacon_node/network/src/sync/network_context.rs
Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com>
* fix bug in matching blocks and blobs in range sync
* pr feedback
* fix conflicts
* upgrade logs from warn to crit when we receive incorrect response in range
* synced_and_connected_within_tolerance -> should_search_for_block
* remove todo
* add data gas used and update excess data gas to u64
* Fix Broken Overflow Tests
* payload verification with commitments
* fix merge conflicts
* restore payload file
* Restore payload file
* remove todo
* add max blob commitments per block
* c-kzg lib update
* Fix ef tests
* Abstract over minimal/mainnet spec in kzg crate
* Start integrating new KZG
* checkpoint sync without alignment
* checkpoint sync without alignment
* add import
* add import
* query for checkpoint state by slot rather than state root (teku doesn't serve by state root)
* query for checkpoint state by slot rather than state root (teku doesn't serve by state root)
* loosen check
* get state first and query by most recent block root
* Revert "loosen check"
This reverts commit 069d13dd63.
* get state first and query by most recent block root
* merge max blobs change
* simplify delay logic
* rename unknown parent sync message variants
* rename parameter, block_slot -> slot
* add some docs to the lookup module
* use interval instead of sleep
* drop request if blocks and blobs requests both return `None` for `Id`
* clean up `find_single_lookup` logic
* add lookup source enum
* clean up `find_single_lookup` logic
* add docs to find_single_lookup_request
* move LookupSource our of param where unnecessary
* remove unnecessary todo
* query for block by `state.latest_block_header.slot`
* fix lint
* fix merge transition ef tests
* fix test
* fix test
* fix observed blob sidecars test
* Add some metrics (#33)
* fix protocol limits for blobs by root
* Update Engine API for 1:1 Structure Method
* make beacon chain tests to fix devnet 6 changes
* get ckzg working and fix some tests
* fix remaining tests
* fix lints
* Fix KZG linking issues
* remove unused dep
* lockfile
* test fixes
* remove dbgs
* remove unwrap
* cleanup tx generator
* small fixes
* fixing fixes
* more self review
* more self review
* refactor genesis header initialization
* refactor mock el instantiations
* fix compile
* fix network test, make sure they run for each fork
* pr feedback
* fix last test (hopefully)
---------
Co-authored-by: Pawan Dhananjay <pawandhananjay@gmail.com>
Co-authored-by: Mark Mackey <mark@sigmaprime.io>
Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com>
Co-authored-by: Michael Sproul <michael@sigmaprime.io>
161 lines
5.1 KiB
Rust
161 lines
5.1 KiB
Rust
use c_kzg::{BYTES_PER_G1_POINT, BYTES_PER_G2_POINT, FIELD_ELEMENTS_PER_BLOB};
|
|
use serde::{
|
|
de::{self, Deserializer, Visitor},
|
|
Deserialize, Serialize,
|
|
};
|
|
|
|
/// Wrapper over a BLS G1 point's byte representation.
///
/// Holds the raw fixed-size compressed bytes; (de)serialized as a hex string
/// (see the `Serialize`/`Deserialize` impls below).
#[derive(Debug, Clone, PartialEq)]
struct G1Point([u8; BYTES_PER_G1_POINT]);
|
|
|
|
/// Wrapper over a BLS G2 point's byte representation.
///
/// Holds the raw fixed-size compressed bytes; (de)serialized as a hex string
/// (see the `Serialize`/`Deserialize` impls below).
#[derive(Debug, Clone, PartialEq)]
struct G2Point([u8; BYTES_PER_G2_POINT]);
|
|
|
|
/// Contains the trusted setup parameters that are required to instantiate a
/// `c_kzg::KzgSettings` object.
///
/// The serialize/deserialize implementations are written according to
/// the format specified in the ethereum consensus specs trusted setup files.
///
/// See https://github.com/ethereum/consensus-specs/blob/dev/presets/mainnet/trusted_setups/testing_trusted_setups.json
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct TrustedSetup {
    // G1 points from the `setup_G1_lagrange` JSON field; the custom
    // deserializer truncates the list to `FIELD_ELEMENTS_PER_BLOB` entries.
    #[serde(rename = "setup_G1_lagrange")]
    #[serde(deserialize_with = "deserialize_g1_points")]
    g1_points: Vec<G1Point>,
    // G2 points from the `setup_G2` JSON field; deserialized as-is.
    #[serde(rename = "setup_G2")]
    g2_points: Vec<G2Point>,
}
|
|
|
|
impl TrustedSetup {
|
|
pub fn g1_points(&self) -> Vec<[u8; BYTES_PER_G1_POINT]> {
|
|
self.g1_points.iter().map(|p| p.0).collect()
|
|
}
|
|
|
|
pub fn g2_points(&self) -> Vec<[u8; BYTES_PER_G2_POINT]> {
|
|
self.g2_points.iter().map(|p| p.0).collect()
|
|
}
|
|
|
|
pub fn g1_len(&self) -> usize {
|
|
self.g1_points.len()
|
|
}
|
|
}
|
|
|
|
impl Serialize for G1Point {
|
|
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
|
where
|
|
S: serde::Serializer,
|
|
{
|
|
let point = hex::encode(self.0);
|
|
serializer.serialize_str(&point)
|
|
}
|
|
}
|
|
|
|
impl Serialize for G2Point {
|
|
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
|
where
|
|
S: serde::Serializer,
|
|
{
|
|
let point = hex::encode(self.0);
|
|
serializer.serialize_str(&point)
|
|
}
|
|
}
|
|
|
|
impl<'de> Deserialize<'de> for G1Point {
|
|
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
|
where
|
|
D: Deserializer<'de>,
|
|
{
|
|
struct G1PointVisitor;
|
|
|
|
impl<'de> Visitor<'de> for G1PointVisitor {
|
|
type Value = G1Point;
|
|
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
|
|
formatter.write_str("A 48 byte hex encoded string")
|
|
}
|
|
|
|
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
|
|
where
|
|
E: de::Error,
|
|
{
|
|
let point = hex::decode(strip_prefix(v))
|
|
.map_err(|e| de::Error::custom(format!("Failed to decode G1 point: {}", e)))?;
|
|
if point.len() != BYTES_PER_G1_POINT {
|
|
return Err(de::Error::custom(format!(
|
|
"G1 point has invalid length. Expected {} got {}",
|
|
BYTES_PER_G1_POINT,
|
|
point.len()
|
|
)));
|
|
}
|
|
let mut res = [0; BYTES_PER_G1_POINT];
|
|
res.copy_from_slice(&point);
|
|
Ok(G1Point(res))
|
|
}
|
|
}
|
|
|
|
deserializer.deserialize_str(G1PointVisitor)
|
|
}
|
|
}
|
|
|
|
impl<'de> Deserialize<'de> for G2Point {
|
|
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
|
where
|
|
D: Deserializer<'de>,
|
|
{
|
|
struct G2PointVisitor;
|
|
|
|
impl<'de> Visitor<'de> for G2PointVisitor {
|
|
type Value = G2Point;
|
|
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
|
|
formatter.write_str("A 96 byte hex encoded string")
|
|
}
|
|
|
|
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
|
|
where
|
|
E: de::Error,
|
|
{
|
|
let point = hex::decode(strip_prefix(v))
|
|
.map_err(|e| de::Error::custom(format!("Failed to decode G2 point: {}", e)))?;
|
|
if point.len() != BYTES_PER_G2_POINT {
|
|
return Err(de::Error::custom(format!(
|
|
"G2 point has invalid length. Expected {} got {}",
|
|
BYTES_PER_G2_POINT,
|
|
point.len()
|
|
)));
|
|
}
|
|
let mut res = [0; BYTES_PER_G2_POINT];
|
|
res.copy_from_slice(&point);
|
|
Ok(G2Point(res))
|
|
}
|
|
}
|
|
|
|
deserializer.deserialize_str(G2PointVisitor)
|
|
}
|
|
}
|
|
|
|
fn deserialize_g1_points<'de, D>(deserializer: D) -> Result<Vec<G1Point>, D::Error>
|
|
where
|
|
D: Deserializer<'de>,
|
|
{
|
|
let mut decoded: Vec<G1Point> = serde::de::Deserialize::deserialize(deserializer)?;
|
|
// FIELD_ELEMENTS_PER_BLOB is a compile time parameter that
|
|
// depends on whether lighthouse is compiled with minimal or mainnet features.
|
|
// Minimal and mainnet trusted setup parameters differ only by the
|
|
// number of G1 points they contain.
|
|
//
|
|
// Hence, we truncate the number of G1 points after deserialisation
|
|
// to ensure that we have the right number of g1 points in the
|
|
// trusted setup.
|
|
decoded.truncate(FIELD_ELEMENTS_PER_BLOB);
|
|
Ok(decoded)
|
|
}
|
|
|
|
/// Removes a leading "0x" from a hex string, if one is present;
/// otherwise returns the input unchanged.
fn strip_prefix(s: &str) -> &str {
    s.strip_prefix("0x").unwrap_or(s)
}
|