diff --git a/.ai/CODE_REVIEW.md b/.ai/CODE_REVIEW.md
new file mode 100644
index 0000000000..e4da3b22d5
--- /dev/null
+++ b/.ai/CODE_REVIEW.md
@@ -0,0 +1,277 @@
+# Lighthouse Code Review Guidelines
+
+Code review guidelines based on patterns from Lighthouse maintainers.
+
+## Core Principles
+
+- **Correctness** over clever code
+- **Clarity** through good documentation and naming
+- **Safety** through proper error handling and panic avoidance
+- **Maintainability** for long-term health
+
+## Critical: Consensus Crate (`consensus/` excluding `types/`)
+
+**Extra scrutiny required** - bugs here cause consensus failures.
+
+### Requirements
+
+1. **Safe Math Only**
+   ```rust
+   // NEVER
+   let result = a + b;
+
+   // ALWAYS
+   let result = a.saturating_add(b);
+   // or use safe_arith crate
+   let result = a.safe_add(b)?;
+   ```
+
+2. **Zero Panics**
+   - No `.unwrap()`, `.expect()`, array indexing `[i]`
+   - Return `Result` or `Option` instead
+
+3. **Deterministic Behavior**
+   - Identical results across all platforms
+   - No undefined behavior
+
+## Panic Avoidance (All Code)
+
+```rust
+// NEVER at runtime
+let value = option.unwrap();
+let item = array[1];
+
+// ALWAYS
+let value = option.ok_or(Error::Missing)?;
+let item = array.get(1)?;
+
+// Only acceptable during startup for CLI/config validation
+let flag = matches.get_one::<String>("flag")
+    .expect("Required due to clap validation");
+```
+
+## Code Clarity
+
+### Variable Naming
+```rust
+// BAD - ambiguous
+let bb = ...;
+let bl = ...;
+
+// GOOD - clear
+let beacon_block = ...;
+let blob = ...;
+```
+
+### Comments
+- Explain the "why" not just the "what"
+- All `TODO` comments must link to a GitHub issue
+- Remove dead/commented-out code
+
+## Error Handling
+
+### Don't Silently Swallow Errors
+```rust
+// BAD
+self.store.get_info().unwrap_or(None)
+
+// GOOD
+self.store.get_info().unwrap_or_else(|e| {
+    error!(self.log, "Failed to read info"; "error" => ?e);
+    None
+})
+```
+
+### Check Return Values
+Ask: "What happens
if this returns `Ok(Failed)`?" Don't ignore results that might indicate failure. + +## Performance & Concurrency + +### Lock Safety +- Document lock ordering requirements +- Keep lock scopes narrow +- Seek detailed review for lock-related changes +- Use `try_read` when falling back to an alternative is acceptable +- Use blocking `read` when alternative is more expensive (e.g., state reconstruction) + +### Async Patterns +```rust +// NEVER block in async context +async fn handler() { + expensive_computation(); // blocks runtime +} + +// ALWAYS spawn blocking +async fn handler() { + tokio::task::spawn_blocking(|| expensive_computation()).await?; +} +``` + +### Rayon +- Use scoped rayon pools from beacon processor +- Avoid global thread pool (causes CPU oversubscription) + +## Review Process + +### Focus on Actionable Issues + +**Limit to 3-5 key comments.** Prioritize: +1. Correctness issues - bugs, race conditions, panics +2. Missing test coverage - especially edge cases +3. Complex logic needing documentation +4. API design concerns + +**Don't comment on:** +- Minor style issues +- Things caught by CI (formatting, linting) +- Nice-to-haves that aren't important + +### Keep Comments Natural and Minimal + +**Tone**: Natural and conversational, not robotic. + +**Good review comment:** +``` +Missing test coverage for the None blobs path. The existing test at +`store_tests.rs:2874` still provides blobs. Should add a test passing +None to verify backfill handles this correctly. +``` + +**Good follow-up after author addresses comments:** +``` +LGTM, thanks! +``` +or +``` +Thanks for the updates, looks good! +``` + +**Avoid:** +- Checklists or structured formatting (✅ Item 1 fixed...) +- Repeating what was fixed (makes it obvious it's AI-generated) +- Headers, subsections, "Summary" sections +- Verbose multi-paragraph explanations + +### Use Natural Language + +``` +BAD (prescriptive): +"This violates coding standards which strictly prohibit runtime panics." 
+ +GOOD (conversational): +"Should we avoid `.expect()` here? This gets called in hot paths and +we typically try to avoid runtime panics outside of startup." +``` + +### Verify Before Commenting + +- If CI passes, trust it - types/imports must exist +- Check the full diff, not just visible parts +- Ask for verification rather than asserting things are missing + +## Common Review Patterns + +### Fork-Specific Changes +- Verify production fork code path unchanged +- Check SSZ compatibility (field order) +- Verify rollback/error paths handle edge cases + +### API Design +- Constructor signatures should be consistent +- Avoid `Option` parameters when value is always required + +### Concurrency +- Lock ordering documented? +- Potential deadlocks? +- Race conditions? + +### Error Handling +- Errors logged? +- Edge cases handled? +- Context provided with errors? + +## Deep Review Techniques + +### Verify Against Specifications +- Read the actual spec in `./consensus-specs/` +- Compare formulas exactly +- Check constant values match spec definitions + +### Trace Data Flow End-to-End +For new config fields: +1. Config file - Does YAML contain the field? +2. Config struct - Is it parsed with serde attributes? +3. apply_to_chain_spec - Is it actually applied? +4. Runtime usage - Used correctly everywhere? + +### Check Error Handling Fallbacks +Examine every `.unwrap_or()`, `.unwrap_or_else()`: +- If the fallback triggers, does code behave correctly? +- Does it silently degrade or fail loudly? + +### Look for Incomplete Migrations +When a PR changes a pattern across the codebase: +- Search for old pattern - all occurrences updated? 
+- Check test files - often lag behind implementation + +## Architecture & Design + +### Avoid Dependency Bloat +- Question whether imports add unnecessary dependencies +- Consider feature flags for optional functionality +- Large imports when only primitives are needed may warrant a `core` or `primitives` feature + +### Schema Migrations +- Database schema changes require migrations +- Don't forget to add migration code when changing stored types +- Review pattern: "Needs a schema migration" + +### Backwards Compatibility +- Consider existing users when changing behavior +- Document breaking changes clearly +- Prefer additive changes when possible + +## Anti-Patterns to Avoid + +### Over-Engineering +- Don't add abstractions until needed +- Keep solutions simple and focused +- "Three similar lines of code is better than a premature abstraction" + +### Unnecessary Complexity +- Avoid feature flags for simple changes +- Don't add fallbacks for scenarios that can't happen +- Trust internal code and framework guarantees + +### Premature Optimization +- Optimize hot paths based on profiling, not assumptions +- Document performance considerations but don't over-optimize + +### Hiding Important Information +- Don't use generic variable names when specific ones are clearer +- Don't skip logging just to keep code shorter +- Don't omit error context + +## Design Principles + +### Simplicity First +Question every layer of abstraction: +- Is this `Arc` needed, or is the inner type already `Clone`? +- Is this `Mutex` needed, or can ownership be restructured? +- Is this wrapper type adding value or just indirection? + +If you can't articulate why a layer of abstraction exists, it probably shouldn't. + +### High Cohesion +Group related state and behavior together. If two fields are always set together, used together, and invalid without each other, they belong in a struct. 
+
+## Before Approval Checklist
+
+- [ ] No panics: No `.unwrap()`, `.expect()`, unchecked array indexing
+- [ ] Consensus safe: If touching consensus crate, all arithmetic is safe
+- [ ] Errors logged: Not silently swallowed
+- [ ] Clear naming: Variable names are unambiguous
+- [ ] TODOs linked: All TODOs have GitHub issue links
+- [ ] Tests present: Non-trivial changes have tests
+- [ ] Lock safety: Lock ordering is safe and documented
+- [ ] No blocking: Async code doesn't block runtime
diff --git a/.ai/DEVELOPMENT.md b/.ai/DEVELOPMENT.md
new file mode 100644
index 0000000000..1204f21ead
--- /dev/null
+++ b/.ai/DEVELOPMENT.md
@@ -0,0 +1,200 @@
+# Lighthouse Development Guide
+
+Development patterns, commands, and architecture for AI assistants and contributors.
+
+## Development Commands
+
+**Important**: Always branch from `unstable` and target `unstable` when creating pull requests.
+
+### Building
+
+- `make install` - Build and install Lighthouse in release mode
+- `make install-lcli` - Build and install `lcli` utility
+- `cargo build --release` - Standard release build
+- `cargo build --bin lighthouse --features "gnosis,slasher-lmdb"` - Build with specific features
+
+### Testing
+
+- `make test` - Full test suite in release mode
+- `make test-release` - Run tests using nextest (faster parallel runner)
+- `cargo nextest run -p <package>` - Run tests for specific package (preferred for iteration)
+- `cargo nextest run -p <package> <test_name>` - Run individual test
+- `FORK_NAME=electra cargo nextest run -p beacon_chain` - Run tests for specific fork
+- `make test-ef` - Ethereum Foundation test vectors
+
+**Fork-specific testing**: `beacon_chain` and `http_api` tests support fork-specific testing via `FORK_NAME` env var when `beacon_chain/fork_from_env` feature is enabled.
+
+**Note**: Full test suite takes ~20 minutes. Prefer targeted tests when iterating.
+
+### Linting
+
+- `make lint` - Run Clippy with project rules
+- `make lint-full` - Comprehensive linting including tests
+- `cargo fmt --all && make lint-fix` - Format and fix linting issues
+- `cargo sort` - Sort dependencies (enforced on CI)
+
+## Architecture Overview
+
+Lighthouse is a modular Ethereum consensus client with two main components:
+
+### Beacon Node (`beacon_node/`)
+
+- Main consensus client syncing with Ethereum network
+- Beacon chain state transition logic (`beacon_node/beacon_chain/`)
+- Networking, storage, P2P communication
+- HTTP API for validator clients
+- Entry point: `beacon_node/src/lib.rs`
+
+### Validator Client (`validator_client/`)
+
+- Manages validator keystores and duties
+- Block proposals, attestations, sync committee duties
+- Slashing protection and doppelganger detection
+- Entry point: `validator_client/src/lib.rs`
+
+### Key Subsystems
+
+| Subsystem | Location | Purpose |
+|-----------|----------|---------|
+| Consensus Types | `consensus/types/` | Core data structures, SSZ encoding |
+| Storage | `beacon_node/store/` | Hot/cold database (LevelDB, RocksDB, REDB backends) |
+| Networking | `beacon_node/lighthouse_network/` | Libp2p, gossipsub, discovery |
+| Fork Choice | `consensus/fork_choice/` | Proto-array fork choice |
+| Execution Layer | `beacon_node/execution_layer/` | EL client integration |
+| Slasher | `slasher/` | Optional slashing detection |
+
+### Utilities
+
+- `account_manager/` - Validator account management
+- `lcli/` - Command-line debugging utilities
+- `database_manager/` - Database maintenance tools
+
+## Code Quality Standards
+
+### Panic Avoidance (Critical)
+
+**Panics should be avoided at all costs.**
+
+```rust
+// NEVER at runtime
+let value = some_result.unwrap();
+let item = array[1];
+
+// ALWAYS prefer
+let value = some_result?;
+let item = array.get(1)?;
+
+// Only acceptable during startup
+let config = matches.get_one::<String>("flag")
+    .expect("Required due to clap validation");
+```
+
+### Consensus Crate Safety (`consensus/` excluding `types/`) + +Extra scrutiny required - bugs here cause consensus failures. + +```rust +// NEVER standard arithmetic +let result = a + b; + +// ALWAYS safe math +let result = a.saturating_add(b); +// or +use safe_arith::SafeArith; +let result = a.safe_add(b)?; +``` + +Requirements: +- Use `saturating_*` or `checked_*` operations +- Zero panics - no `.unwrap()`, `.expect()`, or `array[i]` +- Deterministic behavior across all platforms + +### Error Handling + +- Return `Result` or `Option` instead of panicking +- Log errors, don't silently swallow them +- Provide context with errors + +### Async Patterns + +```rust +// NEVER block in async context +async fn handler() { + expensive_computation(); // blocks runtime +} + +// ALWAYS spawn blocking +async fn handler() { + tokio::task::spawn_blocking(|| expensive_computation()).await?; +} +``` + +### Concurrency + +- **Lock ordering**: Document lock ordering to avoid deadlocks. See [`canonical_head.rs:9-32`](beacon_node/beacon_chain/src/canonical_head.rs) for excellent example documenting three locks and safe acquisition order. +- Keep lock scopes narrow +- Seek detailed review for lock-related changes + +### Rayon Thread Pools + +Avoid using the rayon global thread pool - it causes CPU oversubscription when beacon processor has fully allocated all CPUs to workers. Use scoped rayon pools started by beacon processor for computationally intensive tasks. 
+ +### Tracing Spans + +- Avoid spans on simple getter methods (performance overhead) +- Be cautious of span explosion with recursive functions +- Use spans per meaningful computation step, not every function +- **Never** use `span.enter()` or `span.entered()` in async tasks + +### Documentation + +- All `TODO` comments must link to a GitHub issue +- Prefer line comments (`//`) over block comments +- Keep comments concise, explain "why" not "what" + +## Logging Levels + +| Level | Use Case | +|-------|----------| +| `crit` | Lighthouse may not function - needs immediate attention | +| `error` | Moderate impact - expect user reports | +| `warn` | Unexpected but recoverable | +| `info` | High-level status - not excessive | +| `debug` | Developer events, expected errors | + +## Testing Patterns + +- **Unit tests**: Single component edge cases +- **Integration tests**: Use [`BeaconChainHarness`](beacon_node/beacon_chain/src/test_utils.rs) for end-to-end workflows +- **Sync components**: Use [`TestRig`](beacon_node/network/src/sync/tests/mod.rs) pattern with event-based testing +- **Mocking**: `mockall` for unit tests, `mockito` for HTTP APIs +- **Adapter pattern**: For testing `BeaconChain` dependent components, create adapter structs. 
See [`fetch_blobs/tests.rs`](beacon_node/beacon_chain/src/fetch_blobs/tests.rs) +- **Local testnet**: See `scripts/local_testnet/README.md` + +## Build Notes + +- Full builds take 5+ minutes - use large timeouts (300s+) +- Use `cargo check` for faster iteration +- MSRV documented in `Cargo.toml` + +### Cross-compilation + +- `make build-x86_64` - Cross-compile for x86_64 Linux +- `make build-aarch64` - Cross-compile for ARM64 Linux +- `make build-riscv64` - Cross-compile for RISC-V 64-bit Linux + +## Parallel Development + +For working on multiple branches simultaneously, use git worktrees: + +```bash +git worktree add -b my-feature ../lighthouse-my-feature unstable +``` + +This creates a separate working directory without needing multiple clones. To save disk space across worktrees, configure a shared target directory: + +```bash +# In .cargo/config.toml at your workspace root +[build] +target-dir = "/path/to/shared-target" +``` diff --git a/.ai/ISSUES.md b/.ai/ISSUES.md new file mode 100644 index 0000000000..ce79198b4d --- /dev/null +++ b/.ai/ISSUES.md @@ -0,0 +1,130 @@ +# GitHub Issue & PR Guidelines + +Guidelines for creating well-structured GitHub issues and PRs for Lighthouse. + +## Issue Structure + +### Start with Description + +Always begin with `## Description`: + +```markdown +## Description + +We presently prune all knowledge of non-canonical blocks once they conflict with +finalization. The pruning is not always immediate, fork choice currently prunes +once the number of nodes reaches a threshold of 256. + +It would be nice to develop a simple system for handling messages relating to +blocks that are non-canonical. +``` + +**Guidelines:** +- First paragraph: problem and brief solution +- Provide context about current behavior +- Link to related issues, PRs, or specs +- Be technical and specific + +### Steps to Resolve (when applicable) + +```markdown +## Steps to resolve + +I see two ways to fix this: a strict approach, and a pragmatic one. 
+ +The strict approach would only check once the slot is finalized. This would have +0 false positives, but would be slower to detect missed blocks. + +The pragmatic approach might be to only process `BeaconState`s from the canonical +chain. I don't have a strong preference between approaches. +``` + +**Guidelines:** +- Don't be overly prescriptive - present options +- Mention relevant constraints +- It's okay to say "I don't have a strong preference" + +### Optional Sections + +- `## Additional Info` - Edge cases, related issues +- `## Metrics` - Performance data, observations +- `## Version` - For bug reports + +## Code References + +**Use GitHub permalinks with commit hashes** so code renders properly: + +``` +https://github.com/sigp/lighthouse/blob/261322c3e3ee/beacon_node/beacon_processor/src/lib.rs#L809 +``` + +Get commit hash: `git rev-parse unstable` + +For line ranges: `#L809-L825` + +## Writing Style + +### Be Natural and Concise +- Direct and objective +- Precise technical terminology +- Avoid AI-sounding language + +### Be Honest About Uncertainty +- Don't guess - ask questions +- Use tentative language when appropriate ("might", "I think") +- Present multiple options without picking one + +### Think About Trade-offs +- Present multiple approaches +- Discuss pros and cons +- Consider backward compatibility +- Note performance implications + +## Labels + +**Type:** `bug`, `enhancement`, `optimization`, `code-quality`, `security`, `RFC` + +**Component:** `database`, `HTTP-API`, `fork-choice`, `beacon-processor`, etc. + +**Effort:** `good first issue`, `low-hanging-fruit`, `major-task` + +## Pull Request Guidelines + +```markdown +## Description + +[What does this PR do? Why is it needed? Be concise and technical.] + +Closes #[issue-number] + +## Additional Info + +[Breaking changes, performance impacts, migration steps, etc.] 
+``` + +### Commit Messages + +Format: +- First line: Brief summary (imperative mood) +- Blank line +- Additional details if needed + +``` +Add custody info API for data columns + +Implements `/lighthouse/custody/info` endpoint that returns custody group +count, custodied columns, and earliest available data column slot. +``` + +## Anti-Patterns + +- Vague descriptions without details +- No code references when describing code +- Premature solutions without understanding the problem +- Making claims without validating against codebase + +## Good Examples + +- https://github.com/sigp/lighthouse/issues/6120 +- https://github.com/sigp/lighthouse/issues/4388 +- https://github.com/sigp/lighthouse/issues/8216 diff --git a/.claude/commands/issue.md b/.claude/commands/issue.md new file mode 100644 index 0000000000..85ff46fe22 --- /dev/null +++ b/.claude/commands/issue.md @@ -0,0 +1,49 @@ +# GitHub Issue Creation Task + +You are creating a GitHub issue for the Lighthouse project. + +## Required Reading + +**Before creating an issue, read `.ai/ISSUES.md`** for issue and PR writing guidelines. + +## Structure + +1. **Description** (required) + - First paragraph: problem and brief solution + - Context about current behavior + - Links to related issues, PRs, or specs + - Technical and specific + +2. **Steps to Resolve** (when applicable) + - Present options and considerations + - Don't be overly prescriptive + - Mention relevant constraints + +3. **Code References** + - Use GitHub permalinks with commit hashes + - Get hash: `git rev-parse unstable` + +## Style + +- Natural, concise, direct +- Avoid AI-sounding language +- Be honest about uncertainty +- Present trade-offs + +## Labels to Suggest + +- **Type**: bug, enhancement, optimization, code-quality +- **Component**: database, HTTP-API, fork-choice, beacon-processor +- **Effort**: good first issue, low-hanging-fruit, major-task + +## Output + +Provide the complete issue text ready to paste into GitHub. 
+
+## After Feedback
+
+If the developer refines your issue/PR text or suggests a different format:
+
+1. **Apply their feedback** to the current issue
+2. **Offer to update docs** - Ask: "Should I update `.ai/ISSUES.md` to capture this preference?"
+3. **Document patterns** the team prefers that aren't yet in the guidelines
diff --git a/.claude/commands/release.md b/.claude/commands/release.md
new file mode 100644
index 0000000000..1694e90cc5
--- /dev/null
+++ b/.claude/commands/release.md
@@ -0,0 +1,85 @@
+# Release Notes Generation Task
+
+You are generating release notes for a new Lighthouse version.
+
+## Input Required
+
+- **Version number** (e.g., v8.1.0)
+- **Base branch** (typically `stable` for previous release)
+- **Release branch** (e.g., `release-v8.1`)
+- **Release name** (Rick and Morty character - check existing to avoid duplicates)
+
+## Step 1: Gather Changes
+
+```bash
+# Get commits between branches
+git log --oneline origin/<base-branch>..origin/<release-branch>
+
+# Check existing release names
+gh release list --repo sigp/lighthouse --limit 50
+```
+
+## Step 2: Analyze PRs
+
+For each PR:
+1. Extract PR numbers from commit messages
+2. Check for `backwards-incompat` label:
+   ```bash
+   gh pr view <PR> --repo sigp/lighthouse --json labels --jq '[.labels[].name] | join(",")'
+   ```
+3. Get PR details for context
+
+## Step 3: Categorize
+
+Group into sections (skip empty):
+- **Breaking Changes** - schema changes, CLI changes, API changes
+- **Performance Improvements** - user-noticeable optimizations
+- **Validator Client Improvements** - VC-specific changes
+- **Other Notable Changes** - new features, metrics
+- **CLI Changes** - new/changed flags (note if BN or VC)
+- **Bug Fixes** - significant user-facing fixes only
+
+## Step 4: Write Release Notes
+
+```markdown
+## <Release Name>
+
+## Summary
+
+Lighthouse v<version> includes <summary of notable changes>.
+
+This is a <low/medium/high priority> upgrade for <affected users>.
+
+## <Section>
+
+- **<Title>** (#<PR>): <User-facing description>
+
+## Update Priority
+
+| User Class | Beacon Node | Validator Client |
+|:------------------|:------------|:-----------------|
+| Staking Users | Low/Medium/High | Low/Medium/High |
+| Non-Staking Users | Low/Medium/High | --- |
+
+## All Changes
+
+- <commit title> (#<PR>)
+
+## Binaries
+
+[See pre-built binaries documentation.](https://lighthouse-book.sigmaprime.io/installation_binaries.html)
+```
+
+## Guidelines
+
+- State **user impact**, not implementation details
+- Avoid jargon users won't understand
+- For CLI flags, mention if BN or VC
+- Check PR descriptions for context
+
+## Step 5: Generate Announcements
+
+Create drafts for:
+- **Email** - Formal, include priority table
+- **Discord** - Tag @everyone, shorter
+- **Twitter** - Single tweet, 2-3 highlights
diff --git a/.claude/commands/review.md b/.claude/commands/review.md
new file mode 100644
index 0000000000..7867716c79
--- /dev/null
+++ b/.claude/commands/review.md
@@ -0,0 +1,57 @@
+# Code Review Task
+
+You are reviewing code for the Lighthouse project.
+
+## Required Reading
+
+**Before reviewing, read `.ai/CODE_REVIEW.md`** for Lighthouse-specific safety requirements and review etiquette.
+
+## Focus Areas
+
+1. **Consensus Crate Safety** (if applicable)
+   - Safe math operations (saturating_*, checked_*)
+   - Zero panics
+   - Deterministic behavior
+
+2. **General Code Safety**
+   - No `.unwrap()` or `.expect()` at runtime
+   - No array indexing without bounds checks
+   - Proper error handling
+
+3. **Code Clarity**
+   - Clear variable names (avoid ambiguous abbreviations)
+   - Well-documented complex logic
+   - TODOs linked to GitHub issues
+
+4. **Error Handling**
+   - Errors are logged, not silently swallowed
+   - Edge cases are handled
+   - Return values are checked
+
+5.
**Concurrency & Performance** + - Lock ordering is safe + - No blocking in async context + - Proper use of rayon thread pools + +## Output + +- Keep to 3-5 actionable comments +- Use natural, conversational language +- Provide specific line references +- Ask questions rather than making demands + +## After Review Discussion + +If the developer corrects your feedback or you learn something new: + +1. **Acknowledge and learn** - Note what you got wrong +2. **Offer to update docs** - Ask: "Should I update `.ai/CODE_REVIEW.md` with this lesson?" +3. **Format the lesson:** + ```markdown + ### Lesson: [Title] + **Issue:** [What went wrong] + **Feedback:** [What developer said] + **Learning:** [What to do differently] + ``` + +This keeps the review guidelines improving over time. diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 46fa15da86..72ea9d41ae 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -319,6 +319,8 @@ jobs: bins: cargo-audit,cargo-deny - name: Check formatting with cargo fmt run: make cargo-fmt + - name: Check dependencies for unencrypted HTTP links + run: make insecure-deps - name: Lint code for quality and style with Clippy run: make lint-full - name: Certify Cargo.lock freshness diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000000..4ab3ec9333 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,10 @@ +# Lighthouse AI Assistant Guide + +See [`CLAUDE.md`](CLAUDE.md) for AI assistant guidance. + +This file exists for OpenAI Codex compatibility. 
Codex can read files, so refer to `CLAUDE.md` for the full documentation including: + +- Quick reference commands +- Critical rules (panics, safe math, async) +- Project structure +- Pointers to detailed guides in `.ai/` diff --git a/CLAUDE.md b/CLAUDE.md index 3e9ab169f3..441c8e4274 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -1,332 +1,151 @@ -# CLAUDE.md +# Lighthouse AI Assistant Guide -This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. +This file provides guidance for AI assistants (Claude Code, Codex, etc.) working with Lighthouse. -## Development Commands +## Quick Reference -**Important**: Always branch from `unstable` and target `unstable` when creating pull requests. +```bash +# Build +make install # Build and install Lighthouse +cargo build --release # Standard release build -### Building and Installation +# Test (prefer targeted tests when iterating) +cargo nextest run -p <package> # Test specific package +cargo nextest run -p <package> <test> # Run individual test +make test # Full test suite (~20 min) -- `make install` - Build and install the main Lighthouse binary in release mode -- `make install-lcli` - Build and install the `lcli` utility binary -- `cargo build --release` - Standard Rust release build -- `cargo build --bin lighthouse --features "gnosis,slasher-lmdb"` - Build with specific features - -### Testing - -- `make test` - Run the full test suite in release mode (excludes EF tests, beacon_chain, slasher, network, http_api) -- `make test-release` - Run tests using nextest (faster parallel test runner) -- `make test-beacon-chain` - Run beacon chain tests for all supported forks -- `make test-slasher` - Run slasher tests with all database backend combinations -- `make test-ef` - Download and run Ethereum Foundation test vectors -- `make test-full` - Complete test suite including linting, EF tests, and execution engine tests -- `cargo nextest run -p <package_name>` - Run tests for a specific 
package -- `cargo nextest run -p <package_name> <test_name>` - Run individual test (preferred during development iteration) -- `FORK_NAME=electra cargo nextest run -p beacon_chain` - Run tests for specific fork - -**Note**: Full test suite takes ~20 minutes. When iterating, prefer running individual tests. - -### Linting and Code Quality - -- `make lint` - Run Clippy linter with project-specific rules -- `make lint-full` - Run comprehensive linting including tests (recommended for thorough checking) -- `make cargo-fmt` - Check code formatting with rustfmt -- `make check-benches` - Typecheck benchmark code -- `make audit` - Run security audit on dependencies - -### Cross-compilation - -- `make build-x86_64` - Cross-compile for x86_64 Linux -- `make build-aarch64` - Cross-compile for ARM64 Linux -- `make build-riscv64` - Cross-compile for RISC-V 64-bit Linux - -## Architecture Overview - -Lighthouse is a modular Ethereum consensus client with two main components: - -### Core Components - -**Beacon Node** (`beacon_node/`) - -- Main consensus client that syncs with the Ethereum network -- Contains the beacon chain state transition logic (`beacon_node/beacon_chain/`) -- Handles networking, storage, and P2P communication -- Provides HTTP API for validator clients and external tools -- Entry point: `beacon_node/src/lib.rs` - -**Validator Client** (`validator_client/`) - -- Manages validator keystores and performs validator duties -- Connects to beacon nodes via HTTP API -- Handles block proposals, attestations, and sync committee duties -- Includes slashing protection and doppelganger detection -- Entry point: `validator_client/src/lib.rs` - -### Key Subsystems - -**Consensus Types** (`consensus/types/`) - -- Core Ethereum consensus data structures (BeaconState, BeaconBlock, etc.) 
-- Ethereum specification implementations for different networks (mainnet, gnosis) -- SSZ encoding/decoding and state transition primitives - -**Storage** (`beacon_node/store/`) - -- Hot/cold database architecture for efficient beacon chain storage -- Supports multiple backends (LevelDB, RocksDB, REDB) -- Handles state pruning and historical data management - -**Networking** (`beacon_node/lighthouse_network/`, `beacon_node/network/`) - -- Libp2p-based P2P networking stack -- Gossipsub for message propagation -- Discovery v5 for peer discovery -- Request/response protocols for sync - -**Fork Choice** (`consensus/fork_choice/`, `consensus/proto_array/`) - -- Implements Ethereum's fork choice algorithm (proto-array) -- Manages chain reorganizations and finality - -**Execution Layer Integration** (`beacon_node/execution_layer/`) - -- Interfaces with execution clients -- Retrieves payloads from local execution layer or external block builders -- Handles payload validation and builder integration - -**Slasher** (`slasher/`) - -- Optional slashing detection service -- Supports LMDB, MDBX, and REDB database backends -- Can be enabled with `--slasher` flag - -### Utilities - -**Account Manager** (`account_manager/`) - CLI tool for managing validator accounts and keystores -**LCLI** (`lcli/`) - Lighthouse command-line utilities for debugging and testing -**Database Manager** (`database_manager/`) - Database maintenance and migration tools - -### Build System Notes - -- Uses Cargo workspace with 90+ member crates -- Supports multiple Ethereum specifications via feature flags (`gnosis`, `spec-minimal`) -- Cross-compilation support for Linux x86_64, ARM64, and RISC-V -- Multiple build profiles: `release`, `maxperf`, `reproducible` -- Feature-based compilation for different database backends and optional components - -### Network Support - -- **Mainnet**: Default production network -- **Gnosis**: Alternative network (requires `gnosis` feature) -- **Testnets**: Holesky, Sepolia 
via built-in network configs -- **Custom networks**: Via `--testnet-dir` flag - -### Key Configuration - -- Default data directory: `~/.lighthouse/{network}` -- Beacon node data: `~/.lighthouse/{network}/beacon` -- Validator data: `~/.lighthouse/{network}/validators` -- Configuration primarily via CLI flags and YAML files - -## Common Review Standards - -### CI/Testing Requirements - -- All checks must pass before merge -- Test coverage expected for significant changes -- Flaky tests are actively addressed and fixed -- New features often require corresponding tests -- `beacon_chain` and `http_api` tests support fork-specific testing using `FORK_NAME` env var when `beacon_chain/fork_from_env` feature is enabled - -### Code Quality Standards - -- Clippy warnings must be fixed promptly (multiple PRs show this pattern) -- Code formatting with `cargo fmt` enforced -- Must run `cargo sort` when adding dependencies - dependency order is enforced on CI -- Performance considerations for hot paths - -### Documentation and Context - -- PRs require clear descriptions of what and why -- Breaking changes need migration documentation -- API changes require documentation updates -- When CLI is updated, run `make cli-local` to generate updated help text in lighthouse book -- Comments appreciated for complex logic - -### Security and Safety - -- Careful review of consensus-critical code paths -- Error handling patterns must be comprehensive -- Input validation for external data - -## Development Patterns and Best Practices - -### Panics and Error Handling - -- **Panics should be avoided at all costs** -- Always prefer returning a `Result` or `Option` over causing a panic (e.g., prefer `array.get(1)?` over `array[1]`) -- Avoid `expect` or `unwrap` at runtime - only acceptable during startup when validating CLI flags or configurations -- If you must make assumptions about panics, use `.expect("Helpful message")` instead of `.unwrap()` and provide detailed reasoning in nearby comments 
-- Use proper error handling with `Result` types and graceful error propagation - -### Rayon Usage - -- Avoid using the rayon global thread pool as it results in CPU oversubscription when the beacon processor has fully allocated all CPUs to workers -- Use scoped rayon pools started by beacon processor for computational intensive tasks - -### Locks - -- Take great care to avoid deadlocks when working with fork choice locks - seek detailed review ([reference](beacon_node/beacon_chain/src/canonical_head.rs:9)) -- Keep lock scopes as narrow as possible to avoid blocking fast-responding functions like the networking stack - -### Async Patterns - -- Avoid blocking computations in async tasks -- Spawn a blocking task instead for CPU-intensive work - -### Tracing - -- Design spans carefully and avoid overuse of spans just to add context data to events -- Avoid using spans on simple getter methods as it can result in performance overhead -- Be cautious of span explosion with recursive functions -- Use spans per meaningful step or computationally critical step -- Avoid using `span.enter()` or `span.entered()` in async tasks - -### Database - -- Maintain schema continuity on `unstable` branch -- Database migrations must be backward compatible - -### Consensus Crate - -- Use safe math methods like `saturating_xxx` or `checked_xxx` -- Critical that this crate behaves deterministically and MUST not have undefined behavior - -### Testing Patterns - -- **Use appropriate test types for the right scenarios**: - - **Unit tests** for single component edge cases and isolated logic - - **Integration tests** using [`BeaconChainHarness`](beacon_node/beacon_chain/src/test_utils.rs:668) for end-to-end workflows -- **`BeaconChainHarness` guidelines**: - - Excellent for integration testing but slower than unit tests - - Prefer unit tests instead for testing edge cases of single components - - Reserve for testing component interactions and full workflows -- **Mocking strategies**: - - Use 
`mockall` crate for unit test mocking - - Use `mockito` for HTTP API mocking (see [`validator_test_rig`](testing/validator_test_rig/src/mock_beacon_node.rs:20) for examples) -- **Event-based testing for sync components**: - - Use [`TestRig`](beacon_node/network/src/sync/tests/mod.rs) pattern for testing sync components - - Sync components interact with the network and beacon chain via events (their public API), making event-based testing more suitable than using internal functions and mutating internal states - - Enables testing of complex state transitions and timing-sensitive scenarios -- **Testing `BeaconChain` dependent components**: - - `BeaconChain` is difficult to create for TDD - - Create intermediate adapter structs to enable easy mocking - - See [`beacon_node/beacon_chain/src/fetch_blobs/tests.rs`](beacon_node/beacon_chain/src/fetch_blobs/tests.rs) for the adapter pattern -- **Local testnet for manual/full E2E testing**: - - Use Kurtosis-based local testnet setup for comprehensive testing - - See [`scripts/local_testnet/README.md`](scripts/local_testnet/README.md) for setup instructions - -### TODOs and Comments - -- All `TODO` statements must be accompanied by a GitHub issue link -- Prefer line (`//`) comments to block comments (`/* ... */`) -- Use doc comments (`///`) before attributes for public items -- Keep documentation concise and clear - avoid verbose explanations -- Provide examples in doc comments for public APIs when helpful - -## Logging Guidelines - -Use appropriate log levels for different scenarios: - -- **`crit`**: Critical issues with major impact to Lighthouse functionality - Lighthouse may not function correctly without resolving. Needs immediate attention. -- **`error`**: Error cases that may have moderate impact to Lighthouse functionality. Expect to receive reports from users for this level. -- **`warn`**: Unexpected code paths that don't have major impact - fully recoverable. Expect user reports if excessive warning logs occur. 
-- **`info`**: High-level logs indicating beacon node status and block import status. Should not be used excessively. -- **`debug`**: Events lower level than info useful for developers. Can also log errors expected during normal operation that users don't need to action. - -## Code Examples - -### Safe Math in Consensus Crate - -```rust -// ❌ Avoid - could panic -let result = a + b; - -// ✅ Preferred -let result = a.saturating_add(b); -// or -use safe_arith::SafeArith; - -let result = a.safe_add(b)?; +# Lint +make lint # Run Clippy +cargo fmt --all && make lint-fix # Format and fix ``` -### Panics and Error Handling +## Before You Start + +Read the relevant guide for your task: + +| Task | Read This First | +|------|-----------------| +| **Code review** | `.ai/CODE_REVIEW.md` | +| **Creating issues/PRs** | `.ai/ISSUES.md` | +| **Development patterns** | `.ai/DEVELOPMENT.md` | + +## Critical Rules (consensus failures or crashes) + +### 1. No Panics at Runtime ```rust -// ❌ Avoid - could panic at runtime -let value = some_result.unwrap(); +// NEVER +let value = option.unwrap(); let item = array[1]; -// ✅ Preferred - proper error handling -let value = some_result.map_err(|e| CustomError::SomeVariant(e))?; +// ALWAYS +let value = option?; let item = array.get(1)?; - -// ✅ Acceptable during startup for CLI/config validation -let config_value = matches.get_one::<String>("required-flag") - .expect("Required flag must be present due to clap validation"); - -// ✅ If you must make runtime assumptions, use expect with explanation -let item = array.get(1).expect("Array always has at least 2 elements due to validation in constructor"); -// Detailed reasoning should be provided in nearby comments ``` -### TODO Format +Only acceptable during startup for CLI/config validation. + +### 2. 
Consensus Crate: Safe Math Only + +In `consensus/` (excluding `types/`), use saturating or checked arithmetic: ```rust -pub fn my_function(&mut self, _something: &[u8]) -> Result<String, Error> { - // TODO: Implement proper validation here - // https://github.com/sigp/lighthouse/issues/1234 -} +// NEVER +let result = a + b; + +// ALWAYS +let result = a.saturating_add(b); ``` -### Async Task Spawning for Blocking Work +## Important Rules (bugs or performance issues) + +### 3. Never Block Async ```rust -// ❌ Avoid - blocking in async context -async fn some_handler() { - let result = expensive_computation(); // blocks async runtime -} +// NEVER +async fn handler() { expensive_computation(); } -// ✅ Preferred -async fn some_handler() { - let result = tokio::task::spawn_blocking(|| { - expensive_computation() - }).await?; +// ALWAYS +async fn handler() { + tokio::task::spawn_blocking(|| expensive_computation()).await?; } ``` -### Tracing Span Usage +### 4. Lock Ordering -```rust -// ❌ Avoid - span on simple getter -#[instrument] -fn get_head_block_root(&self) -> Hash256 { - self.head_block_root -} +Document lock ordering to avoid deadlocks. See [`canonical_head.rs:9-32`](beacon_node/beacon_chain/src/canonical_head.rs) for the pattern. -// ✅ Preferred - span on meaningful operations -#[instrument(skip(self))] -async fn process_block(&self, block: Block) -> Result<(), Error> { - // meaningful computation -} +### 5. Rayon Thread Pools + +Use scoped rayon pools from beacon processor, not global pool. Global pool causes CPU oversubscription when beacon processor has allocated all CPUs. + +## Good Practices + +### 6. TODOs Need Issues + +All `TODO` comments must link to a GitHub issue. + +### 7. Clear Variable Names + +Avoid ambiguous abbreviations (`bb`, `bl`). Use `beacon_block`, `blob`. 
+ +## Branch & PR Guidelines + +- Branch from `unstable`, target `unstable` for PRs +- Run `cargo sort` when adding dependencies +- Run `make cli-local` when updating CLI flags + +## Project Structure + +``` +beacon_node/ # Consensus client + beacon_chain/ # State transition logic + store/ # Database (hot/cold) + network/ # P2P networking + execution_layer/ # EL integration +validator_client/ # Validator duties +consensus/ + types/ # Core data structures + fork_choice/ # Proto-array ``` -## Build and Development Notes +See `.ai/DEVELOPMENT.md` for detailed architecture. -- Full builds and tests take 5+ minutes - use large timeouts (300s+) for any `cargo build`, `cargo nextest`, or `make` commands -- Use `cargo check` for faster iteration during development and always run after code changes -- Prefer targeted package tests (`cargo nextest run -p <package>`) and individual tests over full test suite when debugging specific issues -- Use `cargo fmt --all && make lint-fix` to format code and fix linting issues once a task is complete -- Always understand the broader codebase patterns before making changes -- Minimum Supported Rust Version (MSRV) is documented in `lighthouse/Cargo.toml` - ensure Rust version meets or exceeds this requirement +## Maintaining These Docs + +**These AI docs should evolve based on real interactions.** + +### After Code Reviews + +If a developer corrects your review feedback or points out something you missed: +- Ask: "Should I update `.ai/CODE_REVIEW.md` with this lesson?" +- Add to the "Common Review Patterns" or create a new "Lessons Learned" entry +- Include: what went wrong, what the feedback was, what to do differently + +### After PR/Issue Creation + +If a developer refines your PR description or issue format: +- Ask: "Should I update `.ai/ISSUES.md` to capture this?" 
+- Document the preferred style or format + +### After Development Work + +If you learn something about the codebase architecture or patterns: +- Ask: "Should I update `.ai/DEVELOPMENT.md` with this?" +- Add to relevant section or create new patterns + +### Format for Lessons + +```markdown +### Lesson: [Brief Title] + +**Context:** [What task were you doing?] +**Issue:** [What went wrong or was corrected?] +**Learning:** [What to do differently next time] +``` + +### When NOT to Update + +- Minor preference differences (not worth documenting) +- One-off edge cases unlikely to recur +- Already covered by existing documentation diff --git a/Cargo.lock b/Cargo.lock index 913382fe66..69204ccaec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,7 +4,7 @@ version = 4 [[package]] name = "account_manager" -version = "8.0.1" +version = "8.1.0" dependencies = [ "account_utils", "bls", @@ -111,6 +111,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "alloca" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5a7d05ea6aea7e9e64d25b9156ba2fee3fdd659e34e41063cd2fc7cd020d7f4" +dependencies = [ + "cc", +] + [[package]] name = "allocator-api2" version = "0.2.21" @@ -1031,7 +1040,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 1.1.2", + "rustix", "slab", "windows-sys 0.61.2", ] @@ -1232,7 +1241,7 @@ dependencies = [ "genesis", "hex", "int_to_bytes", - "itertools 0.10.5", + "itertools 0.14.0", "kzg", "lighthouse_version", "logging", @@ -1276,7 +1285,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "8.0.1" +version = "8.1.0" dependencies = [ "account_utils", "beacon_chain", @@ -1315,7 +1324,7 @@ dependencies = [ "clap", "eth2", "futures", - "itertools 0.10.5", + "itertools 0.14.0", "sensitive_url", "serde", "slot_clock", @@ -1334,7 +1343,7 @@ version = "0.1.0" dependencies = [ "fnv", "futures", - "itertools 0.10.5", + "itertools 0.14.0", "lighthouse_network", "logging", "metrics", @@ -1371,15 
+1380,32 @@ dependencies = [ "itertools 0.12.1", "lazy_static", "lazycell", - "log", - "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash 1.1.0", "shlex", "syn 2.0.111", - "which", +] + +[[package]] +name = "bindgen" +version = "0.72.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" +dependencies = [ + "bitflags 2.10.0", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash 2.1.1", + "shlex", + "syn 2.0.111", ] [[package]] @@ -1455,6 +1481,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block-padding" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93" +dependencies = [ + "generic-array", +] + [[package]] name = "block2" version = "0.6.2" @@ -1513,7 +1548,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "8.0.1" +version = "8.1.0" dependencies = [ "beacon_node", "bytes", @@ -1668,6 +1703,15 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "cbc" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" +dependencies = [ + "cipher", +] + [[package]] name = "cc" version = "1.2.49" @@ -1807,7 +1851,7 @@ dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.11.1", + "strsim", "terminal_size", ] @@ -1897,6 +1941,18 @@ dependencies = [ "cc", ] +[[package]] +name = "cms" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b77c319abfd5219629c45c34c89ba945ed3c5e49fcde9d16b6c3885f118a730" +dependencies = [ + "const-oid", + "der", + "spki", + "x509-cert", +] + 
[[package]] name = "colorchoice" version = "1.0.4" @@ -1914,9 +1970,9 @@ dependencies = [ [[package]] name = "compare_fields" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05162add7c8618791829528194a271dca93f69194d35b19db1ca7fbfb8275278" +checksum = "f6f45d0b4d61b582303179fb7a1a142bc9d647b7583db3b0d5f25a21d286fab9" dependencies = [ "compare_fields_derive", "itertools 0.14.0", @@ -1924,12 +1980,12 @@ dependencies = [ [[package]] name = "compare_fields_derive" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5ee468b2e568b668e2a686112935e7bbe9a81bf4fa6b9f6fc3410ea45fb7ce" +checksum = "92ff1dbbda10d495b2c92749c002b2025e0be98f42d1741ecc9ff820d2f04dce" dependencies = [ "quote", - "syn 1.0.109", + "syn 2.0.111", ] [[package]] @@ -2026,9 +2082,9 @@ dependencies = [ [[package]] name = "context_deserialize" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5f9ea0a0ae2de4943f5ca71590b6dbd0b952475f0a0cafb30a470cec78c8b9" +checksum = "4c523eea4af094b5970c321f4604abc42c5549d3cbae332e98325403fbbdbf70" dependencies = [ "context_deserialize_derive", "serde", @@ -2036,12 +2092,12 @@ dependencies = [ [[package]] name = "context_deserialize_derive" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c57b2db1e4e3ed804dcc49894a144b68fe6c754b8f545eb1dda7ad3c7dbe7e6" +checksum = "3b7bf98c48ffa511b14bb3c76202c24a8742cea1efa9570391c5d41373419a09" dependencies = [ "quote", - "syn 1.0.109", + "syn 2.0.111", ] [[package]] @@ -2129,25 +2185,24 @@ dependencies = [ [[package]] name = "criterion" -version = "0.5.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +checksum = 
"950046b2aa2492f9a536f5f4f9a3de7b9e2476e575e05bd6c333371add4d98f3" dependencies = [ + "alloca", "anes", "cast", "ciborium", "clap", "criterion-plot", - "is-terminal", - "itertools 0.10.5", + "itertools 0.13.0", "num-traits", - "once_cell", "oorandom", + "page_size", "plotters", "rayon", "regex", "serde", - "serde_derive", "serde_json", "tinytemplate", "walkdir", @@ -2155,12 +2210,12 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.5.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +checksum = "d8d80a2f4f5b554395e47b5d8305bc3d27813bacb73493eb1001e8f76dae29ea" dependencies = [ "cast", - "itertools 0.10.5", + "itertools 0.13.0", ] [[package]] @@ -2279,26 +2334,6 @@ dependencies = [ "syn 2.0.111", ] -[[package]] -name = "darling" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" -dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", -] - -[[package]] -name = "darling" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" -dependencies = [ - "darling_core 0.20.11", - "darling_macro 0.20.11", -] - [[package]] name = "darling" version = "0.21.3" @@ -2310,31 +2345,13 @@ dependencies = [ ] [[package]] -name = "darling_core" -version = "0.13.4" +name = "darling" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" +checksum = "25ae13da2f202d56bd7f91c25fba009e7717a1e4a1cc98a76d844b65ae912e9d" dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 1.0.109", -] - -[[package]] -name = "darling_core" -version = "0.20.11" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.11.1", - "syn 2.0.111", + "darling_core 0.23.0", + "darling_macro 0.23.0", ] [[package]] @@ -2348,29 +2365,20 @@ dependencies = [ "proc-macro2", "quote", "serde", - "strsim 0.11.1", + "strsim", "syn 2.0.111", ] [[package]] -name = "darling_macro" -version = "0.13.4" +name = "darling_core" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" +checksum = "9865a50f7c335f53564bb694ef660825eb8610e0a53d3e11bf1b0d3df31e03b0" dependencies = [ - "darling_core 0.13.4", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" -dependencies = [ - "darling_core 0.20.11", + "ident_case", + "proc-macro2", "quote", + "strsim", "syn 2.0.111", ] @@ -2385,6 +2393,17 @@ dependencies = [ "syn 2.0.111", ] +[[package]] +name = "darling_macro" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" +dependencies = [ + "darling_core 0.23.0", + "quote", + "syn 2.0.111", +] + [[package]] name = "darwin-libproc" version = "0.1.2" @@ -2503,6 +2522,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" dependencies = [ "const-oid", + "der_derive", + "flagset", + "pem-rfc7468", "zeroize", ] @@ -2520,6 +2542,17 @@ dependencies = [ "rusticata-macros", ] +[[package]] +name = "der_derive" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "deranged" version = "0.5.5" @@ -2552,37 +2585,6 @@ dependencies = [ "syn 2.0.111", ] -[[package]] -name = "derive_builder" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" -dependencies = [ - "derive_builder_macro", -] - -[[package]] -name = "derive_builder_core" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" -dependencies = [ - "darling 0.20.11", - "proc-macro2", - "quote", - "syn 2.0.111", -] - -[[package]] -name = "derive_builder_macro" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" -dependencies = [ - "derive_builder_core", - "syn 2.0.111", -] - [[package]] name = "derive_more" version = "0.99.20" @@ -2619,6 +2621,15 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "des" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdd80ce8ce993de27e9f063a444a4d53ce8e8db4c1f00cc03af5ad5a9867a1e" +dependencies = [ + "cipher", +] + [[package]] name = "digest" version = "0.9.0" @@ -3263,15 +3274,15 @@ dependencies = [ [[package]] name = "ethereum_ssz" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8cd8c4f47dfb947dbfe3cdf2945ae1da808dbedc592668658e827a12659ba1" +checksum = "2128a84f7a3850d54ee343334e3392cca61f9f6aa9441eec481b9394b43c238b" dependencies = [ "alloy-primitives", "arbitrary", "context_deserialize", "ethereum_serde_utils", - "itertools 0.13.0", + "itertools 0.14.0", "serde", "serde_derive", "smallvec", @@ -3280,11 +3291,11 @@ dependencies = 
[ [[package]] name = "ethereum_ssz_derive" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78d247bc40823c365a62e572441a8f8b12df03f171713f06bc76180fcd56ab71" +checksum = "cd596f91cff004fc8d02be44c21c0f9b93140a04b66027ae052f5f8e05b48eba" dependencies = [ - "darling 0.20.11", + "darling 0.23.0", "proc-macro2", "quote", "syn 2.0.111", @@ -3528,6 +3539,12 @@ dependencies = [ "safe_arith", ] +[[package]] +name = "flagset" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7ac824320a75a52197e8f2d787f6a38b6718bb6897a35142d749af3c0e8f4fe" + [[package]] name = "flate2" version = "1.1.5" @@ -3558,21 +3575,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "fork_choice" version = "0.1.0" @@ -3810,18 +3812,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "getset" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" -dependencies = [ - "proc-macro-error2", - "proc-macro2", - "quote", - "syn 2.0.111", -] - [[package]] name = "ghash" version = "0.5.1" @@ -4137,15 +4127,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "home" -version = "0.5.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d" -dependencies = [ - "windows-sys 0.61.2", -] - [[package]] name = "http" version = "0.2.12" @@ -4370,22 +4351,6 @@ dependencies = [ "tower-service", ] -[[package]] -name = "hyper-tls" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" -dependencies = [ - "bytes", - "http-body-util", - "hyper 1.8.1", - "hyper-util", - "native-tls", - "tokio", - "tokio-native-tls", - "tower-service", -] - [[package]] name = "hyper-util" version = "0.1.19" @@ -4552,6 +4517,16 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "if-addrs" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf39cc0423ee66021dc5eccface85580e4a001e0c5288bae8bea7ecb69225e90" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + [[package]] name = "if-watch" version = "3.2.1" @@ -4562,7 +4537,7 @@ dependencies = [ "core-foundation 0.9.4", "fnv", "futures", - "if-addrs", + "if-addrs 0.10.2", "ipnet", "log", "netlink-packet-core", @@ -4651,7 +4626,9 @@ dependencies = [ "filesystem", "lockfile", "metrics", + "p12-keystore", "parking_lot", + "pem", "rand 0.9.2", "reqwest", "serde", @@ -4672,6 +4649,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" dependencies = [ + "block-padding", "generic-array", ] @@ -4721,17 +4699,6 @@ dependencies = [ "serde", ] -[[package]] -name = "is-terminal" -version = "0.4.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" -dependencies = [ - "hermit-abi", - "libc", - "windows-sys 0.61.2", -] - [[package]] name = "is_terminal_polyfill" version = "1.70.2" @@ -4897,7 +4864,7 @@ checksum = 
"830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "8.0.1" +version = "8.1.0" dependencies = [ "account_utils", "beacon_chain", @@ -5383,7 +5350,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "8.0.1" +version = "8.1.0" dependencies = [ "account_manager", "account_utils", @@ -5453,11 +5420,11 @@ dependencies = [ "fnv", "futures", "hex", - "itertools 0.10.5", + "if-addrs 0.14.0", + "itertools 0.14.0", "libp2p", "libp2p-mplex", "lighthouse_version", - "local-ip-address", "logging", "lru 0.12.5", "lru_cache", @@ -5515,17 +5482,11 @@ dependencies = [ [[package]] name = "lighthouse_version" -version = "8.0.1" +version = "8.1.0" dependencies = [ "regex", ] -[[package]] -name = "linux-raw-sys" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" - [[package]] name = "linux-raw-sys" version = "0.11.0" @@ -5559,18 +5520,6 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "local-ip-address" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a60bf300a990b2d1ebdde4228e873e8e4da40d834adbf5265f3da1457ede652" -dependencies = [ - "libc", - "neli", - "thiserror 2.0.17", - "windows-sys 0.61.2", -] - [[package]] name = "lock_api" version = "0.4.14" @@ -5695,13 +5644,13 @@ checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" [[package]] name = "match-lookup" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1265724d8cb29dbbc2b0f06fffb8bf1a8c0cf73a78eede9ba73a4a66c52a981e" +checksum = "757aee279b8bdbb9f9e676796fd459e4207a1f986e87886700abf589f5abf771" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.111", ] [[package]] @@ -5730,7 +5679,7 @@ name = "mdbx-sys" version = "0.11.6-4" source = 
"git+https://github.com/sigp/libmdbx-rs?rev=e6ff4b9377c1619bcf0bfdf52bee5a980a432a1a#e6ff4b9377c1619bcf0bfdf52bee5a980a432a1a" dependencies = [ - "bindgen", + "bindgen 0.69.5", "cc", "cmake", "libc", @@ -5770,25 +5719,25 @@ dependencies = [ [[package]] name = "metastruct" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d74f54f231f9a18d77393ecc5cc7ab96709b2a61ee326c2b2b291009b0cc5a07" +checksum = "969a1be9bd80794bdf93b23ab552c2ec6f3e83b33164824553fd996cdad513b8" dependencies = [ "metastruct_macro", ] [[package]] name = "metastruct_macro" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "985e7225f3a4dfbec47a0c6a730a874185fda840d365d7bbd6ba199dd81796d5" +checksum = "de9164f767d73a507c19205868c84da411dc7795f4bdabf497d3dd93cfef9930" dependencies = [ - "darling 0.13.4", - "itertools 0.10.5", + "darling 0.23.0", + "itertools 0.14.0", "proc-macro2", "quote", "smallvec", - "syn 1.0.109", + "syn 2.0.111", ] [[package]] @@ -6031,52 +5980,6 @@ dependencies = [ "unsigned-varint", ] -[[package]] -name = "native-tls" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" -dependencies = [ - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework 2.11.1", - "security-framework-sys", - "tempfile", -] - -[[package]] -name = "neli" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e23bebbf3e157c402c4d5ee113233e5e0610cc27453b2f07eefce649c7365dcc" -dependencies = [ - "bitflags 2.10.0", - "byteorder", - "derive_builder", - "getset", - "libc", - "log", - "neli-proc-macros", - "parking_lot", -] - -[[package]] -name = "neli-proc-macros" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"05d8d08c6e98f20a62417478ebf7be8e1425ec9acecc6f63e22da633f6b71609" -dependencies = [ - "either", - "proc-macro2", - "quote", - "serde", - "syn 2.0.111", -] - [[package]] name = "netlink-packet-core" version = "0.7.0" @@ -6164,7 +6067,7 @@ dependencies = [ "genesis", "hex", "igd-next", - "itertools 0.10.5", + "itertools 0.14.0", "k256", "kzg", "libp2p", @@ -6323,9 +6226,9 @@ dependencies = [ [[package]] name = "num-conv" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" [[package]] name = "num-integer" @@ -6461,60 +6364,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" -[[package]] -name = "openssl" -version = "0.10.75" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" -dependencies = [ - "bitflags 2.10.0", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.111", -] - [[package]] name = "openssl-probe" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" -[[package]] -name = "openssl-src" -version = "300.5.4+3.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507b3792995dae9b0df8a1c1e3771e8418b7c2d9f0baeba32e6fe8b06c7cb72" -dependencies = [ - "cc", -] - -[[package]] -name = "openssl-sys" -version = "0.9.111" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" -dependencies = [ - "cc", - "libc", - "openssl-src", - "pkg-config", - "vcpkg", -] - [[package]] name = "opentelemetry" version = "0.30.0" @@ -6600,7 +6455,7 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "fixed_bytes", - "itertools 0.10.5", + "itertools 0.14.0", "maplit", "metrics", "parking_lot", @@ -6615,6 +6470,39 @@ dependencies = [ "types", ] +[[package]] +name = "p12-keystore" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8d55319bae67f92141ce4da80c5392acd3d1323bd8312c1ffdfb018927d07d7" +dependencies = [ + "base64 0.22.1", + "cbc", + "cms", + "der", + "des", + "hex", + "hmac", + "pkcs12", + "pkcs5", + "rand 0.9.2", + "rc2", + "sha1", + "sha2", + "thiserror 2.0.17", + "x509-parser", +] + +[[package]] +name = "page_size" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "pairing" version = "0.23.0" @@ -6707,6 +6595,15 @@ dependencies = [ "serde_core", ] +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.2" @@ -6755,6 +6652,36 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs12" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "695b3df3d3cc1015f12d70235e35b6b79befc5fa7a9b95b951eab1dd07c9efc2" +dependencies = [ + "cms", + "const-oid", + "der", + "digest 0.10.7", + 
"spki", + "x509-cert", + "zeroize", +] + +[[package]] +name = "pkcs5" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e847e2c91a18bfa887dd028ec33f2fe6f25db77db3619024764914affe8b69a6" +dependencies = [ + "aes", + "cbc", + "der", + "pbkdf2", + "scrypt", + "sha2", + "spki", +] + [[package]] name = "pkcs8" version = "0.10.2" @@ -6815,7 +6742,7 @@ dependencies = [ "concurrent-queue", "hermit-abi", "pin-project-lite", - "rustix 1.1.2", + "rustix", "windows-sys 0.61.2", ] @@ -6975,7 +6902,7 @@ checksum = "25485360a54d6861439d60facef26de713b1e126bf015ec8f98239467a2b82f7" dependencies = [ "bitflags 2.10.0", "procfs-core", - "rustix 1.1.2", + "rustix", ] [[package]] @@ -7353,6 +7280,15 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "rc2" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62c64daa8e9438b84aaae55010a93f396f8e60e3911590fcba770d04643fc1dd" +dependencies = [ + "cipher", +] + [[package]] name = "rcgen" version = "0.13.2" @@ -7460,11 +7396,9 @@ dependencies = [ "http-body-util", "hyper 1.8.1", "hyper-rustls", - "hyper-tls", "hyper-util", "js-sys", "log", - "native-tls", "percent-encoding", "pin-project-lite", "quinn", @@ -7475,7 +7409,6 @@ dependencies = [ "serde_urlencoded", "sync_wrapper", "tokio", - "tokio-native-tls", "tokio-rustls 0.26.4", "tokio-util", "tower 0.5.2", @@ -7693,19 +7626,6 @@ dependencies = [ "nom", ] -[[package]] -name = "rustix" -version = "0.38.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" -dependencies = [ - "bitflags 2.10.0", - "errno", - "libc", - "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", -] - [[package]] name = "rustix" version = "1.1.2" @@ -7715,7 +7635,7 @@ dependencies = [ "bitflags 2.10.0", "errno", "libc", - "linux-raw-sys 0.11.0", + "linux-raw-sys", "windows-sys 0.61.2", ] @@ -7757,7 +7677,7 @@ dependencies = [ 
"openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.5.1", + "security-framework", ] [[package]] @@ -7960,19 +7880,6 @@ dependencies = [ "cc", ] -[[package]] -name = "security-framework" -version = "2.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" -dependencies = [ - "bitflags 2.10.0", - "core-foundation 0.9.4", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - [[package]] name = "security-framework" version = "3.5.1" @@ -8500,7 +8407,7 @@ dependencies = [ "fixed_bytes", "int_to_bytes", "integer-sqrt", - "itertools 0.10.5", + "itertools 0.14.0", "merkle_proof", "metrics", "milhouse", @@ -8548,7 +8455,7 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "fixed_bytes", - "itertools 0.10.5", + "itertools 0.14.0", "leveldb", "logging", "lru 0.12.5", @@ -8573,12 +8480,6 @@ dependencies = [ "zstd", ] -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - [[package]] name = "strsim" version = "0.11.1" @@ -8614,12 +8515,12 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "superstruct" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b986e4a629907f20a2c2a639a75bc22a8b5d99b444e0d83c395f4cb309022bf" +checksum = "bae4a9ccd7882533c1f210e400763ec6ee64c390fc12248c238276281863719e" dependencies = [ - "darling 0.20.11", - "itertools 0.13.0", + "darling 0.23.0", + "itertools 0.14.0", "proc-macro2", "quote", "smallvec", @@ -8780,7 +8681,7 @@ dependencies = [ "fastrand", "getrandom 0.3.4", "once_cell", - "rustix 1.1.2", + "rustix", "windows-sys 0.61.2", ] @@ -8790,7 +8691,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"60b8cb979cb11c32ce1603f8137b22262a9d131aaa5c37b5678025f22b8becd0" dependencies = [ - "rustix 1.1.2", + "rustix", "windows-sys 0.60.2", ] @@ -8899,30 +8800,30 @@ dependencies = [ [[package]] name = "time" -version = "0.3.44" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" dependencies = [ "deranged", "itoa", "num-conv", "powerfmt", - "serde", + "serde_core", "time-core", "time-macros", ] [[package]] name = "time-core" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.24" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" dependencies = [ "num-conv", "time-core", @@ -9029,16 +8930,6 @@ dependencies = [ "syn 2.0.111", ] -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.25.0" @@ -9359,9 +9250,9 @@ dependencies = [ [[package]] name = "tree_hash" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2db21caa355767db4fd6129876e5ae278a8699f4a6959b1e3e7aff610b532d52" +checksum = "f7fd51aa83d2eb83b04570808430808b5d24fdbf479a4d5ac5dee4a2e2dd2be4" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -9372,11 +9263,11 @@ 
dependencies = [ [[package]] name = "tree_hash_derive" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711cc655fcbb48384a87dc2bf641b991a15c5ad9afc3caa0b1ab1df3b436f70f" +checksum = "8840ad4d852e325d3afa7fde8a50b2412f89dce47d7eb291c0cc7f87cd040f38" dependencies = [ - "darling 0.21.3", + "darling 0.23.0", "proc-macro2", "quote", "syn 2.0.111", @@ -9435,7 +9326,7 @@ dependencies = [ "fixed_bytes", "hex", "int_to_bytes", - "itertools 0.10.5", + "itertools 0.14.0", "kzg", "maplit", "merkle_proof", @@ -9622,7 +9513,7 @@ dependencies = [ [[package]] name = "validator_client" -version = "8.0.1" +version = "8.1.0" dependencies = [ "account_utils", "beacon_node_fallback", @@ -9693,7 +9584,7 @@ dependencies = [ "graffiti_file", "health_metrics", "initialized_validators", - "itertools 0.10.5", + "itertools 0.14.0", "lighthouse_validator_store", "lighthouse_version", "logging", @@ -10086,18 +9977,6 @@ dependencies = [ "rustls-pki-types", ] -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix 0.38.44", -] - [[package]] name = "widestring" version = "0.4.3" @@ -10532,6 +10411,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "x509-cert" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1301e935010a701ae5f8655edc0ad17c44bad3ac5ce8c39185f75453b720ae94" +dependencies = [ + "const-oid", + "der", + "spki", +] + [[package]] name = "x509-parser" version = "0.17.0" @@ -10552,15 +10442,15 @@ dependencies = [ [[package]] name = "xdelta3" version = "0.1.5" -source = "git+https://github.com/sigp/xdelta3-rs?rev=4db64086bb02e9febb584ba93b9d16bb2ae3825a#4db64086bb02e9febb584ba93b9d16bb2ae3825a" +source = 
"git+https://github.com/sigp/xdelta3-rs?rev=fe3906605c87b6c0515bd7c8fc671f47875e3ccc#fe3906605c87b6c0515bd7c8fc671f47875e3ccc" dependencies = [ - "bindgen", + "bindgen 0.72.1", "cc", "futures-io", "futures-util", "libc", "log", - "rand 0.8.5", + "rand 0.9.2", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 78c63875d3..100a916c50 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,7 +91,7 @@ resolver = "2" [workspace.package] edition = "2024" -version = "8.0.1" +version = "8.1.0" [workspace.dependencies] account_utils = { path = "common/account_utils" } @@ -126,7 +126,7 @@ clap_utils = { path = "common/clap_utils" } compare_fields = "0.1" console-subscriber = "0.4" context_deserialize = "0.2" -criterion = "0.5" +criterion = "0.8" delay_map = "0.4" deposit_contract = { path = "common/deposit_contract" } directory = { path = "common/directory" } @@ -164,7 +164,7 @@ http_api = { path = "beacon_node/http_api" } hyper = "1" initialized_validators = { path = "validator_client/initialized_validators" } int_to_bytes = { path = "consensus/int_to_bytes" } -itertools = "0.10" +itertools = "0.14" kzg = { path = "crypto/kzg" } libp2p = { git = "https://github.com/libp2p/rust-libp2p.git", default-features = false, features = [ "identify", @@ -224,7 +224,6 @@ reqwest = { version = "0.12", default-features = false, features = [ "json", "stream", "rustls-tls", - "native-tls-vendored", ] } ring = "0.17" rpds = "0.11" @@ -286,7 +285,7 @@ validator_test_rig = { path = "testing/validator_test_rig" } warp = { version = "0.3.7", default-features = false, features = ["tls"] } warp_utils = { path = "common/warp_utils" } workspace_members = { path = "common/workspace_members" } -xdelta3 = { git = "https://github.com/sigp/xdelta3-rs", rev = "4db64086bb02e9febb584ba93b9d16bb2ae3825a" } +xdelta3 = { git = "https://github.com/sigp/xdelta3-rs", rev = "fe3906605c87b6c0515bd7c8fc671f47875e3ccc" } zeroize = { version = "1", features = ["zeroize_derive", "serde"] } zip = { version = "6.0", 
default-features = false, features = ["deflate"] } zstd = "0.13" diff --git a/Makefile b/Makefile index 9d08c3ebe1..9e2b1d24c5 100644 --- a/Makefile +++ b/Makefile @@ -343,6 +343,12 @@ vendor: udeps: cargo +$(PINNED_NIGHTLY) udeps --tests --all-targets --release --features "$(TEST_FEATURES)" +# Checks Cargo.toml files for unencrypted HTTP links +insecure-deps: + @ BAD_LINKS=$$(find . -name Cargo.toml | xargs grep -n "http://" || true); \ + if [ -z "$$BAD_LINKS" ]; then echo "No insecure HTTP links found"; \ + else echo "$$BAD_LINKS"; echo "Using plain HTTP in Cargo.toml files is forbidden"; exit 1; fi + # Performs a `cargo` clean and cleans the `ef_tests` directory. clean: cargo clean diff --git a/beacon_node/beacon_chain/benches/benches.rs b/beacon_node/beacon_chain/benches/benches.rs index 0d4040155d..e71a19d8c1 100644 --- a/beacon_node/beacon_chain/benches/benches.rs +++ b/beacon_node/beacon_chain/benches/benches.rs @@ -1,8 +1,9 @@ +use std::hint::black_box; use std::sync::Arc; use beacon_chain::kzg_utils::{blobs_to_data_column_sidecars, reconstruct_data_columns}; use beacon_chain::test_utils::get_kzg; -use criterion::{Criterion, black_box, criterion_group, criterion_main}; +use criterion::{Criterion, criterion_group, criterion_main}; use bls::Signature; use kzg::{KzgCommitment, KzgProof}; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 1b0aea5a2f..9bb6757341 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -2148,11 +2148,13 @@ pub fn verify_header_signature<T: BeaconChainTypes, Err: BlockBlobError>( .get(header.message.proposer_index as usize) .cloned() .ok_or(Err::unknown_validator_error(header.message.proposer_index))?; - let head_fork = chain.canonical_head.cached_head().head_fork(); + let fork = chain + .spec + .fork_at_epoch(header.message.slot.epoch(T::EthSpec::slots_per_epoch())); if 
header.verify_signature::<T::EthSpec>( &proposer_pubkey, - &head_fork, + &fork, chain.genesis_validators_root, &chain.spec, ) { diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 84e600cd40..6a028e6c98 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -279,7 +279,7 @@ impl<E: EthSpec> AvailabilityPendingExecutedBlock<E> { } } -#[derive(Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq)] pub struct BlockImportData<E: EthSpec> { pub block_root: Hash256, pub state: BeaconState<E>, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index c095b57849..0079335d8d 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1050,7 +1050,6 @@ where complete_blob_backfill, slot_clock, self.kzg.clone(), - store, Arc::new(custody_context), self.spec, ) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index db071db166..1a08ac3f88 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -918,6 +918,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> { .start_slot(T::EthSpec::slots_per_epoch()), ); + self.observed_column_sidecars.write().prune( + new_view + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + ); + self.observed_slashable.write().prune( new_view .finalized_checkpoint diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 666ba7cc41..e266e02f7f 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -5,9 +5,7 @@ use crate::block_verification_types::{AvailabilityPendingExecutedBlock, Availabl use 
crate::data_availability_checker::overflow_lru_cache::{ DataAvailabilityCheckerInner, ReconstructColumnsDecision, }; -use crate::{ - BeaconChain, BeaconChainTypes, BeaconStore, BlockProcessStatus, CustodyContext, metrics, -}; +use crate::{BeaconChain, BeaconChainTypes, BlockProcessStatus, CustodyContext, metrics}; use educe::Educe; use kzg::Kzg; use slot_clock::SlotClock; @@ -27,7 +25,6 @@ use types::{ mod error; mod overflow_lru_cache; -mod state_lru_cache; use crate::data_availability_checker::error::Error; use crate::data_column_verification::{ @@ -53,7 +50,6 @@ use types::new_non_zero_usize; /// `PendingComponents` are now never removed from the cache manually are only removed via LRU /// eviction to prevent race conditions (#7961), so we expect this cache to be full all the time. const OVERFLOW_LRU_CAPACITY_NON_ZERO: NonZeroUsize = new_non_zero_usize(32); -const STATE_LRU_CAPACITY_NON_ZERO: NonZeroUsize = new_non_zero_usize(32); /// Cache to hold fully valid data that can't be imported to fork-choice yet. After Dencun hard-fork /// blocks have a sidecar of data that is received separately from the network. We call the concept @@ -122,13 +118,11 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> { complete_blob_backfill: bool, slot_clock: T::SlotClock, kzg: Arc<Kzg>, - store: BeaconStore<T>, custody_context: Arc<CustodyContext<T::EthSpec>>, spec: Arc<ChainSpec>, ) -> Result<Self, AvailabilityCheckError> { let inner = DataAvailabilityCheckerInner::new( OVERFLOW_LRU_CAPACITY_NON_ZERO, - store, custody_context.clone(), spec.clone(), )?; @@ -469,7 +463,6 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> { /// Collects metrics from the data availability checker. 
pub fn metrics(&self) -> DataAvailabilityCheckerMetrics { DataAvailabilityCheckerMetrics { - state_cache_size: self.availability_cache.state_cache_size(), block_cache_size: self.availability_cache.block_cache_size(), } } @@ -565,7 +558,6 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> { /// Helper struct to group data availability checker metrics. pub struct DataAvailabilityCheckerMetrics { - pub state_cache_size: usize, pub block_cache_size: usize, } @@ -912,7 +904,6 @@ mod test { use std::collections::HashSet; use std::sync::Arc; use std::time::Duration; - use store::HotColdDB; use types::data::DataColumn; use types::{ ChainSpec, ColumnIndex, DataColumnSidecarFulu, EthSpec, ForkName, MainnetEthSpec, Slot, @@ -1253,7 +1244,6 @@ mod test { spec.get_slot_duration(), ); let kzg = get_kzg(&spec); - let store = Arc::new(HotColdDB::open_ephemeral(<_>::default(), spec.clone()).unwrap()); let ordered_custody_column_indices = generate_data_column_indices_rand_order::<E>(); let custody_context = Arc::new(CustodyContext::new( NodeCustodyType::Fullnode, @@ -1265,7 +1255,6 @@ mod test { complete_blob_backfill, slot_clock, kzg, - store, custody_context, spec, ) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index ff098a827d..f7bd646f82 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -1,7 +1,5 @@ use super::AvailableBlockData; -use super::state_lru_cache::{DietAvailabilityPendingExecutedBlock, StateLRUCache}; use crate::CustodyContext; -use crate::beacon_chain::BeaconStore; use crate::blob_verification::KzgVerifiedBlob; use crate::block_verification_types::{ AvailabilityPendingExecutedBlock, AvailableBlock, AvailableExecutedBlock, @@ -23,10 +21,9 @@ use types::{ DataColumnSidecarList, Epoch, EthSpec, Hash256, 
SignedBeaconBlock, }; -#[derive(Clone)] pub enum CachedBlock<E: EthSpec> { PreExecution(Arc<SignedBeaconBlock<E>>, BlockImportSource), - Executed(Box<DietAvailabilityPendingExecutedBlock<E>>), + Executed(Box<AvailabilityPendingExecutedBlock<E>>), } impl<E: EthSpec> CachedBlock<E> { @@ -43,7 +40,7 @@ impl<E: EthSpec> CachedBlock<E> { fn as_block(&self) -> &SignedBeaconBlock<E> { match self { CachedBlock::PreExecution(b, _) => b, - CachedBlock::Executed(b) => b.as_block(), + CachedBlock::Executed(b) => b.block.as_ref(), } } @@ -84,14 +81,6 @@ impl<E: EthSpec> PendingComponents<E> { &self.verified_blobs } - #[cfg(test)] - fn get_diet_block(&self) -> Option<&DietAvailabilityPendingExecutedBlock<E>> { - self.block.as_ref().and_then(|block| match block { - CachedBlock::Executed(block) => Some(block.as_ref()), - _ => None, - }) - } - /// Returns an immutable reference to the cached data column. pub fn get_cached_data_column( &self, @@ -129,7 +118,7 @@ impl<E: EthSpec> PendingComponents<E> { } /// Inserts an executed block into the cache. - pub fn insert_executed_block(&mut self, block: DietAvailabilityPendingExecutedBlock<E>) { + pub fn insert_executed_block(&mut self, block: AvailabilityPendingExecutedBlock<E>) { self.block = Some(CachedBlock::Executed(Box::new(block))) } @@ -201,7 +190,7 @@ impl<E: EthSpec> PendingComponents<E> { /// Inserts a new block and revalidates the existing blobs against it. /// /// Blobs that don't match the new block's commitments are evicted. - pub fn merge_block(&mut self, block: DietAvailabilityPendingExecutedBlock<E>) { + pub fn merge_block(&mut self, block: AvailabilityPendingExecutedBlock<E>) { self.insert_executed_block(block); let reinsert = self.get_cached_blobs_mut().take(); self.merge_blobs(reinsert); @@ -209,21 +198,11 @@ impl<E: EthSpec> PendingComponents<E> { /// Returns Some if the block has received all its required data for import. The return value /// must be persisted in the DB along with the block. 
- /// - /// WARNING: This function can potentially take a lot of time if the state needs to be - /// reconstructed from disk. Ensure you are not holding any write locks while calling this. - pub fn make_available<R>( + pub fn make_available( &self, spec: &Arc<ChainSpec>, num_expected_columns_opt: Option<usize>, - recover: R, - ) -> Result<Option<AvailableExecutedBlock<E>>, AvailabilityCheckError> - where - R: FnOnce( - DietAvailabilityPendingExecutedBlock<E>, - &Span, - ) -> Result<AvailabilityPendingExecutedBlock<E>, AvailabilityCheckError>, - { + ) -> Result<Option<AvailableExecutedBlock<E>>, AvailabilityCheckError> { let Some(CachedBlock::Executed(block)) = &self.block else { // Block not available yet return Ok(None); @@ -266,7 +245,7 @@ impl<E: EthSpec> PendingComponents<E> { ))); } Ordering::Equal => { - let max_blobs = spec.max_blobs_per_block(block.epoch()) as usize; + let max_blobs = spec.max_blobs_per_block(block.block.epoch()) as usize; let blobs_vec = self .verified_blobs .iter() @@ -311,11 +290,11 @@ impl<E: EthSpec> PendingComponents<E> { block, import_data, payload_verification_outcome, - } = recover(*block.clone(), &self.span)?; + } = block.as_ref(); let available_block = AvailableBlock { block_root: self.block_root, - block, + block: block.clone(), blob_data, blobs_available_timestamp, spec: spec.clone(), @@ -326,8 +305,8 @@ impl<E: EthSpec> PendingComponents<E> { }); Ok(Some(AvailableExecutedBlock::new( available_block, - import_data, - payload_verification_outcome, + import_data.clone(), + payload_verification_outcome.clone(), ))) } @@ -399,9 +378,6 @@ impl<E: EthSpec> PendingComponents<E> { pub struct DataAvailabilityCheckerInner<T: BeaconChainTypes> { /// Contains all the data we keep in memory, protected by an RwLock critical: RwLock<LruCache<Hash256, PendingComponents<T::EthSpec>>>, - /// This cache holds a limited number of states in memory and reconstructs them - /// from disk when necessary. 
This is necessary until we merge tree-states - state_cache: StateLRUCache<T>, custody_context: Arc<CustodyContext<T::EthSpec>>, spec: Arc<ChainSpec>, } @@ -418,13 +394,11 @@ pub(crate) enum ReconstructColumnsDecision<E: EthSpec> { impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> { pub fn new( capacity: NonZeroUsize, - beacon_store: BeaconStore<T>, custody_context: Arc<CustodyContext<T::EthSpec>>, spec: Arc<ChainSpec>, ) -> Result<Self, AvailabilityCheckError> { Ok(Self { critical: RwLock::new(LruCache::new(capacity)), - state_cache: StateLRUCache::new(beacon_store, spec.clone()), custody_context, spec, }) @@ -441,7 +415,7 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> { BlockProcessStatus::NotValidated(b.clone(), *source) } CachedBlock::Executed(b) => { - BlockProcessStatus::ExecutionValidated(b.block_cloned()) + BlockProcessStatus::ExecutionValidated(b.block.clone()) } }) }) @@ -580,11 +554,9 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> { pending_components: MappedRwLockReadGuard<'_, PendingComponents<T::EthSpec>>, num_expected_columns_opt: Option<usize>, ) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> { - if let Some(available_block) = pending_components.make_available( - &self.spec, - num_expected_columns_opt, - |block, span| self.state_cache.recover_pending_executed_block(block, span), - )? { + if let Some(available_block) = + pending_components.make_available(&self.spec, num_expected_columns_opt)? 
+ { // Explicitly drop read lock before acquiring write lock drop(pending_components); if let Some(components) = self.critical.write().get_mut(&block_root) { @@ -739,14 +711,9 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> { let epoch = executed_block.as_block().epoch(); let block_root = executed_block.import_data.block_root; - // register the block to get the diet block - let diet_executed_block = self - .state_cache - .register_pending_executed_block(executed_block); - let pending_components = self.update_or_insert_pending_components(block_root, epoch, |pending_components| { - pending_components.merge_block(diet_executed_block); + pending_components.merge_block(executed_block); Ok(()) })?; @@ -780,9 +747,6 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> { /// maintain the cache pub fn do_maintenance(&self, cutoff_epoch: Epoch) -> Result<(), AvailabilityCheckError> { - // clean up any lingering states in the state cache - self.state_cache.do_maintenance(cutoff_epoch); - // Collect keys of pending blocks from a previous epoch to cutoff let mut write_lock = self.critical.write(); let mut keys_to_remove = vec![]; @@ -801,17 +765,6 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> { Ok(()) } - #[cfg(test)] - /// get the state cache for inspection (used only for tests) - pub fn state_lru_cache(&self) -> &StateLRUCache<T> { - &self.state_cache - } - - /// Number of states stored in memory in the cache. - pub fn state_cache_size(&self) -> usize { - self.state_cache.lru_cache().read().len() - } - /// Number of pending component entries in memory in the cache. 
pub fn block_cache_size(&self) -> usize { self.critical.read().len() @@ -828,21 +781,18 @@ mod test { block_verification::PayloadVerificationOutcome, block_verification_types::{AsBlock, BlockImportData}, custody_context::NodeCustodyType, - data_availability_checker::STATE_LRU_CAPACITY_NON_ZERO, test_utils::{BaseHarnessType, BeaconChainHarness, DiskHarnessType}, }; use fork_choice::PayloadVerificationStatus; use logging::create_test_tracing_subscriber; use state_processing::ConsensusContext; - use std::collections::VecDeque; use store::{HotColdDB, ItemStore, StoreConfig, database::interface::BeaconNodeBackend}; use tempfile::{TempDir, tempdir}; - use tracing::{debug_span, info}; + use tracing::info; use types::new_non_zero_usize; use types::{ExecPayload, MinimalEthSpec}; const LOW_VALIDATOR_COUNT: usize = 32; - const STATE_LRU_CAPACITY: usize = STATE_LRU_CAPACITY_NON_ZERO.get(); fn get_store_with_spec<E: EthSpec>( db_path: &TempDir, @@ -1021,7 +971,6 @@ mod test { let chain_db_path = tempdir().expect("should get temp dir"); let harness = get_deneb_chain(&chain_db_path).await; let spec = harness.spec.clone(); - let test_store = harness.chain.store.clone(); let capacity_non_zero = new_non_zero_usize(capacity); let custody_context = Arc::new(CustodyContext::new( NodeCustodyType::Fullnode, @@ -1031,7 +980,6 @@ mod test { let cache = Arc::new( DataAvailabilityCheckerInner::<T>::new( capacity_non_zero, - test_store, custody_context, spec.clone(), ) @@ -1137,121 +1085,6 @@ mod test { "cache should still have available block" ); } - - #[tokio::test] - // ensure the state cache keeps memory usage low and that it can properly recover states - // THIS TEST CAN BE DELETED ONCE TREE STATES IS MERGED AND WE RIP OUT THE STATE CACHE - async fn overflow_cache_test_state_cache() { - type E = MinimalEthSpec; - type T = DiskHarnessType<E>; - let capacity = STATE_LRU_CAPACITY * 2; - let (harness, cache, _path) = setup_harness_and_cache::<E, T>(capacity).await; - - let mut pending_blocks 
= VecDeque::new(); - let mut states = Vec::new(); - let mut state_roots = Vec::new(); - // Get enough blocks to fill the cache to capacity, ensuring all blocks have blobs - while pending_blocks.len() < capacity { - let (mut pending_block, _) = availability_pending_block(&harness).await; - if pending_block.num_blobs_expected() == 0 { - // we need blocks with blobs - continue; - } - let state_root = pending_block.import_data.state.canonical_root().unwrap(); - states.push(pending_block.import_data.state.clone()); - pending_blocks.push_back(pending_block); - state_roots.push(state_root); - } - - let state_cache = cache.state_lru_cache().lru_cache(); - let mut pushed_diet_blocks = VecDeque::new(); - - for i in 0..capacity { - let pending_block = pending_blocks.pop_front().expect("should have block"); - let block_root = pending_block.as_block().canonical_root(); - - assert_eq!( - state_cache.read().len(), - std::cmp::min(i, STATE_LRU_CAPACITY), - "state cache should be empty at start" - ); - - if i >= STATE_LRU_CAPACITY { - let lru_root = state_roots[i - STATE_LRU_CAPACITY]; - assert_eq!( - state_cache.read().peek_lru().map(|(root, _)| root), - Some(&lru_root), - "lru block should be in cache" - ); - } - - // put the block in the cache - let availability = cache - .put_executed_block(pending_block) - .expect("should put block"); - - // grab the diet block from the cache for later testing - let diet_block = cache - .critical - .read() - .peek(&block_root) - .and_then(|pending_components| pending_components.get_diet_block().cloned()) - .expect("should exist"); - pushed_diet_blocks.push_back(diet_block); - - // should be unavailable since we made sure all blocks had blobs - assert!( - matches!(availability, Availability::MissingComponents(_)), - "should be pending blobs" - ); - - if i >= STATE_LRU_CAPACITY { - let evicted_index = i - STATE_LRU_CAPACITY; - let evicted_root = state_roots[evicted_index]; - assert!( - state_cache.read().peek(&evicted_root).is_none(), - "lru 
root should be evicted" - ); - // get the diet block via direct conversion (testing only) - let diet_block = pushed_diet_blocks.pop_front().expect("should have block"); - // reconstruct the pending block by replaying the block on the parent state - let recovered_pending_block = cache - .state_lru_cache() - .recover_pending_executed_block(diet_block, &debug_span!("test")) - .expect("should reconstruct pending block"); - - // assert the recovered state is the same as the original - assert_eq!( - recovered_pending_block.import_data.state, states[evicted_index], - "recovered state should be the same as the original" - ); - } - } - - // now check the last block - let last_block = pushed_diet_blocks.pop_back().expect("should exist").clone(); - // the state should still be in the cache - assert!( - state_cache - .read() - .peek(&last_block.as_block().state_root()) - .is_some(), - "last block state should still be in cache" - ); - // get the diet block via direct conversion (testing only) - let diet_block = last_block.clone(); - // recover the pending block from the cache - let recovered_pending_block = cache - .state_lru_cache() - .recover_pending_executed_block(diet_block, &debug_span!("test")) - .expect("should reconstruct pending block"); - // assert the recovered state is the same as the original - assert_eq!( - Some(&recovered_pending_block.import_data.state), - states.last(), - "recovered state should be the same as the original" - ); - } } #[cfg(test)] @@ -1307,7 +1140,7 @@ mod pending_components_tests { } type PendingComponentsSetup<E> = ( - DietAvailabilityPendingExecutedBlock<E>, + AvailabilityPendingExecutedBlock<E>, RuntimeFixedVector<Option<KzgVerifiedBlob<E>>>, RuntimeFixedVector<Option<KzgVerifiedBlob<E>>>, ); @@ -1351,7 +1184,7 @@ mod pending_components_tests { is_valid_merge_transition_block: false, }, }; - (block.into(), blobs, invalid_blobs) + (block, blobs, invalid_blobs) } pub fn assert_cache_consistent(cache: PendingComponents<E>, max_len: usize) { 
diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs deleted file mode 100644 index 24f9237e3c..0000000000 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ /dev/null @@ -1,215 +0,0 @@ -use crate::block_verification_types::AsBlock; -use crate::{ - AvailabilityPendingExecutedBlock, BeaconChainTypes, BeaconStore, PayloadVerificationOutcome, - block_verification_types::BlockImportData, - data_availability_checker::{AvailabilityCheckError, STATE_LRU_CAPACITY_NON_ZERO}, -}; -use lru::LruCache; -use parking_lot::RwLock; -use state_processing::BlockReplayer; -use std::sync::Arc; -use store::OnDiskConsensusContext; -use tracing::{Span, debug_span, instrument}; -use types::{BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; - -/// This mirrors everything in the `AvailabilityPendingExecutedBlock`, except -/// that it is much smaller because it contains only a state root instead of -/// a full `BeaconState`. -#[derive(Clone)] -pub struct DietAvailabilityPendingExecutedBlock<E: EthSpec> { - block: Arc<SignedBeaconBlock<E>>, - state_root: Hash256, - parent_block: SignedBeaconBlock<E, BlindedPayload<E>>, - consensus_context: OnDiskConsensusContext<E>, - payload_verification_outcome: PayloadVerificationOutcome, -} - -/// just implementing the same methods as `AvailabilityPendingExecutedBlock` -impl<E: EthSpec> DietAvailabilityPendingExecutedBlock<E> { - pub fn as_block(&self) -> &SignedBeaconBlock<E> { - &self.block - } - - pub fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> { - self.block.clone() - } - - pub fn num_blobs_expected(&self) -> usize { - self.block - .message() - .body() - .blob_kzg_commitments() - .map_or(0, |commitments| commitments.len()) - } - - /// Returns the epoch corresponding to `self.slot()`. 
- pub fn epoch(&self) -> Epoch { - self.block.slot().epoch(E::slots_per_epoch()) - } -} - -/// This LRU cache holds BeaconStates used for block import. If the cache overflows, -/// the least recently used state will be dropped. If the dropped state is needed -/// later on, it will be recovered from the parent state and replaying the block. -/// -/// WARNING: This cache assumes the parent block of any `AvailabilityPendingExecutedBlock` -/// has already been imported into ForkChoice. If this is not the case, the cache -/// will fail to recover the state when the cache overflows because it can't load -/// the parent state! -pub struct StateLRUCache<T: BeaconChainTypes> { - states: RwLock<LruCache<Hash256, BeaconState<T::EthSpec>>>, - store: BeaconStore<T>, - spec: Arc<ChainSpec>, -} - -impl<T: BeaconChainTypes> StateLRUCache<T> { - pub fn new(store: BeaconStore<T>, spec: Arc<ChainSpec>) -> Self { - Self { - states: RwLock::new(LruCache::new(STATE_LRU_CAPACITY_NON_ZERO)), - store, - spec, - } - } - - /// This will store the state in the LRU cache and return a - /// `DietAvailabilityPendingExecutedBlock` which is much cheaper to - /// keep around in memory. - pub fn register_pending_executed_block( - &self, - executed_block: AvailabilityPendingExecutedBlock<T::EthSpec>, - ) -> DietAvailabilityPendingExecutedBlock<T::EthSpec> { - let state = executed_block.import_data.state; - let state_root = executed_block.block.state_root(); - self.states.write().put(state_root, state); - - DietAvailabilityPendingExecutedBlock { - block: executed_block.block, - state_root, - parent_block: executed_block.import_data.parent_block, - consensus_context: OnDiskConsensusContext::from_consensus_context( - executed_block.import_data.consensus_context, - ), - payload_verification_outcome: executed_block.payload_verification_outcome, - } - } - - /// Recover the `AvailabilityPendingExecutedBlock` from the diet version. 
- /// This method will first check the cache and if the state is not found - /// it will reconstruct the state by loading the parent state from disk and - /// replaying the block. - #[instrument(skip_all, parent = _span, level = "debug")] - pub fn recover_pending_executed_block( - &self, - diet_executed_block: DietAvailabilityPendingExecutedBlock<T::EthSpec>, - _span: &Span, - ) -> Result<AvailabilityPendingExecutedBlock<T::EthSpec>, AvailabilityCheckError> { - // Keep the state in the cache to prevent reconstruction in race conditions - let state = if let Some(state) = self.states.write().get(&diet_executed_block.state_root) { - state.clone() - } else { - self.reconstruct_state(&diet_executed_block)? - }; - let block_root = diet_executed_block.block.canonical_root(); - Ok(AvailabilityPendingExecutedBlock { - block: diet_executed_block.block, - import_data: BlockImportData { - block_root, - state, - parent_block: diet_executed_block.parent_block, - consensus_context: diet_executed_block - .consensus_context - .into_consensus_context(), - }, - payload_verification_outcome: diet_executed_block.payload_verification_outcome, - }) - } - - /// Reconstruct the state by loading the parent state from disk and replaying - /// the block. - #[instrument(skip_all, level = "debug")] - fn reconstruct_state( - &self, - diet_executed_block: &DietAvailabilityPendingExecutedBlock<T::EthSpec>, - ) -> Result<BeaconState<T::EthSpec>, AvailabilityCheckError> { - let parent_block_root = diet_executed_block.parent_block.canonical_root(); - let parent_block_state_root = diet_executed_block.parent_block.state_root(); - let (parent_state_root, parent_state) = self - .store - .get_advanced_hot_state( - parent_block_root, - diet_executed_block.parent_block.slot(), - parent_block_state_root, - ) - .map_err(AvailabilityCheckError::StoreError)? 
- .ok_or(AvailabilityCheckError::ParentStateMissing( - parent_block_state_root, - ))?; - - let state_roots = vec![ - Ok((parent_state_root, diet_executed_block.parent_block.slot())), - Ok(( - diet_executed_block.state_root, - diet_executed_block.block.slot(), - )), - ]; - - let block_replayer: BlockReplayer<'_, T::EthSpec, AvailabilityCheckError, _> = - BlockReplayer::new(parent_state, &self.spec) - .no_signature_verification() - .state_root_iter(state_roots.into_iter()) - .minimal_block_root_verification(); - - let block_replayer = debug_span!("reconstruct_state_apply_blocks").in_scope(|| { - block_replayer.apply_blocks(vec![diet_executed_block.block.clone_as_blinded()], None) - }); - - block_replayer - .map(|block_replayer| block_replayer.into_state()) - .and_then(|mut state| { - state - .build_exit_cache(&self.spec) - .map_err(AvailabilityCheckError::RebuildingStateCaches)?; - state - .update_tree_hash_cache() - .map_err(AvailabilityCheckError::RebuildingStateCaches)?; - Ok(state) - }) - } - - /// returns the state cache for inspection - pub fn lru_cache(&self) -> &RwLock<LruCache<Hash256, BeaconState<T::EthSpec>>> { - &self.states - } - - /// remove any states from the cache from before the given epoch - pub fn do_maintenance(&self, cutoff_epoch: Epoch) { - let mut write_lock = self.states.write(); - while let Some((_, state)) = write_lock.peek_lru() { - if state.slot().epoch(T::EthSpec::slots_per_epoch()) < cutoff_epoch { - write_lock.pop_lru(); - } else { - break; - } - } - } -} - -/// This can only be used during testing. The intended way to -/// obtain a `DietAvailabilityPendingExecutedBlock` is to call -/// `register_pending_executed_block` on the `StateLRUCache`. 
-#[cfg(test)] -impl<E: EthSpec> From<AvailabilityPendingExecutedBlock<E>> - for DietAvailabilityPendingExecutedBlock<E> -{ - fn from(mut value: AvailabilityPendingExecutedBlock<E>) -> Self { - Self { - block: value.block, - state_root: value.import_data.state.canonical_root().unwrap(), - parent_block: value.import_data.parent_block, - consensus_context: OnDiskConsensusContext::from_consensus_context( - value.import_data.consensus_context, - ), - payload_verification_outcome: value.payload_verification_outcome, - } - } -} diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 6be07faa24..9de67ca93f 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1869,13 +1869,6 @@ pub static DATA_AVAILABILITY_OVERFLOW_MEMORY_BLOCK_CACHE_SIZE: LazyLock<Result<I "Number of entries in the data availability overflow block memory cache.", ) }); -pub static DATA_AVAILABILITY_OVERFLOW_MEMORY_STATE_CACHE_SIZE: LazyLock<Result<IntGauge>> = - LazyLock::new(|| { - try_create_int_gauge( - "data_availability_overflow_memory_state_cache_size", - "Number of entries in the data availability overflow state memory cache.", - ) - }); pub static DATA_AVAILABILITY_RECONSTRUCTION_TIME: LazyLock<Result<Histogram>> = LazyLock::new(|| { try_create_histogram( @@ -1983,10 +1976,6 @@ pub fn scrape_for_metrics<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>) { &DATA_AVAILABILITY_OVERFLOW_MEMORY_BLOCK_CACHE_SIZE, da_checker_metrics.block_cache_size, ); - set_gauge_by_usize( - &DATA_AVAILABILITY_OVERFLOW_MEMORY_STATE_CACHE_SIZE, - da_checker_metrics.state_cache_size, - ); if let Some((size, num_lookups)) = beacon_chain.pre_finalization_block_cache.metrics() { set_gauge_by_usize(&PRE_FINALIZATION_BLOCK_CACHE_SIZE, size); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index dcf8ee4f8e..f816dbac53 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ 
b/beacon_node/beacon_chain/src/test_utils.rs @@ -222,7 +222,6 @@ pub fn test_da_checker<E: EthSpec>( Duration::from_secs(spec.seconds_per_slot), ); let kzg = get_kzg(&spec); - let store = Arc::new(HotColdDB::open_ephemeral(<_>::default(), spec.clone()).unwrap()); let ordered_custody_column_indices = generate_data_column_indices_rand_order::<E>(); let custody_context = Arc::new(CustodyContext::new( node_custody_type, @@ -234,7 +233,6 @@ pub fn test_da_checker<E: EthSpec>( complete_blob_backfill, slot_clock, kzg, - store, custody_context, spec, ) diff --git a/beacon_node/beacon_chain/tests/column_verification.rs b/beacon_node/beacon_chain/tests/column_verification.rs index ffbc460465..ca9893941a 100644 --- a/beacon_node/beacon_chain/tests/column_verification.rs +++ b/beacon_node/beacon_chain/tests/column_verification.rs @@ -116,3 +116,96 @@ async fn rpc_columns_with_invalid_header_signature() { BlockError::InvalidSignature(InvalidSignature::ProposerSignature) )); } + +// Regression test for verify_header_signature bug: it uses head_fork() which is wrong for fork blocks +#[tokio::test] +async fn verify_header_signature_fork_block_bug() { + // Create a spec with all forks enabled at genesis except Fulu which is at epoch 1 + // This allows us to easily create the scenario where the head is at Electra + // but we're trying to verify a block from Fulu epoch + let mut spec = test_spec::<E>(); + + // Only run this test for FORK_NAME=fulu. 
+ if !spec.is_fulu_scheduled() || spec.is_gloas_scheduled() { + return; + } + + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.deneb_fork_epoch = Some(Epoch::new(0)); + spec.electra_fork_epoch = Some(Epoch::new(0)); + let fulu_fork_epoch = Epoch::new(1); + spec.fulu_fork_epoch = Some(fulu_fork_epoch); + + let spec = Arc::new(spec); + let harness = get_harness(VALIDATOR_COUNT, spec.clone(), NodeCustodyType::Supernode); + harness.execution_block_generator().set_min_blob_count(1); + + // Add some blocks in epoch 0 (Electra) + harness + .extend_chain( + E::slots_per_epoch() as usize - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Verify we're still in epoch 0 (Electra) + let pre_fork_state = harness.get_current_state(); + assert_eq!(pre_fork_state.current_epoch(), Epoch::new(0)); + assert!(matches!(pre_fork_state, BeaconState::Electra(_))); + + // Now produce a block at the first slot of epoch 1 (Fulu fork). + // make_block will advance the state which will trigger the Electra->Fulu upgrade. + let fork_slot = fulu_fork_epoch.start_slot(E::slots_per_epoch()); + let ((signed_block, opt_blobs), _state_root) = + harness.make_block(pre_fork_state.clone(), fork_slot).await; + let (_, blobs) = opt_blobs.expect("Blobs should be present"); + assert!(!blobs.is_empty(), "Block should have blobs"); + let block_root = signed_block.canonical_root(); + + // Process the block WITHOUT blobs to make it unavailable. + // The block will be accepted but won't become the head because it's not fully available. + // This keeps the head at the pre-fork state (Electra). 
+ harness.advance_slot(); + let rpc_block = harness + .build_rpc_block_from_blobs(signed_block.clone(), None, false) + .expect("Should build RPC block"); + let availability = harness + .chain + .process_block( + block_root, + rpc_block, + NotifyExecutionLayer::Yes, + BlockImportSource::RangeSync, + || Ok(()), + ) + .await + .expect("Block should be processed"); + assert_eq!( + availability, + AvailabilityProcessingStatus::MissingComponents(fork_slot, block_root), + "Block should be pending availability" + ); + + // The head should still be in epoch 0 (Electra) because the fork block isn't available + let current_head_state = harness.get_current_state(); + assert_eq!(current_head_state.current_epoch(), Epoch::new(0)); + assert!(matches!(current_head_state, BeaconState::Electra(_))); + + // Now try to process columns for the fork block. + // The bug: verify_header_signature previously used head_fork() which fetched the fork from + // the head state (still Electra fork), but the block was signed with the Fulu fork version. + // This caused an incorrect signature verification failure. + let data_column_sidecars = + generate_data_column_sidecars_from_block(&signed_block, &harness.chain.spec); + + // Now that the bug is fixed, the block should import. + let status = harness + .chain + .process_rpc_custody_columns(data_column_sidecars) + .await + .unwrap(); + assert_eq!(status, AvailabilityProcessingStatus::Imported(block_root)); +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index c2a6bc1f9d..22e7e7450e 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3254,7 +3254,19 @@ pub fn serve<T: BeaconChainTypes>( let s = futures::stream::select_all(receivers); - Ok(warp::sse::reply(warp::sse::keep_alive().stream(s))) + let response = warp::sse::reply(warp::sse::keep_alive().stream(s)); + + // Set headers to bypass nginx caching and buffering, which breaks realtime + // delivery. 
+ let response = warp::reply::with_header(response, "X-Accel-Buffering", "no"); + let response = warp::reply::with_header(response, "X-Accel-Expires", "0"); + let response = warp::reply::with_header( + response, + "Cache-Control", + "no-cache, no-store, must-revalidate", + ); + + Ok(response) }) }, ); diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index eb0cc2cc99..659886f0f1 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -22,11 +22,11 @@ fixed_bytes = { workspace = true } fnv = { workspace = true } futures = { workspace = true } hex = { workspace = true } +if-addrs = "0.14" itertools = { workspace = true } libp2p = { workspace = true } libp2p-mplex = { git = "https://github.com/libp2p/rust-libp2p.git" } lighthouse_version = { workspace = true } -local-ip-address = "0.6" logging = { workspace = true } lru = { workspace = true } lru_cache = { workspace = true } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 9940cb9f7f..cb94bfff22 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -5,8 +5,8 @@ use crate::{Enr, PeerIdSerialized}; use directory::{ DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR, }; +use if_addrs::get_if_addrs; use libp2p::{Multiaddr, gossipsub}; -use local_ip_address::local_ipv6; use network_utils::listen_addr::{ListenAddr, ListenAddress}; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; @@ -262,13 +262,13 @@ impl Config { /// A helper function to check if the local host has a globally routeable IPv6 address. If so, /// returns true. 
pub fn is_ipv6_supported() -> bool { - // If IPv6 is supported - let Ok(std::net::IpAddr::V6(local_ip)) = local_ipv6() else { + let Ok(addrs) = get_if_addrs() else { return false; }; - // If its globally routable, return true - is_global_ipv6(&local_ip) + addrs.iter().any( + |iface| matches!(iface.addr, if_addrs::IfAddr::V6(ref v6) if is_global_ipv6(&v6.ip)), + ) } pub fn listen_addrs(&self) -> &ListenAddress { diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 939eca3b94..38a6a84b44 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -264,47 +264,62 @@ impl<E: EthSpec> Discovery<E> { info!("Contacting Multiaddr boot-nodes for their ENR"); } - // get futures for requesting the Enrs associated to these multiaddr and wait for their + // get futures for requesting the ENRs associated to these multiaddr and wait for their // completion - let mut fut_coll = config + let discv5_eligible_addrs = config .boot_nodes_multiaddr .iter() - .map(|addr| addr.to_string()) - // request the ENR for this multiaddr and keep the original for logging - .map(|addr| { - futures::future::join( - discv5.request_enr(addr.clone()), - futures::future::ready(addr), - ) - }) - .collect::<FuturesUnordered<_>>(); + // Filter out multiaddrs without UDP or P2P protocols required for discv5 ENR requests + .filter(|addr| { + addr.iter().any(|proto| matches!(proto, Protocol::Udp(_))) + && addr.iter().any(|proto| matches!(proto, Protocol::P2p(_))) + }); - while let Some((result, original_addr)) = fut_coll.next().await { - match result { - Ok(enr) => { - debug!( - node_id = %enr.node_id(), - peer_id = %enr.peer_id(), - ip4 = ?enr.ip4(), - udp4 = ?enr.udp4(), - tcp4 = ?enr.tcp4(), - quic4 = ?enr.quic4(), - "Adding node to routing table" - ); - let _ = discv5.add_enr(enr).map_err(|e| { - error!( - addr = original_addr.to_string(), - error = e.to_string(), - 
"Could not add peer to the local routing table" - ) - }); - } - Err(e) => { - error!( - multiaddr = original_addr.to_string(), - error = e.to_string(), - "Error getting mapping to ENR" + if config.disable_discovery { + if discv5_eligible_addrs.count() > 0 { + warn!( + "Boot node multiaddrs requiring discv5 ENR lookup will be ignored because discovery is disabled" + ); + } + } else { + let mut fut_coll = discv5_eligible_addrs + .map(|addr| addr.to_string()) + // request the ENR for this multiaddr and keep the original for logging + .map(|addr| { + futures::future::join( + discv5.request_enr(addr.clone()), + futures::future::ready(addr), ) + }) + .collect::<FuturesUnordered<_>>(); + + while let Some((result, original_addr)) = fut_coll.next().await { + match result { + Ok(enr) => { + debug!( + node_id = %enr.node_id(), + peer_id = %enr.peer_id(), + ip4 = ?enr.ip4(), + udp4 = ?enr.udp4(), + tcp4 = ?enr.tcp4(), + quic4 = ?enr.quic4(), + "Adding node to routing table" + ); + let _ = discv5.add_enr(enr).map_err(|e| { + error!( + addr = original_addr.to_string(), + error = e.to_string(), + "Could not add peer to the local routing table" + ) + }); + } + Err(e) => { + error!( + multiaddr = original_addr.to_string(), + error = e.to_string(), + "Error getting mapping to ENR" + ) + } } } } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 74b1fb4b98..3d709ed9b5 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -573,6 +573,7 @@ impl<E: EthSpec> Network<E> { }; // attempt to connect to user-input libp2p nodes + // DEPRECATED: can be removed in v8.2.0./v9.0.0 for multiaddr in &config.libp2p_nodes { dial(multiaddr.clone()); } diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 6193725323..a4125f3df0 100644 --- 
a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1214,28 +1214,25 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> { .verify_block_for_gossip(block.clone()) .await; - if verification_result.is_ok() { + let block_root = if let Ok(verified_block) = &verification_result { metrics::set_gauge( &metrics::BEACON_BLOCK_DELAY_GOSSIP, block_delay.as_millis() as i64, ); - } - - let block_root = if let Ok(verified_block) = &verification_result { + // Write the time the block was observed into delay cache only for gossip + // valid blocks. + self.chain.block_times_cache.write().set_time_observed( + verified_block.block_root, + block.slot(), + seen_duration, + Some(peer_id.to_string()), + Some(peer_client.to_string()), + ); verified_block.block_root } else { block.canonical_root() }; - // Write the time the block was observed into delay cache. - self.chain.block_times_cache.write().set_time_observed( - block_root, - block.slot(), - seen_duration, - Some(peer_id.to_string()), - Some(peer_client.to_string()), - ); - let verified_block = match verification_result { Ok(verified_block) => { if block_delay >= self.chain.spec.get_unaggregated_attestation_due() { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index e4c7c6ff1f..9553fe60ba 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -364,7 +364,7 @@ pub fn cli_app() -> Command { .long("libp2p-addresses") .value_name("MULTIADDR") .help("One or more comma-delimited multiaddrs to manually connect to a libp2p peer \ - without an ENR.") + without an ENR. DEPRECATED. 
The --libp2p-addresses flag is deprecated and replaced by --boot-nodes") .action(ArgAction::Set) .display_order(0) ) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 2e5a045502..752cf10550 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -15,7 +15,7 @@ use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; use environment::RuntimeContext; use execution_layer::DEFAULT_JWT_FILE; use http_api::TlsConfig; -use lighthouse_network::{Enr, Multiaddr, NetworkConfig, PeerIdSerialized, multiaddr::Protocol}; +use lighthouse_network::{Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use network_utils::listen_addr::ListenAddress; use sensitive_url::SensitiveUrl; use std::collections::HashSet; @@ -28,7 +28,7 @@ use std::num::NonZeroU16; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::time::Duration; -use tracing::{error, info, warn}; +use tracing::{info, warn}; use types::graffiti::GraffitiString; use types::{Checkpoint, Epoch, EthSpec, Hash256}; @@ -1193,12 +1193,6 @@ pub fn set_network_config( let multi: Multiaddr = addr .parse() .map_err(|_| format!("Not valid as ENR nor Multiaddr: {}", addr))?; - if !multi.iter().any(|proto| matches!(proto, Protocol::Udp(_))) { - error!(multiaddr = multi.to_string(), "Missing UDP in Multiaddr"); - } - if !multi.iter().any(|proto| matches!(proto, Protocol::P2p(_))) { - error!(multiaddr = multi.to_string(), "Missing P2P in Multiaddr"); - } multiaddrs.push(multi); } } @@ -1207,7 +1201,9 @@ pub fn set_network_config( config.boot_nodes_multiaddr = multiaddrs; } + // DEPRECATED: can be removed in v8.2.0./v9.0.0 if let Some(libp2p_addresses_str) = cli_args.get_one::<String>("libp2p-addresses") { + warn!("The --libp2p-addresses flag is deprecated and replaced by --boot-nodes"); config.libp2p_nodes = libp2p_addresses_str .split(',') .map(|multiaddr| { diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 5f3c43a7e4..d3aa27c8a7 100644 --- 
a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -225,7 +225,8 @@ Options: be careful to avoid filling up their disks. --libp2p-addresses <MULTIADDR> One or more comma-delimited multiaddrs to manually connect to a libp2p - peer without an ENR. + peer without an ENR. DEPRECATED. The --libp2p-addresses flag is + deprecated and replaced by --boot-nodes --listen-address [<ADDRESS>...] The address lighthouse will listen for UDP and TCP connections. To listen over IPv4 and IPv6 set this flag twice with the different diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 2a9936d1d2..4647780ea8 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -185,6 +185,12 @@ Flags: If present, do not attempt to discover new validators in the validators-dir. Validators will need to be manually added to the validator_definitions.yml file. + --disable-beacon-head-monitor + Disable the beacon head monitor which tries to attest as soon as any + of the configured beacon nodes sends a head event. Leaving the service + enabled is recommended, but disabling it can lead to reduced bandwidth + and more predictable usage of the primary beacon node (rather than the + fastest BN). --disable-latency-measurement-service Disables the service that periodically attempts to measure latency to BNs. diff --git a/common/eth2/src/error.rs b/common/eth2/src/error.rs index 1f21220b79..671a617c9e 100644 --- a/common/eth2/src/error.rs +++ b/common/eth2/src/error.rs @@ -17,6 +17,8 @@ pub enum Error { #[cfg(feature = "events")] /// The `reqwest_eventsource` client raised an error. SseClient(Box<reqwest_eventsource::Error>), + #[cfg(feature = "events")] + SseEventSource(reqwest_eventsource::CannotCloneRequestError), /// The server returned an error message where the body was able to be parsed. ServerMessage(ErrorMessage), /// The server returned an error message with an array of errors. 
@@ -100,6 +102,7 @@ impl Error { None } } + Error::SseEventSource(_) => None, Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(), Error::ServerIndexedMessage(msg) => StatusCode::try_from(msg.code).ok(), Error::StatusCode(status) => Some(*status), diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 47440e9325..7e4860a4cf 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -40,7 +40,7 @@ use reqwest::{ header::{HeaderMap, HeaderValue}, }; #[cfg(feature = "events")] -use reqwest_eventsource::{Event, EventSource}; +use reqwest_eventsource::{Event, RequestBuilderExt}; use serde::{Serialize, de::DeserializeOwned}; use ssz::{Decode, Encode}; use std::fmt; @@ -77,6 +77,8 @@ const HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT: u32 = 4; const HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT: u32 = 4; const HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT: u32 = 4; +// Generally the timeout for events should be longer than a slot. 
+const HTTP_GET_EVENTS_TIMEOUT_MULTIPLIER: u32 = 50; const HTTP_DEFAULT_TIMEOUT_QUOTIENT: u32 = 4; /// A struct to define a variety of different timeouts for different validator tasks to ensure @@ -97,6 +99,7 @@ pub struct Timeouts { pub get_debug_beacon_states: Duration, pub get_deposit_snapshot: Duration, pub get_validator_block: Duration, + pub events: Duration, pub default: Duration, } @@ -117,6 +120,7 @@ impl Timeouts { get_debug_beacon_states: timeout, get_deposit_snapshot: timeout, get_validator_block: timeout, + events: HTTP_GET_EVENTS_TIMEOUT_MULTIPLIER * timeout, default: timeout, } } @@ -139,6 +143,7 @@ impl Timeouts { get_debug_beacon_states: base_timeout / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, get_deposit_snapshot: base_timeout / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT, get_validator_block: base_timeout / HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT, + events: HTTP_GET_EVENTS_TIMEOUT_MULTIPLIER * base_timeout, default: base_timeout / HTTP_DEFAULT_TIMEOUT_QUOTIENT, } } @@ -3047,7 +3052,12 @@ impl BeaconNodeHttpClient { .join(","); path.query_pairs_mut().append_pair("topics", &topic_string); - let mut es = EventSource::get(path); + let mut es = self + .client + .get(path) + .timeout(self.timeouts.events) + .eventsource() + .map_err(Error::SseEventSource)?; // If we don't await `Event::Open` here, then the consumer // will not get any Message events until they start awaiting the stream. 
// This is a way to register the stream with the sse server before diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/bootstrap_nodes.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/bootstrap_nodes.yaml index 70aeaac9c5..5a75d22965 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/bootstrap_nodes.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/bootstrap_nodes.yaml @@ -31,4 +31,4 @@ # Lodestar team's bootnodes - enr:-IS4QPi-onjNsT5xAIAenhCGTDl4z-4UOR25Uq-3TmG4V3kwB9ljLTb_Kp1wdjHNj-H8VVLRBSSWVZo3GUe3z6k0E-IBgmlkgnY0gmlwhKB3_qGJc2VjcDI1NmsxoQMvAfgB4cJXvvXeM6WbCG86CstbSxbQBSGx31FAwVtOTYN1ZHCCIyg # 160.119.254.161 | hostafrica-southafrica -- enr:-KG4QCb8NC3gEM3I0okStV5BPX7Bg6ZXTYCzzbYyEXUPGcZtHmvQtiJH4C4F2jG7azTcb9pN3JlgpfxAnRVFzJ3-LykBgmlkgnY0gmlwhFPlR9KDaXA2kP6AAAAAAAAAAlBW__4my5iJc2VjcDI1NmsxoQLdUv9Eo9sxCt0tc_CheLOWnX59yHJtkBSOL7kpxdJ6GYN1ZHCCIyiEdWRwNoIjKA # 83.229.71.210 | kamatera-telaviv-israel \ No newline at end of file +- enr:-KG4QPUf8-g_jU-KrwzG42AGt0wWM1BTnQxgZXlvCEIfTQ5hSmptkmgmMbRkpOqv6kzb33SlhPHJp7x4rLWWiVq5lSECgmlkgnY0gmlwhFPlR9KDaXA2kCoGxcAJAAAVAAAAAAAAABCJc2VjcDI1NmsxoQLdUv9Eo9sxCt0tc_CheLOWnX59yHJtkBSOL7kpxdJ6GYN1ZHCCIyiEdWRwNoIjKA # 83.229.71.210 | kamatera-telaviv-israel diff --git a/consensus/state_processing/src/common/get_attestation_participation.rs b/consensus/state_processing/src/common/get_attestation_participation.rs index 71bf6329f1..2262b59ac1 100644 --- a/consensus/state_processing/src/common/get_attestation_participation.rs +++ b/consensus/state_processing/src/common/get_attestation_participation.rs @@ -1,8 +1,8 @@ use integer_sqrt::IntegerSquareRoot; +use safe_arith::SafeArith; use smallvec::SmallVec; -use types::{AttestationData, BeaconState, ChainSpec, EthSpec}; use types::{ - BeaconStateError as Error, + AttestationData, BeaconState, BeaconStateError as Error, ChainSpec, EthSpec, consts::altair::{ NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, 
TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, @@ -16,6 +16,8 @@ use types::{ /// /// This function will return an error if the source of the attestation doesn't match the /// state's relevant justified checkpoint. +/// +/// This function has been abstracted to work for all forks from Altair to Gloas. pub fn get_attestation_participation_flag_indices<E: EthSpec>( state: &BeaconState<E>, data: &AttestationData, @@ -27,13 +29,43 @@ pub fn get_attestation_participation_flag_indices<E: EthSpec>( } else { state.previous_justified_checkpoint() }; - - // Matching roots. let is_matching_source = data.source == justified_checkpoint; + + // Matching target. let is_matching_target = is_matching_source && data.target.root == *state.get_block_root_at_epoch(data.target.epoch)?; - let is_matching_head = - is_matching_target && data.beacon_block_root == *state.get_block_root(data.slot)?; + + // [New in Gloas:EIP7732] + let payload_matches = if state.fork_name_unchecked().gloas_enabled() { + if state.is_attestation_same_slot(data)? { + // For same-slot attestations, data.index must be 0 + if data.index != 0 { + return Err(Error::BadOverloadedDataIndex(data.index)); + } + true + } else { + // For non same-slot attestations, check execution payload availability + let slot_index = data + .slot + .as_usize() + .safe_rem(E::slots_per_historical_root())?; + let payload_index = state + .execution_payload_availability()? + .get(slot_index) + .map(|avail| if avail { 1 } else { 0 }) + .map_err(|_| Error::InvalidExecutionPayloadAvailabilityIndex(slot_index))?; + data.index == payload_index + } + } else { + // Essentially `payload_matches` is always true pre-Gloas (it is not considered for matching + // head). + true + }; + + // Matching head. + let is_matching_head = is_matching_target + && data.beacon_block_root == *state.get_block_root(data.slot)? 
+ && payload_matches; if !is_matching_source { return Err(Error::IncorrectAttestationSource); diff --git a/consensus/state_processing/src/envelope_processing.rs b/consensus/state_processing/src/envelope_processing.rs new file mode 100644 index 0000000000..d46728dbbc --- /dev/null +++ b/consensus/state_processing/src/envelope_processing.rs @@ -0,0 +1,278 @@ +use crate::BlockProcessingError; +use crate::VerifySignatures; +use crate::per_block_processing::compute_timestamp_at_slot; +use crate::per_block_processing::process_operations::{ + process_consolidation_requests, process_deposit_requests_post_gloas, + process_withdrawal_requests, +}; +use safe_arith::{ArithError, SafeArith}; +use tree_hash::TreeHash; +use types::{ + BeaconState, BeaconStateError, BuilderIndex, BuilderPendingPayment, ChainSpec, EthSpec, + ExecutionBlockHash, Hash256, SignedExecutionPayloadEnvelope, Slot, +}; + +macro_rules! envelope_verify { + ($condition: expr, $result: expr) => { + if !$condition { + return Err($result); + } + }; +} + +#[derive(Debug, Clone)] +pub enum EnvelopeProcessingError { + /// Bad Signature + BadSignature, + BeaconStateError(BeaconStateError), + BlockProcessingError(BlockProcessingError), + ArithError(ArithError), + /// Envelope doesn't match latest beacon block header + LatestBlockHeaderMismatch { + envelope_root: Hash256, + block_header_root: Hash256, + }, + /// Envelope doesn't match latest beacon block slot + SlotMismatch { + envelope_slot: Slot, + parent_state_slot: Slot, + }, + /// The payload withdrawals don't match the state's payload withdrawals. + WithdrawalsRootMismatch { + state: Hash256, + payload: Hash256, + }, + // The builder index doesn't match the committed bid. 
+ BuilderIndexMismatch { + committed_bid: BuilderIndex, + envelope: BuilderIndex, + }, + // The gas limit doesn't match the committed bid + GasLimitMismatch { + committed_bid: u64, + envelope: u64, + }, + // The block hash doesn't match the committed bid + BlockHashMismatch { + committed_bid: ExecutionBlockHash, + envelope: ExecutionBlockHash, + }, + // The parent hash doesn't match the previous execution payload + ParentHashMismatch { + state: ExecutionBlockHash, + envelope: ExecutionBlockHash, + }, + // The previous randao didn't match the payload + PrevRandaoMismatch { + committed_bid: Hash256, + envelope: Hash256, + }, + // The timestamp didn't match the payload + TimestampMismatch { + state: u64, + envelope: u64, + }, + // Invalid state root + InvalidStateRoot { + state: Hash256, + envelope: Hash256, + }, + // BitFieldError + BitFieldError(ssz::BitfieldError), + // Some kind of error calculating the builder payment index + BuilderPaymentIndexOutOfBounds(usize), + /// The envelope was deemed invalid by the execution engine. 
+ ExecutionInvalid, +} + +impl From<BeaconStateError> for EnvelopeProcessingError { + fn from(e: BeaconStateError) -> Self { + EnvelopeProcessingError::BeaconStateError(e) + } +} + +impl From<BlockProcessingError> for EnvelopeProcessingError { + fn from(e: BlockProcessingError) -> Self { + EnvelopeProcessingError::BlockProcessingError(e) + } +} + +impl From<ArithError> for EnvelopeProcessingError { + fn from(e: ArithError) -> Self { + EnvelopeProcessingError::ArithError(e) + } +} + +/// Processes a `SignedExecutionPayloadEnvelope` +/// +/// This function does all the state modifications inside `process_execution_payload()` +pub fn process_execution_payload_envelope<E: EthSpec>( + state: &mut BeaconState<E>, + parent_state_root: Option<Hash256>, + signed_envelope: &SignedExecutionPayloadEnvelope<E>, + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<(), EnvelopeProcessingError> { + if verify_signatures.is_true() { + // Verify Signed Envelope Signature + if !signed_envelope.verify_signature_with_state(state, spec)? 
{ + return Err(EnvelopeProcessingError::BadSignature); + } + } + + let envelope = &signed_envelope.message; + let payload = &envelope.payload; + let execution_requests = &envelope.execution_requests; + + // Cache latest block header state root + if state.latest_block_header().state_root == Hash256::default() { + let previous_state_root = parent_state_root + .map(Ok) + .unwrap_or_else(|| state.canonical_root())?; + state.latest_block_header_mut().state_root = previous_state_root; + } + + // Verify consistency with the beacon block + let latest_block_header_root = state.latest_block_header().tree_hash_root(); + envelope_verify!( + envelope.beacon_block_root == latest_block_header_root, + EnvelopeProcessingError::LatestBlockHeaderMismatch { + envelope_root: envelope.beacon_block_root, + block_header_root: latest_block_header_root, + } + ); + envelope_verify!( + envelope.slot == state.slot(), + EnvelopeProcessingError::SlotMismatch { + envelope_slot: envelope.slot, + parent_state_slot: state.slot(), + } + ); + + // Verify consistency with the committed bid + let committed_bid = state.latest_execution_payload_bid()?; + envelope_verify!( + envelope.builder_index == committed_bid.builder_index, + EnvelopeProcessingError::BuilderIndexMismatch { + committed_bid: committed_bid.builder_index, + envelope: envelope.builder_index, + } + ); + envelope_verify!( + committed_bid.prev_randao == payload.prev_randao, + EnvelopeProcessingError::PrevRandaoMismatch { + committed_bid: committed_bid.prev_randao, + envelope: payload.prev_randao, + } + ); + + // Verify consistency with expected withdrawals + // NOTE: we don't bother hashing here except in case of error, because we can just compare for + // equality directly. This equality check could be more straight-forward if the types were + // changed to match (currently we are comparing VariableList to List). This could happen + // coincidentally when we adopt ProgressiveList. 
+ envelope_verify!( + payload.withdrawals.len() == state.payload_expected_withdrawals()?.len() + && payload + .withdrawals + .iter() + .eq(state.payload_expected_withdrawals()?.iter()), + EnvelopeProcessingError::WithdrawalsRootMismatch { + state: state.payload_expected_withdrawals()?.tree_hash_root(), + payload: payload.withdrawals.tree_hash_root(), + } + ); + + // Verify the gas limit + envelope_verify!( + committed_bid.gas_limit == payload.gas_limit, + EnvelopeProcessingError::GasLimitMismatch { + committed_bid: committed_bid.gas_limit, + envelope: payload.gas_limit, + } + ); + + // Verify the block hash + envelope_verify!( + committed_bid.block_hash == payload.block_hash, + EnvelopeProcessingError::BlockHashMismatch { + committed_bid: committed_bid.block_hash, + envelope: payload.block_hash, + } + ); + + // Verify consistency of the parent hash with respect to the previous execution payload + envelope_verify!( + payload.parent_hash == *state.latest_block_hash()?, + EnvelopeProcessingError::ParentHashMismatch { + state: *state.latest_block_hash()?, + envelope: payload.parent_hash, + } + ); + + // Verify timestamp + let state_timestamp = compute_timestamp_at_slot(state, state.slot(), spec)?; + envelope_verify!( + payload.timestamp == state_timestamp, + EnvelopeProcessingError::TimestampMismatch { + state: state_timestamp, + envelope: payload.timestamp, + } + ); + + // TODO(gloas): newPayload happens here in the spec, ensure we wire that up correctly + + process_deposit_requests_post_gloas(state, &execution_requests.deposits, spec)?; + + // TODO(gloas): gotta update these + process_withdrawal_requests(state, &execution_requests.withdrawals, spec)?; + process_consolidation_requests(state, &execution_requests.consolidations, spec)?; + + // Queue the builder payment + let payment_index = E::slots_per_epoch() + .safe_add(state.slot().as_u64().safe_rem(E::slots_per_epoch())?)? + as usize; + let payment_mut = state + .builder_pending_payments_mut()? 
+ .get_mut(payment_index) + .ok_or(EnvelopeProcessingError::BuilderPaymentIndexOutOfBounds( + payment_index, + ))?; + + // We have re-ordered the blanking out of the pending payment to avoid a double-lookup. + // This is semantically equivalent to the ordering used by the spec because we have taken a + // clone of the payment prior to doing the write. + let payment_withdrawal = payment_mut.withdrawal.clone(); + *payment_mut = BuilderPendingPayment::default(); + + let amount = payment_withdrawal.amount; + if amount > 0 { + state + .builder_pending_withdrawals_mut()? + .push(payment_withdrawal) + .map_err(|e| EnvelopeProcessingError::BeaconStateError(e.into()))?; + } + + // Cache the execution payload hash + let availability_index = state + .slot() + .as_usize() + .safe_rem(E::slots_per_historical_root())?; + state + .execution_payload_availability_mut()? + .set(availability_index, true) + .map_err(EnvelopeProcessingError::BitFieldError)?; + *state.latest_block_hash_mut()? = payload.block_hash; + + // Verify the state root + let state_root = state.canonical_root()?; + envelope_verify!( + envelope.state_root == state_root, + EnvelopeProcessingError::InvalidStateRoot { + state: state_root, + envelope: envelope.state_root, + } + ); + + Ok(()) +} diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index 9b2696c6d5..e37c526579 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -20,6 +20,7 @@ pub mod all_caches; pub mod block_replayer; pub mod common; pub mod consensus_context; +pub mod envelope_processing; pub mod epoch_cache; pub mod genesis; pub mod per_block_processing; diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index d0cf7b46d9..5c1db9d732 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -99,6 
+99,8 @@ pub enum BlockProcessingError { IncorrectExpectedWithdrawalsVariant, MissingLastWithdrawal, PendingAttestationInElectra, + /// Builder payment index out of bounds (Gloas) + BuilderPaymentIndexOutOfBounds(usize), } impl From<BeaconStateError> for BlockProcessingError { @@ -372,6 +374,8 @@ pub enum AttestationInvalid { BadSignature, /// The indexed attestation created from this attestation was found to be invalid. BadIndexedAttestation(IndexedAttestationInvalid), + /// The overloaded "data.index" field is invalid (post-Gloas). + BadOverloadedDataIndex, } impl From<BlockOperationError<IndexedAttestationInvalid>> diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 8afeeb685b..19109f1508 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -5,6 +5,7 @@ use crate::common::{ slash_validator, }; use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; +use bls::{PublicKeyBytes, SignatureBytes}; use ssz_types::FixedVector; use typenum::U33; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; @@ -38,9 +39,14 @@ pub fn process_operations<E: EthSpec, Payload: AbstractExecPayload<E>>( process_bls_to_execution_changes(state, bls_to_execution_changes, verify_signatures, spec)?; } - if state.fork_name_unchecked().electra_enabled() { + if state.fork_name_unchecked().electra_enabled() && !state.fork_name_unchecked().gloas_enabled() + { state.update_pubkey_cache()?; - process_deposit_requests(state, &block_body.execution_requests()?.deposits, spec)?; + process_deposit_requests_pre_gloas( + state, + &block_body.execution_requests()?.deposits, + spec, + )?; process_withdrawal_requests(state, &block_body.execution_requests()?.withdrawals, spec)?; process_consolidation_requests( state, 
@@ -212,6 +218,148 @@ pub mod altair_deneb { } } +pub mod gloas { + use super::*; + use crate::common::update_progressive_balances_cache::update_progressive_balances_on_attestation; + + pub fn process_attestations<'a, E: EthSpec, I>( + state: &mut BeaconState<E>, + attestations: I, + verify_signatures: VerifySignatures, + ctxt: &mut ConsensusContext<E>, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> + where + I: Iterator<Item = AttestationRef<'a, E>>, + { + attestations.enumerate().try_for_each(|(i, attestation)| { + process_attestation(state, attestation, i, ctxt, verify_signatures, spec) + }) + } + + pub fn process_attestation<E: EthSpec>( + state: &mut BeaconState<E>, + attestation: AttestationRef<E>, + att_index: usize, + ctxt: &mut ConsensusContext<E>, + verify_signatures: VerifySignatures, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + let proposer_index = ctxt.get_proposer_index(state, spec)?; + let previous_epoch = ctxt.previous_epoch; + let current_epoch = ctxt.current_epoch; + + let indexed_att = verify_attestation_for_block_inclusion( + state, + attestation, + ctxt, + verify_signatures, + spec, + ) + .map_err(|e| e.into_with_index(att_index))?; + + // Matching roots, participation flag indices + let data = attestation.data(); + let inclusion_delay = state.slot().safe_sub(data.slot)?.as_u64(); + let participation_flag_indices = + get_attestation_participation_flag_indices(state, data, inclusion_delay, spec)?; + + // [New in EIP-7732] + let current_epoch_target = data.target.epoch == state.current_epoch(); + let slot_mod = data + .slot + .as_usize() + .safe_rem(E::slots_per_epoch() as usize)?; + let payment_index = if current_epoch_target { + (E::slots_per_epoch() as usize).safe_add(slot_mod)? + } else { + slot_mod + }; + // Cached here to avoid repeat lookups. The withdrawal amount is immutable throughout + // this whole function. + let payment_withdrawal_amount = state + .builder_pending_payments()? 
+ .get(payment_index) + .ok_or(BlockProcessingError::BuilderPaymentIndexOutOfBounds( + payment_index, + ))? + .withdrawal + .amount; + + // Update epoch participation flags. + let mut proposer_reward_numerator = 0; + for index in indexed_att.attesting_indices_iter() { + let index = *index as usize; + + let validator_effective_balance = state.epoch_cache().get_effective_balance(index)?; + let validator_slashed = state.slashings_cache().is_slashed(index); + + // [New in Gloas:EIP7732] + // For same-slot attestations, check if we're setting any new flags + // If we are, this validator hasn't contributed to this slot's quorum yet + let mut will_set_new_flag = false; + + for (flag_index, &weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { + let epoch_participation = state.get_epoch_participation_mut( + data.target.epoch, + previous_epoch, + current_epoch, + )?; + + if participation_flag_indices.contains(&flag_index) { + let validator_participation = epoch_participation + .get_mut(index) + .ok_or(BeaconStateError::ParticipationOutOfBounds(index))?; + + if !validator_participation.has_flag(flag_index)? { + validator_participation.add_flag(flag_index)?; + proposer_reward_numerator + .safe_add_assign(state.get_base_reward(index)?.safe_mul(weight)?)?; + will_set_new_flag = true; + + update_progressive_balances_on_attestation( + state, + data.target.epoch, + flag_index, + validator_effective_balance, + validator_slashed, + )?; + } + } + } + + // [New in Gloas:EIP7732] + // Add weight for same-slot attestations when any new flag is set. + // This ensures each validator contributes exactly once per slot. + if will_set_new_flag + && state.is_attestation_same_slot(data)? 
+ && payment_withdrawal_amount > 0 + { + let builder_payments = state.builder_pending_payments_mut()?; + let payment = builder_payments.get_mut(payment_index).ok_or( + BlockProcessingError::BuilderPaymentIndexOutOfBounds(payment_index), + )?; + payment + .weight + .safe_add_assign(validator_effective_balance)?; + } + } + + let proposer_reward_denominator = WEIGHT_DENOMINATOR + .safe_sub(PROPOSER_WEIGHT)? + .safe_mul(WEIGHT_DENOMINATOR)? + .safe_div(PROPOSER_WEIGHT)?; + let proposer_reward = proposer_reward_numerator.safe_div(proposer_reward_denominator)?; + increase_balance(state, proposer_index as usize, proposer_reward)?; + + // [New in Gloas:EIP7732] + // Update builder payment weight + // No-op, this is done inline above. + + Ok(()) + } +} + /// Validates each `ProposerSlashing` and updates the state, short-circuiting on an invalid object. /// /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns @@ -235,6 +383,31 @@ pub fn process_proposer_slashings<E: EthSpec>( verify_proposer_slashing(proposer_slashing, state, verify_signatures, spec) .map_err(|e| e.into_with_index(i))?; + // [New in Gloas:EIP7732] + // Remove the BuilderPendingPayment corresponding to this proposal + // if it is still in the 2-epoch window. + if state.fork_name_unchecked().gloas_enabled() { + let slot = proposer_slashing.signed_header_1.message.slot; + let proposal_epoch = slot.epoch(E::slots_per_epoch()); + let slot_in_epoch = slot.as_usize().safe_rem(E::SlotsPerEpoch::to_usize())?; + + let payment_index = if proposal_epoch == state.current_epoch() { + Some(E::SlotsPerEpoch::to_usize().safe_add(slot_in_epoch)?) + } else if proposal_epoch == state.previous_epoch() { + Some(slot_in_epoch) + } else { + None + }; + + if let Some(index) = payment_index { + let payment = state + .builder_pending_payments_mut()? 
+ .get_mut(index) + .ok_or(BlockProcessingError::BuilderPaymentIndexOutOfBounds(index))?; + *payment = BuilderPendingPayment::default(); + } + } + slash_validator( state, proposer_slashing.signed_header_1.message.proposer_index as usize, @@ -285,7 +458,15 @@ pub fn process_attestations<E: EthSpec, Payload: AbstractExecPayload<E>>( ctxt: &mut ConsensusContext<E>, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - if state.fork_name_unchecked().altair_enabled() { + if state.fork_name_unchecked().gloas_enabled() { + gloas::process_attestations( + state, + block_body.attestations(), + verify_signatures, + ctxt, + spec, + )?; + } else if state.fork_name_unchecked().altair_enabled() { altair_deneb::process_attestations( state, block_body.attestations(), @@ -586,7 +767,7 @@ pub fn process_withdrawal_requests<E: EthSpec>( Ok(()) } -pub fn process_deposit_requests<E: EthSpec>( +pub fn process_deposit_requests_pre_gloas<E: EthSpec>( state: &mut BeaconState<E>, deposit_requests: &[DepositRequest], spec: &ChainSpec, @@ -613,6 +794,112 @@ pub fn process_deposit_requests<E: EthSpec>( Ok(()) } +pub fn process_deposit_requests_post_gloas<E: EthSpec>( + state: &mut BeaconState<E>, + deposit_requests: &[DepositRequest], + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + for request in deposit_requests { + process_deposit_request_post_gloas(state, request, spec)?; + } + + Ok(()) +} + +pub fn process_deposit_request_post_gloas<E: EthSpec>( + state: &mut BeaconState<E>, + deposit_request: &DepositRequest, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + // [New in Gloas:EIP7732] + // Regardless of the withdrawal credentials prefix, if a builder/validator + // already exists with this pubkey, apply the deposit to their balance + // TODO(gloas): this could be more efficient in the builder case, see: + // https://github.com/sigp/lighthouse/issues/8783 + let builder_index = state + .builders()? 
+ .iter() + .enumerate() + .find(|(_, builder)| builder.pubkey == deposit_request.pubkey) + .map(|(i, _)| i as u64); + let is_builder = builder_index.is_some(); + + let validator_index = state.get_validator_index(&deposit_request.pubkey)?; + let is_validator = validator_index.is_some(); + + let is_builder_prefix = + is_builder_withdrawal_credential(deposit_request.withdrawal_credentials, spec); + + if is_builder || (is_builder_prefix && !is_validator) { + // Apply builder deposits immediately + apply_deposit_for_builder( + state, + builder_index, + deposit_request.pubkey, + deposit_request.withdrawal_credentials, + deposit_request.amount, + deposit_request.signature.clone(), + state.slot(), + spec, + )?; + return Ok(()); + } + + // Add validator deposits to the queue + let slot = state.slot(); + state.pending_deposits_mut()?.push(PendingDeposit { + pubkey: deposit_request.pubkey, + withdrawal_credentials: deposit_request.withdrawal_credentials, + amount: deposit_request.amount, + signature: deposit_request.signature.clone(), + slot, + })?; + + Ok(()) +} + +#[allow(clippy::too_many_arguments)] +pub fn apply_deposit_for_builder<E: EthSpec>( + state: &mut BeaconState<E>, + builder_index_opt: Option<BuilderIndex>, + pubkey: PublicKeyBytes, + withdrawal_credentials: Hash256, + amount: u64, + signature: SignatureBytes, + slot: Slot, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + match builder_index_opt { + None => { + // Verify the deposit signature (proof of possession) which is not checked by the deposit contract + let deposit_data = DepositData { + pubkey, + withdrawal_credentials, + amount, + signature, + }; + if is_valid_deposit_signature(&deposit_data, spec).is_ok() { + state.add_builder_to_registry( + pubkey, + withdrawal_credentials, + amount, + slot, + spec, + )?; + } + } + Some(builder_index) => { + state + .builders_mut()? + .get_mut(builder_index as usize) + .ok_or(BeaconStateError::UnknownBuilder(builder_index))? 
+ .balance + .safe_add_assign(amount)?; + } + } + Ok(()) +} + // Make sure to build the pubkey cache before calling this function pub fn process_consolidation_requests<E: EthSpec>( state: &mut BeaconState<E>, diff --git a/consensus/state_processing/src/per_block_processing/verify_attestation.rs b/consensus/state_processing/src/per_block_processing/verify_attestation.rs index 0d1fd17768..64b7a31afb 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attestation.rs @@ -52,8 +52,6 @@ pub fn verify_attestation_for_block_inclusion<'ctxt, E: EthSpec>( /// /// Returns a descriptive `Err` if the attestation is malformed or does not accurately reflect the /// prior blocks in `state`. -/// -/// Spec v0.12.1 pub fn verify_attestation_for_state<'ctxt, E: EthSpec>( state: &BeaconState<E>, attestation: AttestationRef<'ctxt, E>, @@ -74,7 +72,12 @@ pub fn verify_attestation_for_state<'ctxt, E: EthSpec>( ); } AttestationRef::Electra(_) => { - verify!(data.index == 0, Invalid::BadCommitteeIndex); + let fork_at_attestation_slot = spec.fork_name_at_slot::<E>(data.slot); + if fork_at_attestation_slot.gloas_enabled() { + verify!(data.index < 2, Invalid::BadOverloadedDataIndex); + } else { + verify!(data.index == 0, Invalid::BadCommitteeIndex); + } } } @@ -89,8 +92,6 @@ pub fn verify_attestation_for_state<'ctxt, E: EthSpec>( } /// Check target epoch and source checkpoint. 
-/// -/// Spec v0.12.1 fn verify_casper_ffg_vote<E: EthSpec>( attestation: AttestationRef<E>, state: &BeaconState<E>, diff --git a/consensus/swap_or_not_shuffle/benches/benches.rs b/consensus/swap_or_not_shuffle/benches/benches.rs index f33556be38..5a9ba38f06 100644 --- a/consensus/swap_or_not_shuffle/benches/benches.rs +++ b/consensus/swap_or_not_shuffle/benches/benches.rs @@ -1,4 +1,5 @@ -use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; +use std::hint::black_box; use swap_or_not_shuffle::{compute_shuffled_index, shuffle_list as fast_shuffle}; const SHUFFLE_ROUND_COUNT: u8 = 90; diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index 397c33163e..85d7de980b 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -1,8 +1,9 @@ -use criterion::{BatchSize, BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use criterion::{BatchSize, BenchmarkId, Criterion, criterion_group, criterion_main}; use fixed_bytes::FixedBytesExtended; use milhouse::List; use rayon::prelude::*; use ssz::Encode; +use std::hint::black_box; use std::sync::Arc; use types::{ BeaconState, Epoch, Eth1Data, EthSpec, Hash256, MainnetEthSpec, Validator, diff --git a/consensus/types/src/builder/builder.rs b/consensus/types/src/builder/builder.rs index 81ca45046c..2bd50f42cc 100644 --- a/consensus/types/src/builder/builder.rs +++ b/consensus/types/src/builder/builder.rs @@ -1,6 +1,7 @@ use crate::test_utils::TestRandom; -use crate::{Address, Epoch}; +use crate::{Address, Epoch, ForkName}; use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -12,6 +13,7 @@ pub type BuilderIndex = u64; #[derive( Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash, 
)] +#[context_deserialize(ForkName)] pub struct Builder { pub pubkey: PublicKeyBytes, #[serde(with = "serde_utils::quoted_u8")] diff --git a/consensus/types/src/execution/signed_execution_payload_envelope.rs b/consensus/types/src/execution/signed_execution_payload_envelope.rs index cdcebc7b31..b1d949f863 100644 --- a/consensus/types/src/execution/signed_execution_payload_envelope.rs +++ b/consensus/types/src/execution/signed_execution_payload_envelope.rs @@ -1,9 +1,11 @@ use crate::test_utils::TestRandom; use crate::{ - ChainSpec, Domain, Epoch, EthSpec, ExecutionBlockHash, ExecutionPayloadEnvelope, Fork, Hash256, - SignedRoot, Slot, + BeaconState, BeaconStateError, ChainSpec, Domain, Epoch, EthSpec, ExecutionBlockHash, + ExecutionPayloadEnvelope, Fork, ForkName, Hash256, SignedRoot, Slot, + consts::gloas::BUILDER_INDEX_SELF_BUILD, }; use bls::{PublicKey, Signature}; +use context_deserialize::context_deserialize; use educe::Educe; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -13,6 +15,7 @@ use tree_hash_derive::TreeHash; #[derive(Debug, Clone, Serialize, Encode, Decode, Deserialize, TestRandom, TreeHash, Educe)] #[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(bound = "E: EthSpec")] +#[context_deserialize(ForkName)] pub struct SignedExecutionPayloadEnvelope<E: EthSpec> { pub message: ExecutionPayloadEnvelope<E>, pub signature: Signature, @@ -56,6 +59,42 @@ impl<E: EthSpec> SignedExecutionPayloadEnvelope<E> { self.signature.verify(pubkey, message) } + + /// Verify `self.signature` using keys drawn from the beacon state. 
+ pub fn verify_signature_with_state( + &self, + state: &BeaconState<E>, + spec: &ChainSpec, + ) -> Result<bool, BeaconStateError> { + let builder_index = self.message.builder_index; + + let pubkey_bytes = if builder_index == BUILDER_INDEX_SELF_BUILD { + let validator_index = state.latest_block_header().proposer_index; + state.get_validator(validator_index as usize)?.pubkey + } else { + state.get_builder(builder_index)?.pubkey + }; + + // TODO(gloas): Could use pubkey cache on state here, but it probably isn't worth + // it because this function is rarely used. Almost always the envelope should be signature + // verified prior to consensus code running. + let pubkey = pubkey_bytes.decompress()?; + + // Ensure the state's epoch matches the message's epoch before determining the Fork. + if self.epoch() != state.current_epoch() { + return Err(BeaconStateError::SignedEnvelopeIncorrectEpoch { + state_epoch: state.current_epoch(), + envelope_epoch: self.epoch(), + }); + } + + Ok(self.verify_signature( + &pubkey, + &state.fork(), + state.genesis_validators_root(), + spec, + )) + } } #[cfg(test)] diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index f661988edb..1352ded79e 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -23,7 +23,7 @@ use tree_hash_derive::TreeHash; use typenum::Unsigned; use crate::{ - ExecutionBlockHash, ExecutionPayloadBid, Withdrawal, + Address, ExecutionBlockHash, ExecutionPayloadBid, Withdrawal, attestation::{ AttestationData, AttestationDuty, BeaconCommittee, Checkpoint, CommitteeIndex, PTC, ParticipationFlags, PendingAttestation, @@ -174,8 +174,12 @@ pub enum BeaconStateError { MerkleTreeError(merkle_proof::MerkleTreeError), PartialWithdrawalCountInvalid(usize), NonExecutionAddressWithdrawalCredential, + WithdrawalCredentialMissingVersion, + WithdrawalCredentialMissingAddress, NoCommitteeFound(CommitteeIndex), 
InvalidCommitteeIndex(CommitteeIndex), + /// `Attestation.data.index` field is invalid in overloaded data index scenario. + BadOverloadedDataIndex(u64), InvalidSelectionProof { aggregator_index: u64, }, @@ -197,7 +201,12 @@ pub enum BeaconStateError { ProposerLookaheadOutOfBounds { i: usize, }, + SignedEnvelopeIncorrectEpoch { + state_epoch: Epoch, + envelope_epoch: Epoch, + }, InvalidIndicesCount, + InvalidExecutionPayloadAvailabilityIndex(usize), } /// Control whether an epoch-indexed field can be indexed at the next epoch or not. @@ -1917,6 +1926,15 @@ impl<E: EthSpec> BeaconState<E> { .ok_or(BeaconStateError::UnknownValidator(validator_index)) } + /// Safe indexer for the `builders` list. + /// + /// Will return an error pre-Gloas, or for out-of-bounds indices. + pub fn get_builder(&self, builder_index: BuilderIndex) -> Result<&Builder, BeaconStateError> { + self.builders()? + .get(builder_index as usize) + .ok_or(BeaconStateError::UnknownBuilder(builder_index)) + } + /// Add a validator to the registry and return the validator index that was allocated for it. pub fn add_validator_to_registry( &mut self, @@ -1963,6 +1981,64 @@ impl<E: EthSpec> BeaconState<E> { Ok(index) } + /// Add a builder to the registry and return the builder index that was allocated for it. + pub fn add_builder_to_registry( + &mut self, + pubkey: PublicKeyBytes, + withdrawal_credentials: Hash256, + amount: u64, + slot: Slot, + spec: &ChainSpec, + ) -> Result<BuilderIndex, BeaconStateError> { + // We are not yet using the spec's `set_or_append_list`, but could consider it if it crops + // up elsewhere. It has been retconned into the spec to support index reuse but so far + // index reuse is only relevant for builders. 
+ let builder_index = self.get_index_for_new_builder()?; + let builders = self.builders_mut()?; + + let version = *withdrawal_credentials + .as_slice() + .first() + .ok_or(BeaconStateError::WithdrawalCredentialMissingVersion)?; + let execution_address = withdrawal_credentials + .as_slice() + .get(12..) + .and_then(|bytes| Address::try_from(bytes).ok()) + .ok_or(BeaconStateError::WithdrawalCredentialMissingAddress)?; + + let builder = Builder { + pubkey, + version, + execution_address, + balance: amount, + deposit_epoch: slot.epoch(E::slots_per_epoch()), + withdrawable_epoch: spec.far_future_epoch, + }; + + if builder_index == builders.len() as u64 { + builders.push(builder)?; + } else { + *builders + .get_mut(builder_index as usize) + .ok_or(BeaconStateError::UnknownBuilder(builder_index))? = builder; + } + Ok(builder_index) + } + + // TODO(gloas): Optimize this function if we see a lot of registered builders on-chain. + // A cache here could be quite fiddly because this calculation depends on withdrawable epoch + // and balance - a cache for this would need to be updated whenever either of those fields + // changes. + pub fn get_index_for_new_builder(&self) -> Result<BuilderIndex, BeaconStateError> { + let current_epoch = self.current_epoch(); + for (index, builder) in self.builders()?.iter().enumerate() { + if builder.withdrawable_epoch <= current_epoch && builder.balance == 0 { + return Ok(index as u64); + } + } + Ok(self.builders()?.len() as u64) + } + /// Safe copy-on-write accessor for the `validators` list. 
pub fn get_validator_cow( &mut self, diff --git a/consensus/types/src/validator/mod.rs b/consensus/types/src/validator/mod.rs index 8a67407821..23f7a2a0e1 100644 --- a/consensus/types/src/validator/mod.rs +++ b/consensus/types/src/validator/mod.rs @@ -4,6 +4,8 @@ mod validator_registration_data; mod validator_subscription; pub use proposer_preparation_data::ProposerPreparationData; -pub use validator::{Validator, is_compounding_withdrawal_credential}; +pub use validator::{ + Validator, is_builder_withdrawal_credential, is_compounding_withdrawal_credential, +}; pub use validator_registration_data::{SignedValidatorRegistrationData, ValidatorRegistrationData}; pub use validator_subscription::ValidatorSubscription; diff --git a/consensus/types/src/validator/validator.rs b/consensus/types/src/validator/validator.rs index 7898ab9073..5c5bfc761f 100644 --- a/consensus/types/src/validator/validator.rs +++ b/consensus/types/src/validator/validator.rs @@ -319,6 +319,14 @@ pub fn is_compounding_withdrawal_credential( .unwrap_or(false) } +pub fn is_builder_withdrawal_credential(withdrawal_credentials: Hash256, spec: &ChainSpec) -> bool { + withdrawal_credentials + .as_slice() + .first() + .map(|prefix_byte| *prefix_byte == spec.builder_withdrawal_prefix_byte) + .unwrap_or(false) +} + #[cfg(test)] mod tests { use super::*; diff --git a/deny.toml b/deny.toml index 54ede06429..e6c30f6a48 100644 --- a/deny.toml +++ b/deny.toml @@ -10,12 +10,14 @@ deny = [ { crate = "protobuf", reason = "use quick-protobuf instead" }, { crate = "derivative", reason = "use educe or derive_more instead" }, { crate = "ark-ff", reason = "present in Cargo.lock but not needed by Lighthouse" }, + { crate = "openssl", reason = "non-Rust dependency, use rustls instead" }, { crate = "strum", deny-multiple-versions = true, reason = "takes a long time to compile" }, { crate = "reqwest", deny-multiple-versions = true, reason = "takes a long time to compile" }, { crate = "aes", deny-multiple-versions = true, 
reason = "takes a long time to compile" }, { crate = "sha2", deny-multiple-versions = true, reason = "takes a long time to compile" }, { crate = "pbkdf2", deny-multiple-versions = true, reason = "takes a long time to compile" }, { crate = "scrypt", deny-multiple-versions = true, reason = "takes a long time to compile" }, + { crate = "syn", deny-multiple-versions = true, reason = "takes a long time to compile" }, ] [sources] diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index ee3e910b36..6fd5a6538c 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -758,3 +758,21 @@ fn validator_proposer_nodes() { ); }); } + +// Head monitor is enabled by default. +#[test] +fn head_monitor_default() { + CommandLineTest::new().run().with_config(|config| { + assert!(config.enable_beacon_head_monitor); + }); +} + +#[test] +fn head_monitor_disabled() { + CommandLineTest::new() + .flag("disable-beacon-head-monitor", None) + .run() + .with_config(|config| { + assert!(!config.enable_beacon_head_monitor); + }); +} diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 97c1c4f4f9..8e5bd24d24 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -47,8 +47,27 @@ excluded_paths = [ "bls12-381-tests/hash_to_G2", "tests/.*/eip7732", "tests/.*/eip7805", + # TODO(gloas): remove these ignores as more Gloas operations are implemented + "tests/.*/gloas/operations/block_header/.*", + "tests/.*/gloas/operations/execution_payload_bid/.*", + "tests/.*/gloas/operations/payload_attestation/.*", + # TODO(EIP-7732): remove these ignores as Gloas consensus is implemented + "tests/.*/gloas/epoch_processing/.*", + "tests/.*/gloas/finality/.*", + "tests/.*/gloas/fork/.*", + "tests/.*/gloas/fork_choice/.*", + "tests/.*/gloas/networking/.*", + "tests/.*/gloas/rewards/.*", + "tests/.*/gloas/sanity/.*", + 
"tests/.*/gloas/transition/.*", # Ignore MatrixEntry SSZ tests for now. - "tests/.*/fulu/ssz_static/MatrixEntry/.*", + "tests/.*/.*/ssz_static/MatrixEntry/.*", + # TODO(gloas): Ignore Gloas light client stuff for now + "tests/.*/gloas/ssz_static/LightClient.*/.*", + # Execution payload header is irrelevant after Gloas, this type will probably be deleted. + "tests/.*/gloas/ssz_static/ExecutionPayloadHeader/.*", + # ForkChoiceNode is internal to fork choice and probably doesn't need SSZ tests. + "tests/.*/gloas/ssz_static/ForkChoiceNode/.*", # EIP-7916 is still in draft and hasn't been implemented yet https://eips.ethereum.org/EIPS/eip-7916 "tests/general/phase0/ssz_generic/progressive_bitlist", "tests/general/phase0/ssz_generic/basic_progressive_list", @@ -61,8 +80,6 @@ excluded_paths = [ "tests/.*/.*/epoch_processing/.*/post_epoch.ssz_snappy", # Ignore inactivity_scores tests for now (should implement soon). "tests/.*/.*/rewards/inactivity_scores/.*", - # Ignore gloas tests for now - "tests/.*/gloas/.*", # Ignore KZG tests that target internal kzg library functions "tests/.*/compute_verify_cell_kzg_proof_batch_challenge/.*", "tests/.*/compute_challenge/.*", diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index e778300879..ef998a94ba 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -7,17 +7,20 @@ use ssz::Decode; use state_processing::common::update_progressive_balances_cache::initialize_progressive_balances_cache; use state_processing::epoch_cache::initialize_epoch_cache; use state_processing::per_block_processing::process_operations::{ - process_consolidation_requests, process_deposit_requests, process_withdrawal_requests, + process_consolidation_requests, process_deposit_requests_post_gloas, + process_deposit_requests_pre_gloas, process_withdrawal_requests, }; use state_processing::{ ConsensusContext, + envelope_processing::{EnvelopeProcessingError, 
process_execution_payload_envelope}, per_block_processing::{ VerifyBlockRoot, VerifySignatures, errors::BlockProcessingError, process_block_header, process_execution_payload, process_operations::{ - altair_deneb, base, process_attester_slashings, process_bls_to_execution_changes, - process_deposits, process_exits, process_proposer_slashings, + altair_deneb, base, gloas, process_attester_slashings, + process_bls_to_execution_changes, process_deposits, process_exits, + process_proposer_slashings, }, process_sync_aggregate, withdrawals, }, @@ -28,7 +31,7 @@ use types::{ BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyFulu, BeaconState, BlindedPayload, ConsolidationRequest, Deposit, DepositRequest, ExecutionPayload, ForkVersionDecode, FullPayload, ProposerSlashing, SignedBlsToExecutionChange, - SignedVoluntaryExit, SyncAggregate, WithdrawalRequest, + SignedExecutionPayloadEnvelope, SignedVoluntaryExit, SyncAggregate, WithdrawalRequest, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -58,6 +61,8 @@ pub struct Operations<E: EthSpec, O: Operation<E>> { } pub trait Operation<E: EthSpec>: Debug + Sync + Sized { + type Error: Debug; + fn handler_name() -> String; fn filename() -> String { @@ -75,10 +80,12 @@ pub trait Operation<E: EthSpec>: Debug + Sync + Sized { state: &mut BeaconState<E>, spec: &ChainSpec, _: &Operations<E, Self>, - ) -> Result<(), BlockProcessingError>; + ) -> Result<(), Self::Error>; } impl<E: EthSpec> Operation<E> for Attestation<E> { + type Error = BlockProcessingError; + fn handler_name() -> String { "attestation".into() } @@ -98,9 +105,18 @@ impl<E: EthSpec> Operation<E> for Attestation<E> { _: &Operations<E, Self>, ) -> Result<(), BlockProcessingError> { initialize_epoch_cache(state, spec)?; + initialize_progressive_balances_cache(state, spec)?; let mut ctxt = ConsensusContext::new(state.slot()); - if state.fork_name_unchecked().altair_enabled() { - initialize_progressive_balances_cache(state, spec)?; + if 
state.fork_name_unchecked().gloas_enabled() { + gloas::process_attestation( + state, + self.to_ref(), + 0, + &mut ctxt, + VerifySignatures::True, + spec, + ) + } else if state.fork_name_unchecked().altair_enabled() { altair_deneb::process_attestation( state, self.to_ref(), @@ -122,6 +138,8 @@ impl<E: EthSpec> Operation<E> for Attestation<E> { } impl<E: EthSpec> Operation<E> for AttesterSlashing<E> { + type Error = BlockProcessingError; + fn handler_name() -> String { "attester_slashing".into() } @@ -153,6 +171,8 @@ impl<E: EthSpec> Operation<E> for AttesterSlashing<E> { } impl<E: EthSpec> Operation<E> for Deposit { + type Error = BlockProcessingError; + fn handler_name() -> String { "deposit".into() } @@ -177,6 +197,8 @@ impl<E: EthSpec> Operation<E> for Deposit { } impl<E: EthSpec> Operation<E> for ProposerSlashing { + type Error = BlockProcessingError; + fn handler_name() -> String { "proposer_slashing".into() } @@ -204,6 +226,8 @@ impl<E: EthSpec> Operation<E> for ProposerSlashing { } impl<E: EthSpec> Operation<E> for SignedVoluntaryExit { + type Error = BlockProcessingError; + fn handler_name() -> String { "voluntary_exit".into() } @@ -228,6 +252,8 @@ impl<E: EthSpec> Operation<E> for SignedVoluntaryExit { } impl<E: EthSpec> Operation<E> for BeaconBlock<E> { + type Error = BlockProcessingError; + fn handler_name() -> String { "block_header".into() } @@ -259,6 +285,8 @@ impl<E: EthSpec> Operation<E> for BeaconBlock<E> { } impl<E: EthSpec> Operation<E> for SyncAggregate<E> { + type Error = BlockProcessingError; + fn handler_name() -> String { "sync_aggregate".into() } @@ -287,6 +315,8 @@ impl<E: EthSpec> Operation<E> for SyncAggregate<E> { } impl<E: EthSpec> Operation<E> for BeaconBlockBody<E, FullPayload<E>> { + type Error = BlockProcessingError; + fn handler_name() -> String { "execution_payload".into() } @@ -296,7 +326,7 @@ impl<E: EthSpec> Operation<E> for BeaconBlockBody<E, FullPayload<E>> { } fn is_enabled_for_fork(fork_name: ForkName) -> bool { - 
fork_name.bellatrix_enabled() + fork_name.bellatrix_enabled() && !fork_name.gloas_enabled() } fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result<Self, Error> { @@ -307,8 +337,7 @@ impl<E: EthSpec> Operation<E> for BeaconBlockBody<E, FullPayload<E>> { ForkName::Deneb => BeaconBlockBody::Deneb(<_>::from_ssz_bytes(bytes)?), ForkName::Electra => BeaconBlockBody::Electra(<_>::from_ssz_bytes(bytes)?), ForkName::Fulu => BeaconBlockBody::Fulu(<_>::from_ssz_bytes(bytes)?), - // TODO(EIP-7732): See if we need to handle Gloas here - _ => panic!(), + _ => panic!("Not supported after Gloas"), }) }) } @@ -330,7 +359,10 @@ impl<E: EthSpec> Operation<E> for BeaconBlockBody<E, FullPayload<E>> { } } } + impl<E: EthSpec> Operation<E> for BeaconBlockBody<E, BlindedPayload<E>> { + type Error = BlockProcessingError; + fn handler_name() -> String { "execution_payload".into() } @@ -340,7 +372,7 @@ impl<E: EthSpec> Operation<E> for BeaconBlockBody<E, BlindedPayload<E>> { } fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name.bellatrix_enabled() + fork_name.bellatrix_enabled() && !fork_name.gloas_enabled() } fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result<Self, Error> { @@ -367,8 +399,7 @@ impl<E: EthSpec> Operation<E> for BeaconBlockBody<E, BlindedPayload<E>> { let inner = <BeaconBlockBodyFulu<E, FullPayload<E>>>::from_ssz_bytes(bytes)?; BeaconBlockBody::Fulu(inner.clone_as_blinded()) } - // TODO(EIP-7732): See if we need to handle Gloas here - _ => panic!(), + _ => panic!("Not supported after Gloas"), }) }) } @@ -391,7 +422,46 @@ impl<E: EthSpec> Operation<E> for BeaconBlockBody<E, BlindedPayload<E>> { } } +impl<E: EthSpec> Operation<E> for SignedExecutionPayloadEnvelope<E> { + type Error = EnvelopeProcessingError; + + fn handler_name() -> String { + "execution_payload".into() + } + + fn filename() -> String { + "signed_envelope.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + 
fork_name.gloas_enabled() + } + + fn decode(path: &Path, _: ForkName, _spec: &ChainSpec) -> Result<Self, Error> { + ssz_decode_file(path) + } + + fn apply_to( + &self, + state: &mut BeaconState<E>, + spec: &ChainSpec, + extra: &Operations<E, Self>, + ) -> Result<(), Self::Error> { + let valid = extra + .execution_metadata + .as_ref() + .is_some_and(|e| e.execution_valid); + if valid { + process_execution_payload_envelope(state, None, self, VerifySignatures::True, spec) + } else { + Err(EnvelopeProcessingError::ExecutionInvalid) + } + } +} + impl<E: EthSpec> Operation<E> for WithdrawalsPayload<E> { + type Error = BlockProcessingError; + fn handler_name() -> String { "withdrawals".into() } @@ -438,6 +508,8 @@ impl<E: EthSpec> Operation<E> for WithdrawalsPayload<E> { } impl<E: EthSpec> Operation<E> for SignedBlsToExecutionChange { + type Error = BlockProcessingError; + fn handler_name() -> String { "bls_to_execution_change".into() } @@ -470,6 +542,8 @@ impl<E: EthSpec> Operation<E> for SignedBlsToExecutionChange { } impl<E: EthSpec> Operation<E> for WithdrawalRequest { + type Error = BlockProcessingError; + fn handler_name() -> String { "withdrawal_request".into() } @@ -494,6 +568,8 @@ impl<E: EthSpec> Operation<E> for WithdrawalRequest { } impl<E: EthSpec> Operation<E> for DepositRequest { + type Error = BlockProcessingError; + fn handler_name() -> String { "deposit_request".into() } @@ -512,11 +588,17 @@ impl<E: EthSpec> Operation<E> for DepositRequest { spec: &ChainSpec, _extra: &Operations<E, Self>, ) -> Result<(), BlockProcessingError> { - process_deposit_requests(state, std::slice::from_ref(self), spec) + if state.fork_name_unchecked().gloas_enabled() { + process_deposit_requests_post_gloas(state, std::slice::from_ref(self), spec) + } else { + process_deposit_requests_pre_gloas(state, std::slice::from_ref(self), spec) + } } } impl<E: EthSpec> Operation<E> for ConsolidationRequest { + type Error = BlockProcessingError; + fn handler_name() -> String { 
"consolidation_request".into() } diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index b0fc90b169..9d11252edb 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -422,6 +422,11 @@ where fn handler_name(&self) -> String { BeaconState::<E>::name().into() } + + fn disabled_forks(&self) -> Vec<ForkName> { + // TODO(gloas): Can be removed once we enable Gloas on all tests + vec![] + } } impl<T, E> Handler for SszStaticWithSpecHandler<T, E> @@ -444,6 +449,11 @@ where T::name().into() } + fn disabled_forks(&self) -> Vec<ForkName> { + // TODO(gloas): Can be removed once we enable Gloas on all tests + vec![] + } + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { self.supported_forks.contains(&fork_name) } @@ -1125,9 +1135,20 @@ impl<E: EthSpec + TypeName, O: Operation<E>> Handler for OperationsHandler<E, O> } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { - // TODO(gloas): So far only withdrawals tests are enabled for Gloas. 
Self::Case::is_enabled_for_fork(fork_name) - && (!fork_name.gloas_enabled() || self.handler_name() == "withdrawals") + && (!fork_name.gloas_enabled() + || self.handler_name() == "attestation" + || self.handler_name() == "attester_slashing" + || self.handler_name() == "bls_to_execution_change" + || self.handler_name() == "consolidation_request" + || self.handler_name() == "deposit_request" + || self.handler_name() == "deposit" + || self.handler_name() == "execution_payload" + || self.handler_name() == "proposer_slashing" + || self.handler_name() == "sync_aggregate" + || self.handler_name() == "withdrawal_request" + || self.handler_name() == "withdrawals" + || self.handler_name() == "voluntary_exit") } } diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index ae00727fc3..18666befaa 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -72,6 +72,9 @@ type_name!(DepositData); type_name!(DepositMessage); type_name!(DepositRequest); type_name!(Eth1Data); +type_name!(Builder); +type_name!(BuilderPendingPayment); +type_name!(BuilderPendingWithdrawal); type_name!(WithdrawalRequest); type_name_generic!(ExecutionPayload); type_name_generic!(ExecutionPayloadBellatrix, "ExecutionPayload"); @@ -90,6 +93,8 @@ type_name_generic!(ExecutionPayloadHeaderFulu, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadBid); type_name_generic!(SignedExecutionPayloadBid); type_name_generic!(ExecutionRequests); +type_name_generic!(ExecutionPayloadEnvelope); +type_name_generic!(SignedExecutionPayloadEnvelope); type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); type_name!(ForkData); @@ -97,6 +102,7 @@ type_name_generic!(HistoricalBatch); type_name_generic!(IndexedAttestation); type_name_generic!(IndexedAttestationBase, "IndexedAttestation"); type_name_generic!(IndexedAttestationElectra, "IndexedAttestation"); +type_name_generic!(IndexedPayloadAttestation); 
type_name_generic!(LightClientBootstrap); type_name_generic!(LightClientBootstrapAltair, "LightClientBootstrap"); type_name_generic!(LightClientBootstrapCapella, "LightClientBootstrap"); @@ -149,10 +155,15 @@ type_name_generic!(LightClientUpdateDeneb, "LightClientUpdate"); type_name_generic!(LightClientUpdateElectra, "LightClientUpdate"); type_name_generic!(LightClientUpdateFulu, "LightClientUpdate"); type_name_generic!(PendingAttestation); +type_name_generic!(PayloadAttestation); +type_name!(PayloadAttestationData); +type_name!(PayloadAttestationMessage); type_name!(PendingConsolidation); type_name!(PendingPartialWithdrawal); type_name!(PendingDeposit); type_name!(ProposerSlashing); +type_name!(ProposerPreferences); +type_name!(SignedProposerPreferences); type_name_generic!(SignedAggregateAndProof); type_name_generic!(SignedAggregateAndProofBase, "SignedAggregateAndProof"); type_name_generic!(SignedAggregateAndProofElectra, "SignedAggregateAndProof"); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 505693c31d..8a53a61929 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -87,6 +87,12 @@ fn operations_execution_payload_blinded() { OperationsHandler::<MainnetEthSpec, BeaconBlockBody<_, BlindedPayload<_>>>::default().run(); } +#[test] +fn operations_execution_payload_envelope() { + OperationsHandler::<MinimalEthSpec, SignedExecutionPayloadEnvelope<_>>::default().run(); + OperationsHandler::<MainnetEthSpec, SignedExecutionPayloadEnvelope<_>>::default().run(); +} + #[test] fn operations_withdrawals() { OperationsHandler::<MinimalEthSpec, WithdrawalsPayload<_>>::default().run(); @@ -94,7 +100,7 @@ fn operations_withdrawals() { } #[test] -fn operations_withdrawal_reqeusts() { +fn operations_withdrawal_requests() { OperationsHandler::<MinimalEthSpec, WithdrawalRequest>::default().run(); OperationsHandler::<MainnetEthSpec, WithdrawalRequest>::default().run(); } @@ -241,9 +247,12 @@ mod ssz_static { use 
ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler}; use types::state::HistoricalSummary; use types::{ - AttesterSlashingBase, AttesterSlashingElectra, ConsolidationRequest, DataColumnSidecarFulu, - DataColumnSidecarGloas, DepositRequest, LightClientBootstrapAltair, PendingDeposit, - PendingPartialWithdrawal, WithdrawalRequest, *, + AttesterSlashingBase, AttesterSlashingElectra, Builder, BuilderPendingPayment, + BuilderPendingWithdrawal, ConsolidationRequest, DepositRequest, ExecutionPayloadBid, + ExecutionPayloadEnvelope, IndexedPayloadAttestation, LightClientBootstrapAltair, + PayloadAttestation, PayloadAttestationData, PayloadAttestationMessage, PendingDeposit, + PendingPartialWithdrawal, SignedExecutionPayloadBid, SignedExecutionPayloadEnvelope, + WithdrawalRequest, *, }; ssz_static_test!(attestation_data, AttestationData); @@ -600,7 +609,7 @@ mod ssz_static { .run(); SszStaticHandler::<ExecutionPayloadFulu<MinimalEthSpec>, MinimalEthSpec>::fulu_only().run(); SszStaticHandler::<ExecutionPayloadFulu<MainnetEthSpec>, MainnetEthSpec>::fulu_only().run(); - SszStaticHandler::<ExecutionPayloadGloas<MainnetEthSpec>, MainnetEthSpec>::gloas_only() + SszStaticHandler::<ExecutionPayloadGloas<MinimalEthSpec>, MinimalEthSpec>::gloas_only() .run(); SszStaticHandler::<ExecutionPayloadGloas<MainnetEthSpec>, MainnetEthSpec>::gloas_only() .run(); @@ -749,6 +758,81 @@ mod ssz_static { SszStaticHandler::<ExecutionRequests<MinimalEthSpec>, MinimalEthSpec>::electra_and_later() .run(); } + + // Gloas and later + #[test] + fn builder() { + SszStaticHandler::<Builder, MinimalEthSpec>::gloas_and_later().run(); + SszStaticHandler::<Builder, MainnetEthSpec>::gloas_and_later().run(); + } + + #[test] + fn builder_pending_payment() { + SszStaticHandler::<BuilderPendingPayment, MinimalEthSpec>::gloas_and_later().run(); + SszStaticHandler::<BuilderPendingPayment, MainnetEthSpec>::gloas_and_later().run(); + } + + #[test] + fn builder_pending_withdrawal() { + 
SszStaticHandler::<BuilderPendingWithdrawal, MinimalEthSpec>::gloas_and_later().run(); + SszStaticHandler::<BuilderPendingWithdrawal, MainnetEthSpec>::gloas_and_later().run(); + } + + #[test] + fn payload_attestation_data() { + SszStaticHandler::<PayloadAttestationData, MinimalEthSpec>::gloas_and_later().run(); + SszStaticHandler::<PayloadAttestationData, MainnetEthSpec>::gloas_and_later().run(); + } + + #[test] + fn payload_attestation() { + SszStaticHandler::<PayloadAttestation<MinimalEthSpec>, MinimalEthSpec>::gloas_and_later() + .run(); + SszStaticHandler::<PayloadAttestation<MainnetEthSpec>, MainnetEthSpec>::gloas_and_later() + .run(); + } + + #[test] + fn payload_attestation_message() { + SszStaticHandler::<PayloadAttestationMessage, MinimalEthSpec>::gloas_and_later().run(); + SszStaticHandler::<PayloadAttestationMessage, MainnetEthSpec>::gloas_and_later().run(); + } + + #[test] + fn indexed_payload_attestation() { + SszStaticHandler::<IndexedPayloadAttestation<MinimalEthSpec>, MinimalEthSpec>::gloas_and_later() + .run(); + SszStaticHandler::<IndexedPayloadAttestation<MainnetEthSpec>, MainnetEthSpec>::gloas_and_later() + .run(); + } + + #[test] + fn execution_payload_envelope() { + SszStaticHandler::<ExecutionPayloadEnvelope<MinimalEthSpec>, MinimalEthSpec>::gloas_and_later() + .run(); + SszStaticHandler::<ExecutionPayloadEnvelope<MainnetEthSpec>, MainnetEthSpec>::gloas_and_later() + .run(); + } + + #[test] + fn signed_execution_payload_envelope() { + SszStaticHandler::<SignedExecutionPayloadEnvelope<MinimalEthSpec>, MinimalEthSpec>::gloas_and_later() + .run(); + SszStaticHandler::<SignedExecutionPayloadEnvelope<MainnetEthSpec>, MainnetEthSpec>::gloas_and_later() + .run(); + } + + #[test] + fn proposer_preferences() { + SszStaticHandler::<ProposerPreferences, MinimalEthSpec>::gloas_and_later().run(); + SszStaticHandler::<ProposerPreferences, MainnetEthSpec>::gloas_and_later().run(); + } + + #[test] + fn signed_proposer_preferences() { + 
SszStaticHandler::<SignedProposerPreferences, MinimalEthSpec>::gloas_and_later().run(); + SszStaticHandler::<SignedProposerPreferences, MainnetEthSpec>::gloas_and_later().run(); + } } #[test] diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 0483f61538..4b9432b67b 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -137,11 +137,7 @@ mod tests { } fn client_identity_path() -> PathBuf { - if cfg!(target_os = "macos") { - tls_dir().join("lighthouse").join("key_legacy.p12") - } else { - tls_dir().join("lighthouse").join("key.p12") - } + tls_dir().join("lighthouse").join("key.p12") } fn client_identity_password() -> String { diff --git a/testing/web3signer_tests/tls/generate.sh b/testing/web3signer_tests/tls/generate.sh index 3b14dbddba..31900d5d90 100755 --- a/testing/web3signer_tests/tls/generate.sh +++ b/testing/web3signer_tests/tls/generate.sh @@ -1,12 +1,5 @@ #!/bin/bash -# The lighthouse/key_legacy.p12 file is generated specifically for macOS because the default `openssl pkcs12` encoding -# algorithm in OpenSSL v3 is not compatible with the PKCS algorithm used by the Apple Security Framework. The client -# side (using the reqwest crate) relies on the Apple Security Framework to parse PKCS files. -# We don't need to generate web3signer/key_legacy.p12 because the compatibility issue doesn't occur on the web3signer -# side. It seems that web3signer (Java) uses its own implementation to parse PKCS files. -# See https://github.com/sigp/lighthouse/issues/6442#issuecomment-2469252651 - # We specify `-days 825` when generating the certificate files because Apple requires TLS server certificates to have a # validity period of 825 days or fewer. 
# See https://github.com/sigp/lighthouse/issues/6442#issuecomment-2474979183 @@ -16,5 +9,4 @@ openssl pkcs12 -export -out web3signer/key.p12 -inkey web3signer/key.key -in web cp web3signer/cert.pem lighthouse/web3signer.pem && openssl req -x509 -sha256 -nodes -days 825 -newkey rsa:4096 -keyout lighthouse/key.key -out lighthouse/cert.pem -config lighthouse/config && openssl pkcs12 -export -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && -openssl pkcs12 -export -legacy -out lighthouse/key_legacy.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && openssl x509 -noout -fingerprint -sha256 -inform pem -in lighthouse/cert.pem | cut -b 20-| sed "s/^/lighthouse /" > web3signer/known_clients.txt diff --git a/testing/web3signer_tests/tls/lighthouse/key_legacy.p12 b/testing/web3signer_tests/tls/lighthouse/key_legacy.p12 deleted file mode 100644 index c3394fae9a..0000000000 Binary files a/testing/web3signer_tests/tls/lighthouse/key_legacy.p12 and /dev/null differ diff --git a/validator_client/beacon_node_fallback/Cargo.toml b/validator_client/beacon_node_fallback/Cargo.toml index 481aece48b..bc1ac20d44 100644 --- a/validator_client/beacon_node_fallback/Cargo.toml +++ b/validator_client/beacon_node_fallback/Cargo.toml @@ -11,7 +11,7 @@ path = "src/lib.rs" [dependencies] bls = { workspace = true } clap = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["events"] } futures = { workspace = true } itertools = { workspace = true } sensitive_url = { workspace = true } diff --git a/validator_client/beacon_node_fallback/src/beacon_head_monitor.rs b/validator_client/beacon_node_fallback/src/beacon_head_monitor.rs new file mode 100644 index 0000000000..bed107d856 --- /dev/null +++ b/validator_client/beacon_node_fallback/src/beacon_head_monitor.rs @@ -0,0 +1,423 @@ +use crate::BeaconNodeFallback; +use eth2::types::{EventKind, 
EventTopic, Hash256, SseHead}; +use futures::StreamExt; +use slot_clock::SlotClock; +use std::collections::HashMap; +use std::sync::Arc; +use tokio::sync::RwLock; +use tracing::{debug, info, warn}; +use types::EthSpec; + +type CacheHashMap = HashMap<usize, SseHead>; + +// This is used to send the index derived from `CandidateBeaconNode` to the +// `AttestationService` for further processing +#[derive(Debug)] +pub struct HeadEvent { + pub beacon_node_index: usize, + pub slot: types::Slot, + pub beacon_block_root: Hash256, +} + +/// Cache to maintain the latest head received from each of the beacon nodes +/// in the `BeaconNodeFallback`. +#[derive(Debug)] +pub struct BeaconHeadCache { + cache: RwLock<CacheHashMap>, +} + +impl BeaconHeadCache { + /// Creates a new empty beacon head cache. + pub fn new() -> Self { + Self { + cache: RwLock::new(HashMap::new()), + } + } + + /// Retrieves the cached head for a specific beacon node. + /// Returns `None` if no head has been cached for that node yet. + pub async fn get(&self, beacon_node_index: usize) -> Option<SseHead> { + self.cache.read().await.get(&beacon_node_index).cloned() + } + + /// Stores or updates the head event for a specific beacon node. + /// Replaces any previously cached head for the given node. + pub async fn insert(&self, beacon_node_index: usize, head: SseHead) { + self.cache.write().await.insert(beacon_node_index, head); + } + + /// Checks if the given head is the latest among all cached heads. + /// Returns `true` if the head's slot is >= all cached heads' slots. + pub async fn is_latest(&self, head: &SseHead) -> bool { + let cache = self.cache.read().await; + cache + .values() + .all(|cache_head| head.slot >= cache_head.slot) + } + + /// Clears all cached heads, removing entries for all beacon nodes. + /// Useful when beacon node candidates are refreshed to avoid stale references. 
+ pub async fn purge_cache(&self) { + self.cache.write().await.clear(); + } +} + +impl Default for BeaconHeadCache { + fn default() -> Self { + Self::new() + } +} + +// Runs a non-terminating loop to update the `BeaconHeadCache` with the latest head received +// from the candidate beacon_nodes. This is an attempt to stream events from beacon nodes and +// potentially start attestation duties earlier, as soon as the latest head is received from any +// of the beacon nodes, in contrast to attesting at the 1/3rd mark in the slot. +// +// +// The cache and the candidate BNs list are refreshed/purged to avoid dangling reference +// conditions that arise due to `update_candidates_list`. +// +// Starts the service to perpetually stream head events from connected beacon_nodes. +pub async fn poll_head_event_from_beacon_nodes<E: EthSpec, T: SlotClock + 'static>( + beacon_nodes: Arc<BeaconNodeFallback<T>>, +) -> Result<(), String> { + let head_cache = beacon_nodes + .beacon_head_cache + .clone() + .ok_or("Unable to start head monitor without beacon_head_cache")?; + let head_monitor_send = beacon_nodes + .head_monitor_send + .clone() + .ok_or("Unable to start head monitor without head_monitor_send")?; + + info!("Starting head monitoring service"); + let candidates = { + let candidates_guard = beacon_nodes.candidates.read().await; + candidates_guard.clone() + }; + + // Clear the cache in case it contains stale data from a previous run. This function gets + // restarted if it fails (see monitoring in `start_fallback_updater_service`). + head_cache.purge_cache().await; + + // Create Vec of streams, which we will select over. 
+ let mut streams = vec![]; + + for candidate in &candidates { + let head_event_stream = candidate + .beacon_node + .get_events::<E>(&[EventTopic::Head]) + .await; + + let head_event_stream = match head_event_stream { + Ok(stream) => stream, + Err(e) => { + warn!(error = ?e, node_index = candidate.index, "Failed to get head event stream"); + continue; + } + }; + + streams.push(head_event_stream.map(|event| (candidate.index, event))); + } + + if streams.is_empty() { + return Err("No beacon nodes available for head event streaming".to_string()); + } + + // Combine streams into a single stream and poll events from any of them. + let mut combined_stream = futures::stream::select_all(streams); + + while let Some((candidate_index, event_result)) = combined_stream.next().await { + match event_result { + Ok(EventKind::Head(head)) => { + debug!( + candidate_index, + block_root = ?head.block, + slot = %head.slot, + "New head from beacon node" + ); + + // Skip optimistic heads - the beacon node can't produce valid + // attestation data when its execution layer is not verified + if head.execution_optimistic { + debug!( + candidate_index, + block_root = ?head.block, + slot = %head.slot, + "Skipping optimistic head" + ); + continue; + } + + head_cache.insert(candidate_index, head.clone()).await; + + if !head_cache.is_latest(&head).await { + debug!( + candidate_index, + block_root = ?head.block, + slot = %head.slot, + "Skipping stale head" + ); + continue; + } + + if head_monitor_send + .send(HeadEvent { + beacon_node_index: candidate_index, + slot: head.slot, + beacon_block_root: head.block, + }) + .await + .is_err() + { + return Err("Head monitoring service channel closed".into()); + } + } + Ok(event) => { + warn!( + event_kind = event.topic_name(), + candidate_index, "Received unexpected event from BN" + ); + continue; + } + Err(e) => { + return Err(format!( + "Head monitoring stream error, node: {candidate_index}, error: {e:?}" + )); + } + } + } + + Err("Stream ended 
unexpectedly".into()) +} + +#[cfg(test)] +mod tests { + use super::*; + use bls::FixedBytesExtended; + use types::{Hash256, Slot}; + + fn create_sse_head(slot: u64, block_root: u8) -> SseHead { + SseHead { + slot: types::Slot::new(slot), + block: Hash256::from_low_u64_be(block_root as u64), + state: Hash256::from_low_u64_be(block_root as u64), + epoch_transition: false, + previous_duty_dependent_root: Hash256::from_low_u64_be(block_root as u64), + current_duty_dependent_root: Hash256::from_low_u64_be(block_root as u64), + execution_optimistic: false, + } + } + + #[tokio::test] + async fn test_beacon_head_cache_insertion_and_retrieval() { + let cache = BeaconHeadCache::new(); + let head_1 = create_sse_head(1, 1); + let head_2 = create_sse_head(2, 2); + + cache.insert(0, head_1.clone()).await; + cache.insert(1, head_2.clone()).await; + + assert_eq!(cache.get(0).await, Some(head_1)); + assert_eq!(cache.get(1).await, Some(head_2)); + assert_eq!(cache.get(2).await, None); + } + + #[tokio::test] + async fn test_beacon_head_cache_update() { + let cache = BeaconHeadCache::new(); + let head_old = create_sse_head(1, 1); + let head_new = create_sse_head(2, 2); + + cache.insert(0, head_old).await; + cache.insert(0, head_new.clone()).await; + + assert_eq!(cache.get(0).await, Some(head_new)); + } + + #[tokio::test] + async fn test_is_latest_with_higher_slot() { + let cache = BeaconHeadCache::new(); + let head_1 = create_sse_head(1, 1); + let head_2 = create_sse_head(2, 2); + let head_3 = create_sse_head(3, 3); + + cache.insert(0, head_1).await; + cache.insert(1, head_2).await; + + assert!(cache.is_latest(&head_3).await); + } + + #[tokio::test] + async fn test_is_latest_with_lower_slot() { + let cache = BeaconHeadCache::new(); + let head_1 = create_sse_head(1, 1); + let head_2 = create_sse_head(2, 2); + let head_older = create_sse_head(1, 99); + + cache.insert(0, head_1).await; + cache.insert(1, head_2).await; + + assert!(!cache.is_latest(&head_older).await); + } + + 
#[tokio::test] + async fn test_is_latest_with_equal_slot() { + let cache = BeaconHeadCache::new(); + let head_1 = create_sse_head(5, 1); + let head_2 = create_sse_head(5, 2); + let head_equal = create_sse_head(5, 3); + + cache.insert(0, head_1).await; + cache.insert(1, head_2).await; + + assert!(cache.is_latest(&head_equal).await); + } + + #[tokio::test] + async fn test_is_latest_empty_cache() { + let cache = BeaconHeadCache::new(); + let head = create_sse_head(1, 1); + + assert!(cache.is_latest(&head).await); + } + + #[tokio::test] + async fn test_purge_cache_clears_all_entries() { + let cache = BeaconHeadCache::new(); + let head_1 = create_sse_head(1, 1); + let head_2 = create_sse_head(2, 2); + + cache.insert(0, head_1).await; + cache.insert(1, head_2).await; + + assert!(cache.get(0).await.is_some()); + assert!(cache.get(1).await.is_some()); + + cache.purge_cache().await; + + assert!(cache.get(0).await.is_none()); + assert!(cache.get(1).await.is_none()); + } + + #[tokio::test] + async fn test_head_event_creation() { + let block_root = Hash256::from_low_u64_be(99); + let event = HeadEvent { + beacon_node_index: 42, + slot: Slot::new(123), + beacon_block_root: block_root, + }; + assert_eq!(event.beacon_node_index, 42); + assert_eq!(event.slot, Slot::new(123)); + assert_eq!(event.beacon_block_root, block_root); + } + + #[tokio::test] + async fn test_cache_caches_multiple_heads_from_different_nodes() { + let cache = BeaconHeadCache::new(); + let head_1 = create_sse_head(10, 1); + let head_2 = create_sse_head(5, 2); + let head_3 = create_sse_head(8, 3); + + cache.insert(0, head_1.clone()).await; + cache.insert(1, head_2.clone()).await; + cache.insert(2, head_3.clone()).await; + + // Verify all are stored + assert_eq!(cache.get(0).await, Some(head_1)); + assert_eq!(cache.get(1).await, Some(head_2)); + assert_eq!(cache.get(2).await, Some(head_3)); + + // The latest should be slot 10 + let head_10 = create_sse_head(10, 99); + assert!(cache.is_latest(&head_10).await); + + 
// Anything with slot > 10 should be latest + let head_11 = create_sse_head(11, 99); + assert!(cache.is_latest(&head_11).await); + + // Anything with slot < 10 should not be latest + let head_9 = create_sse_head(9, 99); + assert!(!cache.is_latest(&head_9).await); + } + + #[tokio::test] + async fn test_cache_handles_concurrent_operations() { + let cache = Arc::new(BeaconHeadCache::new()); + let mut handles = vec![]; + + // Spawn multiple tasks that insert heads concurrently + for i in 0..10 { + let cache_clone = cache.clone(); + let handle = tokio::spawn(async move { + let head = create_sse_head(i as u64, (i % 256) as u8); + cache_clone.insert(i, head).await; + }); + handles.push(handle); + } + + // Wait for all tasks to complete + for handle in handles { + handle.await.unwrap(); + } + + // Verify all heads are cached + for i in 0..10 { + assert!(cache.get(i).await.is_some()); + } + } + + #[tokio::test] + async fn test_is_latest_after_cache_updates() { + let cache = BeaconHeadCache::new(); + + // Start with head at slot 5 + let head_5 = create_sse_head(5, 1); + cache.insert(0, head_5.clone()).await; + assert!(cache.is_latest(&head_5).await); + + // Add a higher slot + let head_10 = create_sse_head(10, 2); + cache.insert(1, head_10.clone()).await; + + // head_5 should no longer be latest + assert!(!cache.is_latest(&head_5).await); + // head_10 should be latest + assert!(cache.is_latest(&head_10).await); + + // Add an even higher slot + let head_15 = create_sse_head(15, 3); + cache.insert(2, head_15.clone()).await; + + // head_10 should no longer be latest + assert!(!cache.is_latest(&head_10).await); + // head_15 should be latest + assert!(cache.is_latest(&head_15).await); + } + + #[tokio::test] + async fn test_cache_default_is_empty() { + let cache = BeaconHeadCache::default(); + assert!(cache.get(0).await.is_none()); + assert!(cache.get(999).await.is_none()); + } + + #[tokio::test] + async fn test_is_latest_with_multiple_same_slot_heads() { + let cache = 
BeaconHeadCache::new(); + let head_slot_5_node1 = create_sse_head(5, 1); + let head_slot_5_node2 = create_sse_head(5, 2); + let head_slot_5_node3 = create_sse_head(5, 3); + + cache.insert(0, head_slot_5_node1).await; + cache.insert(1, head_slot_5_node2).await; + + // All heads with slot 5 should be considered latest + assert!(cache.is_latest(&head_slot_5_node3).await); + + // But heads with slot 4 should not be latest + let head_slot_4 = create_sse_head(4, 4); + assert!(!cache.is_latest(&head_slot_4).await); + } +} diff --git a/validator_client/beacon_node_fallback/src/lib.rs b/validator_client/beacon_node_fallback/src/lib.rs index 3c20e57200..b36ec70aa3 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -2,7 +2,10 @@ //! "fallback" behaviour; it will try a request on all of the nodes until one or none of them //! succeed. +pub mod beacon_head_monitor; pub mod beacon_node_health; + +use beacon_head_monitor::{BeaconHeadCache, HeadEvent, poll_head_event_from_beacon_nodes}; use beacon_node_health::{ BeaconNodeHealth, BeaconNodeSyncDistanceTiers, ExecutionEngineHealth, IsOptimistic, SyncDistanceTier, check_node_health, @@ -22,7 +25,10 @@ use std::time::{Duration, Instant}; use std::vec::Vec; use strum::VariantNames; use task_executor::TaskExecutor; -use tokio::{sync::RwLock, time::sleep}; +use tokio::{ + sync::{RwLock, mpsc}, + time::sleep, +}; use tracing::{debug, error, warn}; use types::{ChainSpec, Config as ConfigSpec, EthSpec, Slot}; use validator_metrics::{ENDPOINT_ERRORS, ENDPOINT_REQUESTS, inc_counter_vec}; @@ -68,6 +74,31 @@ pub fn start_fallback_updater_service<T: SlotClock + 'static, E: EthSpec>( return Err("Cannot start fallback updater without slot clock"); } + let beacon_nodes_ref = beacon_nodes.clone(); + + // the existence of head_monitor_send is overloaded with the predicate of + // requirement of starting the head monitoring service or not. 
+ if beacon_nodes_ref.head_monitor_send.is_some() { + let head_monitor_future = async move { + loop { + if let Err(error) = + poll_head_event_from_beacon_nodes::<E, T>(beacon_nodes_ref.clone()).await + { + warn!(error, "Head service failed retrying starting next slot"); + + let sleep_time = beacon_nodes_ref + .slot_clock + .as_ref() + .and_then(|slot_clock| slot_clock.duration_to_next_slot()) + .unwrap_or_else(|| beacon_nodes_ref.spec.get_slot_duration()); + sleep(sleep_time).await + } + } + }; + + executor.spawn(head_monitor_future, "head_monitoring"); + } + let future = async move { loop { beacon_nodes.update_all_candidates::<E>().await; @@ -96,12 +127,15 @@ pub fn start_fallback_updater_service<T: SlotClock + 'static, E: EthSpec>( pub enum Error<T> { /// We attempted to contact the node but it failed. RequestFailed(T), + /// The beacon node with the requested index was not available. + CandidateIndexUnknown(usize), } impl<T> Error<T> { pub fn request_failure(&self) -> Option<&T> { match self { Error::RequestFailed(e) => Some(e), + Error::CandidateIndexUnknown(_) => None, } } } @@ -380,6 +414,8 @@ pub struct BeaconNodeFallback<T> { pub candidates: Arc<RwLock<Vec<CandidateBeaconNode>>>, distance_tiers: BeaconNodeSyncDistanceTiers, slot_clock: Option<T>, + beacon_head_cache: Option<Arc<BeaconHeadCache>>, + head_monitor_send: Option<Arc<mpsc::Sender<HeadEvent>>>, broadcast_topics: Vec<ApiTopic>, spec: Arc<ChainSpec>, } @@ -396,6 +432,8 @@ impl<T: SlotClock> BeaconNodeFallback<T> { candidates: Arc::new(RwLock::new(candidates)), distance_tiers, slot_clock: None, + beacon_head_cache: None, + head_monitor_send: None, broadcast_topics, spec, } @@ -410,6 +448,15 @@ impl<T: SlotClock> BeaconNodeFallback<T> { self.slot_clock = Some(slot_clock); } + /// This the head monitor channel that streams events from all the beacon nodes that the + /// validator client is connected in the `BeaconNodeFallback`. 
This also initializes the + /// beacon_head_cache under the assumption the beacon_head_cache will always be needed when + /// head_monitor_send is set. + pub fn set_head_send(&mut self, head_monitor_send: Arc<mpsc::Sender<HeadEvent>>) { + self.head_monitor_send = Some(head_monitor_send); + self.beacon_head_cache = Some(Arc::new(BeaconHeadCache::new())); + } + /// The count of candidates, regardless of their state. pub async fn num_total(&self) -> usize { self.candidates.read().await.len() @@ -493,6 +540,10 @@ impl<T: SlotClock> BeaconNodeFallback<T> { let mut candidates = self.candidates.write().await; *candidates = new_candidates; + if let Some(cache) = &self.beacon_head_cache { + cache.purge_cache().await; + } + Ok(new_list) } @@ -646,6 +697,32 @@ impl<T: SlotClock> BeaconNodeFallback<T> { Err(Errors(errors)) } + /// Try `func` on a specific beacon node by index. + /// + /// Returns immediately if the preferred node succeeds, otherwise return an error. + pub async fn run_on_candidate_index<F, O, Err, R>( + &self, + candidate_index: usize, + func: F, + ) -> Result<O, Error<Err>> + where + F: Fn(BeaconNodeHttpClient) -> R + Clone, + R: Future<Output = Result<O, Err>>, + Err: Debug, + { + // Find the requested beacon node or return an error. + let candidates = self.candidates.read().await; + let Some(candidate) = candidates.iter().find(|c| c.index == candidate_index) else { + return Err(Error::CandidateIndexUnknown(candidate_index)); + }; + let candidate_node = candidate.beacon_node.clone(); + drop(candidates); + + Self::run_on_candidate(candidate_node, &func) + .await + .map_err(|(_, err)| err) + } + /// Run the future `func` on `candidate` while reporting metrics. 
async fn run_on_candidate<F, R, Err, O>( candidate: BeaconNodeHttpClient, @@ -1073,4 +1150,60 @@ mod tests { mock1.expect(3).assert(); mock2.expect(3).assert(); } + + #[tokio::test] + async fn run_on_candidate_index_success() { + let spec = Arc::new(MainnetEthSpec::default_spec()); + let (mut mock_beacon_node_1, beacon_node_1) = new_mock_beacon_node(0, &spec).await; + let (mut mock_beacon_node_2, beacon_node_2) = new_mock_beacon_node(1, &spec).await; + let (mut mock_beacon_node_3, beacon_node_3) = new_mock_beacon_node(2, &spec).await; + + let beacon_node_fallback = create_beacon_node_fallback( + vec![beacon_node_1, beacon_node_2, beacon_node_3], + vec![], + spec.clone(), + ); + + let mock1 = mock_beacon_node_1.mock_offline_node(); + let _mock2 = mock_beacon_node_2.mock_online_node(); + let mock3 = mock_beacon_node_3.mock_online_node(); + + // Request with preferred_index=1 (beacon_node_2) + let result = beacon_node_fallback + .run_on_candidate_index(1, |client| async move { client.get_node_version().await }) + .await; + + // Should succeed since beacon_node_2 is online + assert!(result.is_ok()); + + // mock1 should not be called since preferred node succeeds + mock1.expect(0).assert(); + mock3.expect(0).assert(); + } + + #[tokio::test] + async fn run_on_candidate_index_error() { + let spec = Arc::new(MainnetEthSpec::default_spec()); + let (mut mock_beacon_node_1, beacon_node_1) = new_mock_beacon_node(0, &spec).await; + let (mut mock_beacon_node_2, beacon_node_2) = new_mock_beacon_node(1, &spec).await; + let (mut mock_beacon_node_3, beacon_node_3) = new_mock_beacon_node(2, &spec).await; + + let beacon_node_fallback = create_beacon_node_fallback( + vec![beacon_node_1, beacon_node_2, beacon_node_3], + vec![], + spec.clone(), + ); + + let _mock1 = mock_beacon_node_1.mock_online_node(); + let _mock2 = mock_beacon_node_2.mock_offline_node(); + let _mock3 = mock_beacon_node_3.mock_offline_node(); + + // Request with preferred_index=1 (beacon_node_2), but it's offline + 
let result = beacon_node_fallback + .run_on_candidate_index(1, |client| async move { client.get_node_version().await }) + .await; + + // Should fail. + assert!(result.is_err()); + } } diff --git a/validator_client/initialized_validators/Cargo.toml b/validator_client/initialized_validators/Cargo.toml index 8b2ae62aea..53191ffe1e 100644 --- a/validator_client/initialized_validators/Cargo.toml +++ b/validator_client/initialized_validators/Cargo.toml @@ -12,7 +12,9 @@ eth2_keystore = { workspace = true } filesystem = { workspace = true } lockfile = { workspace = true } metrics = { workspace = true } +p12-keystore = "0.2" parking_lot = { workspace = true } +pem = "3" rand = { workspace = true } reqwest = { workspace = true } serde = { workspace = true } diff --git a/validator_client/initialized_validators/src/lib.rs b/validator_client/initialized_validators/src/lib.rs index db6d03174d..8928e4f508 100644 --- a/validator_client/initialized_validators/src/lib.rs +++ b/validator_client/initialized_validators/src/lib.rs @@ -397,6 +397,7 @@ pub fn load_pem_certificate<P: AsRef<Path>>(pem_path: P) -> Result<Certificate, Certificate::from_pem(&buf).map_err(Error::InvalidWeb3SignerRootCertificate) } +// Read a PKCS12 identity certificate and parse it into a PEM certificate. pub fn load_pkcs12_identity<P: AsRef<Path>>( pkcs12_path: P, password: &str, @@ -406,7 +407,29 @@ pub fn load_pkcs12_identity<P: AsRef<Path>>( .map_err(Error::InvalidWeb3SignerClientIdentityCertificateFile)? 
.read_to_end(&mut buf) .map_err(Error::InvalidWeb3SignerClientIdentityCertificateFile)?; - Identity::from_pkcs12_der(&buf, password) + + let keystore = p12_keystore::KeyStore::from_pkcs12(&buf, password).map_err(|e| { + Error::InvalidWeb3SignerClientIdentityCertificateFile(io::Error::new( + io::ErrorKind::InvalidData, + format!("PKCS12 parse error: {e:?}"), + )) + })?; + + let (_alias, key_chain) = keystore + .private_key_chain() + .ok_or(Error::MissingWeb3SignerClientIdentityCertificateFile)?; + + let key_pem = pem::encode(&pem::Pem::new("PRIVATE KEY", key_chain.key())); + let certs_pem: String = key_chain + .chain() + .iter() + .map(|cert| pem::encode(&pem::Pem::new("CERTIFICATE", cert.as_der()))) + .collect::<Vec<_>>() + .join("\n"); + + let combined_pem = format!("{key_pem}\n{certs_pem}"); + + Identity::from_pem(combined_pem.as_bytes()) .map_err(Error::InvalidWeb3SignerClientIdentityCertificate) } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 3e1c46097f..0eb0e9e5dd 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -476,6 +476,17 @@ pub struct ValidatorClient { )] pub beacon_nodes_sync_tolerances: Vec<u64>, + #[clap( + long, + help = "Disable the beacon head monitor which tries to attest as soon as any of the \ + configured beacon nodes sends a head event. Leaving the service enabled is \ + recommended, but disabling it can lead to reduced bandwidth and more predictable \ + usage of the primary beacon node (rather than the fastest BN).", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub disable_beacon_head_monitor: bool, + #[clap( long, help = "Disable Lighthouse's slashing protection for all web3signer keys. 
This can \ diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 1a286a74dc..d68a78b705 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -82,6 +82,8 @@ pub struct Config { pub broadcast_topics: Vec<ApiTopic>, /// Enables a service which attempts to measure latency between the VC and BNs. pub enable_latency_measurement_service: bool, + /// Enables the beacon head monitor that reacts to head updates from connected beacon nodes. + pub enable_beacon_head_monitor: bool, /// Defines the number of validators per `validator/register_validator` request sent to the BN. pub validator_registration_batch_size: usize, /// Whether we are running with distributed network support. @@ -132,6 +134,7 @@ impl Default for Config { builder_registration_timestamp_override: None, broadcast_topics: vec![ApiTopic::Subscriptions], enable_latency_measurement_service: true, + enable_beacon_head_monitor: true, validator_registration_batch_size: 500, distributed: false, initialized_validators: <_>::default(), @@ -377,6 +380,7 @@ impl Config { config.validator_store.builder_boost_factor = validator_client_config.builder_boost_factor; config.enable_latency_measurement_service = !validator_client_config.disable_latency_measurement_service; + config.enable_beacon_head_monitor = !validator_client_config.disable_beacon_head_monitor; config.validator_registration_batch_size = validator_client_config.validator_registration_batch_size; diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 2b863715d2..c0d561b175 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -9,10 +9,12 @@ use metrics::set_gauge; use monitoring_api::{MonitoringHttpClient, ProcessType}; use sensitive_url::SensitiveUrl; use slashing_protection::{SLASHING_PROTECTION_FILENAME, SlashingDatabase}; +use tokio::sync::Mutex; use account_utils::validator_definitions::ValidatorDefinitions; use beacon_node_fallback::{ - 
BeaconNodeFallback, CandidateBeaconNode, start_fallback_updater_service, + BeaconNodeFallback, CandidateBeaconNode, beacon_head_monitor::HeadEvent, + start_fallback_updater_service, }; use clap::ArgMatches; use doppelganger_service::DoppelgangerService; @@ -70,6 +72,8 @@ pub const AGGREGATION_PRE_COMPUTE_EPOCHS: u64 = 2; /// Number of slots in advance to compute sync selection proofs when in `distributed` mode. pub const AGGREGATION_PRE_COMPUTE_SLOTS_DISTRIBUTED: u64 = 1; +const MAX_HEAD_EVENT_QUEUE_LEN: usize = 1_024; + type ValidatorStore<E> = LighthouseValidatorStore<SystemTimeSlotClock, E>; #[derive(Clone)] @@ -395,6 +399,17 @@ impl<E: EthSpec> ProductionValidatorClient<E> { beacon_nodes.set_slot_clock(slot_clock.clone()); proposer_nodes.set_slot_clock(slot_clock.clone()); + // Only the beacon_nodes are used for attestation duties and thus biconditionally + // proposer_nodes do not need head_send ref. + let head_monitor_rx = if config.enable_beacon_head_monitor { + let (head_monitor_tx, head_receiver) = + mpsc::channel::<HeadEvent>(MAX_HEAD_EVENT_QUEUE_LEN); + beacon_nodes.set_head_send(Arc::new(head_monitor_tx)); + Some(Mutex::new(head_receiver)) + } else { + None + }; + let beacon_nodes = Arc::new(beacon_nodes); start_fallback_updater_service::<_, E>(context.executor.clone(), beacon_nodes.clone())?; @@ -505,15 +520,17 @@ impl<E: EthSpec> ProductionValidatorClient<E> { let block_service = block_service_builder.build()?; - let attestation_service = AttestationServiceBuilder::new() + let attestation_builder = AttestationServiceBuilder::new() .duties_service(duties_service.clone()) .slot_clock(slot_clock.clone()) .validator_store(validator_store.clone()) .beacon_nodes(beacon_nodes.clone()) .executor(context.executor.clone()) + .head_monitor_rx(head_monitor_rx) .chain_spec(context.eth2_config.spec.clone()) - .disable(config.disable_attesting) - .build()?; + .disable(config.disable_attesting); + + let attestation_service = attestation_builder.build()?; let 
preparation_service = PreparationServiceBuilder::new() .slot_clock(slot_clock.clone()) diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index 326ec6d01e..a9d5283312 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -1,5 +1,5 @@ use crate::duties_service::{DutiesService, DutyAndProof}; -use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; +use beacon_node_fallback::{ApiTopic, BeaconNodeFallback, beacon_head_monitor::HeadEvent}; use futures::future::join_all; use logging::crit; use slot_clock::SlotClock; @@ -7,10 +7,12 @@ use std::collections::HashMap; use std::ops::Deref; use std::sync::Arc; use task_executor::TaskExecutor; +use tokio::sync::Mutex; +use tokio::sync::mpsc; use tokio::time::{Duration, Instant, sleep, sleep_until}; -use tracing::{Instrument, debug, error, info, info_span, instrument, trace, warn}; +use tracing::{Instrument, debug, error, info, info_span, instrument, warn}; use tree_hash::TreeHash; -use types::{Attestation, AttestationData, ChainSpec, CommitteeIndex, EthSpec, Slot}; +use types::{Attestation, AttestationData, ChainSpec, CommitteeIndex, EthSpec, Hash256, Slot}; use validator_store::{Error as ValidatorStoreError, ValidatorStore}; /// Builds an `AttestationService`. 
@@ -22,6 +24,7 @@ pub struct AttestationServiceBuilder<S: ValidatorStore, T: SlotClock + 'static> beacon_nodes: Option<Arc<BeaconNodeFallback<T>>>, executor: Option<TaskExecutor>, chain_spec: Option<Arc<ChainSpec>>, + head_monitor_rx: Option<Mutex<mpsc::Receiver<HeadEvent>>>, disable: bool, } @@ -34,6 +37,7 @@ impl<S: ValidatorStore + 'static, T: SlotClock + 'static> AttestationServiceBuil beacon_nodes: None, executor: None, chain_spec: None, + head_monitor_rx: None, disable: false, } } @@ -73,6 +77,13 @@ impl<S: ValidatorStore + 'static, T: SlotClock + 'static> AttestationServiceBuil self } + pub fn head_monitor_rx( + mut self, + head_monitor_rx: Option<Mutex<mpsc::Receiver<HeadEvent>>>, + ) -> Self { + self.head_monitor_rx = head_monitor_rx; + self + } pub fn build(self) -> Result<AttestationService<S, T>, String> { Ok(AttestationService { inner: Arc::new(Inner { @@ -94,7 +105,9 @@ impl<S: ValidatorStore + 'static, T: SlotClock + 'static> AttestationServiceBuil chain_spec: self .chain_spec .ok_or("Cannot build AttestationService without chain_spec")?, + head_monitor_rx: self.head_monitor_rx, disable: self.disable, + latest_attested_slot: Mutex::new(Slot::default()), }), }) } @@ -108,10 +121,13 @@ pub struct Inner<S, T> { beacon_nodes: Arc<BeaconNodeFallback<T>>, executor: TaskExecutor, chain_spec: Arc<ChainSpec>, + head_monitor_rx: Option<Mutex<mpsc::Receiver<HeadEvent>>>, disable: bool, + latest_attested_slot: Mutex<Slot>, } -/// Attempts to produce attestations for all known validators 1/3rd of the way through each slot. +/// Attempts to produce attestations for all known validators 1/3rd of the way through each slot +/// or when a head event is received from the BNs. /// /// If any validators are on the same committee, a single attestation will be downloaded and /// returned to the beacon node. 
This attestation will have a signature from each of the @@ -161,19 +177,42 @@ impl<S: ValidatorStore + 'static, T: SlotClock + 'static> AttestationService<S, let interval_fut = async move { loop { - if let Some(duration_to_next_slot) = self.slot_clock.duration_to_next_slot() { - sleep(duration_to_next_slot + unaggregated_attestation_due).await; - - if let Err(e) = self.spawn_attestation_tasks() { - crit!(error = e, "Failed to spawn attestation tasks") - } else { - trace!("Spawned attestation tasks"); - } - } else { + let Some(duration) = self.slot_clock.duration_to_next_slot() else { error!("Failed to read slot clock"); - // If we can't read the slot clock, just wait another slot. sleep(slot_duration).await; continue; + }; + + let beacon_node_data = if self.head_monitor_rx.is_some() { + tokio::select! { + _ = sleep(duration + unaggregated_attestation_due) => None, + event = self.poll_for_head_events() => + event.map(|event| (event.beacon_node_index, event.beacon_block_root)), + } + } else { + sleep(duration + unaggregated_attestation_due).await; + None + }; + + let Some(current_slot) = self.slot_clock.now() else { + error!("Failed to read slot clock after trigger"); + continue; + }; + + let mut last_slot = self.latest_attested_slot.lock().await; + + if current_slot <= *last_slot { + debug!(%current_slot, "Attestation already initiated for the slot"); + continue; + } + + match self.spawn_attestation_tasks(beacon_node_data).await { + Ok(_) => { + *last_slot = current_slot; + } + Err(e) => { + crit!(error = e, "Failed to spawn attestation tasks") + } } } }; @@ -182,15 +221,38 @@ impl<S: ValidatorStore + 'static, T: SlotClock + 'static> AttestationService<S, Ok(()) } + async fn poll_for_head_events(&self) -> Option<HeadEvent> { + let Some(receiver) = &self.head_monitor_rx else { + return None; + }; + let mut receiver = receiver.lock().await; + loop { + match receiver.recv().await { + Some(head_event) => { + // Only return head events for the current slot - this ensures 
the + // block for this slot has been produced before triggering attestation + let current_slot = self.slot_clock.now()?; + if head_event.slot == current_slot { + return Some(head_event); + } + // Head event is for a previous slot, keep waiting + } + None => { + warn!("Head monitor channel closed unexpectedly"); + return None; + } + } + } + } + /// Spawn only one new task for attestation post-Electra /// For each required aggregates, spawn a new task that downloads, signs and uploads the /// aggregates to the beacon node. - fn spawn_attestation_tasks(&self) -> Result<(), String> { + async fn spawn_attestation_tasks( + &self, + beacon_node_data: Option<(usize, Hash256)>, + ) -> Result<(), String> { let slot = self.slot_clock.now().ok_or("Failed to read slot clock")?; - let duration_to_next_slot = self - .slot_clock - .duration_to_next_slot() - .ok_or("Unable to determine duration to next slot")?; // Create and publish an `Attestation` for all validators only once // as the committee_index is not included in AttestationData post-Electra @@ -201,29 +263,89 @@ impl<S: ValidatorStore + 'static, T: SlotClock + 'static> AttestationService<S, return Ok(()); } + debug!( + %slot, + from_head_monitor = beacon_node_data.is_some(), + "Starting attestation production" + ); + let attestation_service = self.clone(); - let attestation_data_handle = self + let mut attestation_data_from_head_event = None; + + if let Some((beacon_node_index, expected_block_root)) = beacon_node_data { + match attestation_service + .beacon_nodes + .run_on_candidate_index(beacon_node_index, |beacon_node| async move { + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::ATTESTATIONS_HTTP_GET], + ); + let data = beacon_node + .get_validator_attestation_data(slot, 0) + .await + .map_err(|e| format!("Failed to produce attestation data: {:?}", e))? 
+ .data; + + if data.beacon_block_root != expected_block_root { + return Err(format!( + "Attestation block root mismatch: expected {:?}, got {:?}", + expected_block_root, data.beacon_block_root + )); + } + Ok(data) + }) + .await + { + Ok(data) => attestation_data_from_head_event = Some(data), + Err(error) => { + warn!(?error, "Failed to attest based on head event"); + } + } + } + + // If the beacon node that sent us the head failed to attest, wait until the attestation + // deadline then try all BNs. + let attestation_data = if let Some(attestation_data) = attestation_data_from_head_event { + attestation_data + } else { + let duration_to_deadline = self + .slot_clock + .duration_to_slot(slot + 1) + .and_then(|duration_to_next_slot| { + duration_to_next_slot + .checked_add(self.chain_spec.get_unaggregated_attestation_due()) + }) + .map(|next_slot_deadline| { + next_slot_deadline.saturating_sub(self.chain_spec.get_slot_duration()) + }) + .unwrap_or(Duration::from_secs(0)); + sleep(duration_to_deadline).await; + + attestation_service + .beacon_nodes + .first_success(|beacon_node| async move { + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::ATTESTATIONS_HTTP_GET], + ); + let data = beacon_node + .get_validator_attestation_data(slot, 0) + .await + .map_err(|e| format!("Failed to produce attestation data: {:?}", e))? + .data; + Ok::<AttestationData, String>(data) + }) + .await + .map_err(|e| e.to_string())? + }; + + // Sign and publish attestations. 
+ let publication_handle = self .inner .executor .spawn_handle( async move { - let attestation_data = attestation_service - .beacon_nodes - .first_success(|beacon_node| async move { - let _timer = validator_metrics::start_timer_vec( - &validator_metrics::ATTESTATION_SERVICE_TIMES, - &[validator_metrics::ATTESTATIONS_HTTP_GET], - ); - beacon_node - .get_validator_attestation_data(slot, 0) - .await - .map_err(|e| format!("Failed to produce attestation data: {:?}", e)) - .map(|result| result.data) - }) - .await - .map_err(|e| e.to_string())?; - attestation_service .sign_and_publish_attestations( slot, @@ -241,12 +363,16 @@ impl<S: ValidatorStore + 'static, T: SlotClock + 'static> AttestationService<S, })?; Ok::<AttestationData, String>(attestation_data) }, - "unaggregated attestation production", + "unaggregated attestation publication", ) .ok_or("Failed to spawn attestation data task")?; // If a validator needs to publish an aggregate attestation, they must do so at 2/3 // through the slot. This delay triggers at this time + let duration_to_next_slot = self + .slot_clock + .duration_to_slot(slot + 1) + .ok_or("Unable to determine duration to next slot")?; let aggregate_production_instant = Instant::now() + duration_to_next_slot .checked_add(self.chain_spec.get_aggregate_attestation_due()) @@ -270,7 +396,7 @@ impl<S: ValidatorStore + 'static, T: SlotClock + 'static> AttestationService<S, self.inner.executor.spawn( async move { // Log an error if the handle fails and return, skipping aggregates - let attestation_data = match attestation_data_handle.await { + let attestation_data = match publication_handle.await { Ok(Some(Ok(data))) => data, Ok(Some(Err(err))) => { error!(?err, "Attestation production failed");