diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 42293d38a7..bcade948d7 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -13,87 +13,154 @@ concurrency: cancel-in-progress: true jobs: - run-local-testnet: - strategy: - matrix: - os: - - ubuntu-22.04 - - macos-12 - runs-on: ${{ matrix.os }} - env: - # Enable portable to prevent issues with caching `blst` for the wrong CPU type - FEATURES: portable,jemalloc + dockerfile-ubuntu: + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Install geth (ubuntu) - if: matrix.os == 'ubuntu-22.04' + - name: Build Docker image run: | - sudo add-apt-repository -y ppa:ethereum/ethereum - sudo apt-get update - sudo apt-get install ethereum - - name: Install geth (mac) - if: matrix.os == 'macos-12' - run: | - brew tap ethereum/ethereum - brew install ethereum - - name: Install GNU sed & GNU grep - if: matrix.os == 'macos-12' - run: | - brew install gnu-sed grep - echo "$(brew --prefix)/opt/gnu-sed/libexec/gnubin" >> $GITHUB_PATH - echo "$(brew --prefix)/opt/grep/libexec/gnubin" >> $GITHUB_PATH - # https://github.com/actions/cache/blob/main/examples.md#rust---cargo - - uses: actions/cache@v4 - id: cache-cargo - with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - target/ - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + docker build --build-arg FEATURES=portable -t lighthouse:local . 
+ docker save lighthouse:local -o lighthouse-docker.tar - - name: Install lighthouse - run: make && make install-lcli + - name: Upload Docker image artifact + uses: actions/upload-artifact@v4 + with: + name: lighthouse-docker + path: lighthouse-docker.tar + retention-days: 3 + + run-local-testnet: + runs-on: ubuntu-22.04 + needs: dockerfile-ubuntu + steps: + - uses: actions/checkout@v4 + + - name: Install dependencies + run: | + sudo add-apt-repository ppa:rmescandon/yq + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install -y kurtosis-cli yq + kurtosis analytics disable + + - name: Download Docker image artifact + uses: actions/download-artifact@v4 + with: + name: lighthouse-docker + path: . + + - name: Load Docker image + run: docker load -i lighthouse-docker.tar - name: Start local testnet - run: ./start_local_testnet.sh genesis.json && sleep 60 + run: ./start_local_testnet.sh -e local -c -b false && sleep 60 working-directory: scripts/local_testnet - - name: Print logs - run: ./dump_logs.sh - working-directory: scripts/local_testnet - - - name: Stop local testnet - run: ./stop_local_testnet.sh - working-directory: scripts/local_testnet - - - name: Clean-up testnet - run: ./clean.sh + - name: Stop local testnet and dump logs + run: ./stop_local_testnet.sh local working-directory: scripts/local_testnet - name: Start local testnet with blinded block production - run: ./start_local_testnet.sh -p genesis.json && sleep 60 + run: ./start_local_testnet.sh -e local-blinded -c -p -b false && sleep 60 working-directory: scripts/local_testnet - - name: Print logs for blinded block testnet - run: ./dump_logs.sh + - name: Stop local testnet and dump logs + run: ./stop_local_testnet.sh local-blinded working-directory: scripts/local_testnet - - name: Stop local testnet with blinded block production - run: ./stop_local_testnet.sh - working-directory: scripts/local_testnet + - name: 
Upload logs artifact + uses: actions/upload-artifact@v4 + with: + name: logs-local-testnet + path: | + scripts/local_testnet/logs + retention-days: 3 + + doppelganger-protection-success-test: + needs: dockerfile-ubuntu + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + + - name: Install dependencies + run: | + sudo add-apt-repository ppa:rmescandon/yq + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install -y kurtosis-cli yq + kurtosis analytics disable + + - name: Download Docker image artifact + uses: actions/download-artifact@v4 + with: + name: lighthouse-docker + path: . + + - name: Load Docker image + run: docker load -i lighthouse-docker.tar + + - name: Run the doppelganger protection success test script + run: | + ./doppelganger_protection.sh success + working-directory: scripts/tests + + - name: Upload logs artifact + uses: actions/upload-artifact@v4 + with: + name: logs-doppelganger-protection-success + path: | + scripts/local_testnet/logs + retention-days: 3 + + doppelganger-protection-failure-test: + needs: dockerfile-ubuntu + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + + - name: Install dependencies + run: | + sudo add-apt-repository ppa:rmescandon/yq + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install -y kurtosis-cli yq + kurtosis analytics disable + + - name: Download Docker image artifact + uses: actions/download-artifact@v4 + with: + name: lighthouse-docker + path: . 
+ + - name: Load Docker image + run: docker load -i lighthouse-docker.tar + + - name: Run the doppelganger protection failure test script + run: | + ./doppelganger_protection.sh failure + working-directory: scripts/tests + + - name: Upload logs artifact + uses: actions/upload-artifact@v4 + with: + name: logs-doppelganger-protection-failure + path: | + scripts/local_testnet/logs + retention-days: 3 + # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether # a PR is safe to merge. New jobs should be added here. local-testnet-success: name: local-testnet-success runs-on: ubuntu-latest - needs: ["run-local-testnet"] + needs: [ + 'dockerfile-ubuntu', + 'run-local-testnet', + 'doppelganger-protection-success-test', + 'doppelganger-protection-failure-test', + ] steps: - uses: actions/checkout@v4 - name: Check that success job is dependent on all others diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3d23b4110e..75063ee2e0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -31,38 +31,22 @@ jobs: strategy: matrix: arch: [aarch64-unknown-linux-gnu, - aarch64-unknown-linux-gnu-portable, x86_64-unknown-linux-gnu, - x86_64-unknown-linux-gnu-portable, x86_64-apple-darwin, - x86_64-apple-darwin-portable, - x86_64-windows, - x86_64-windows-portable] + x86_64-windows] include: - arch: aarch64-unknown-linux-gnu runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest' }} profile: maxperf - - arch: aarch64-unknown-linux-gnu-portable - runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest' }} - profile: maxperf - arch: x86_64-unknown-linux-gnu runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest' }} profile: maxperf - - arch: x86_64-unknown-linux-gnu-portable - 
runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest' }} - profile: maxperf - arch: x86_64-apple-darwin - runner: macos-latest - profile: maxperf - - arch: x86_64-apple-darwin-portable - runner: macos-latest + runner: macos-13 profile: maxperf - arch: x86_64-windows runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "release"]') || 'windows-2019' }} profile: maxperf - - arch: x86_64-windows-portable - runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "release"]') || 'windows-2019' }} - profile: maxperf runs-on: ${{ matrix.runner }} needs: extract-version @@ -90,53 +74,29 @@ jobs: # Builds # ============================== - - name: Build Lighthouse for aarch64-unknown-linux-gnu-portable - if: matrix.arch == 'aarch64-unknown-linux-gnu-portable' - run: | - cargo install cross - env CROSS_PROFILE=${{ matrix.profile }} make build-aarch64-portable - - name: Build Lighthouse for aarch64-unknown-linux-gnu if: matrix.arch == 'aarch64-unknown-linux-gnu' run: | cargo install cross - env CROSS_PROFILE=${{ matrix.profile }} make build-aarch64 - - - name: Build Lighthouse for x86_64-unknown-linux-gnu-portable - if: matrix.arch == 'x86_64-unknown-linux-gnu-portable' - run: | - cargo install cross - env CROSS_PROFILE=${{ matrix.profile }} make build-x86_64-portable + env CROSS_PROFILE=${{ matrix.profile }} make build-aarch64-portable - name: Build Lighthouse for x86_64-unknown-linux-gnu if: matrix.arch == 'x86_64-unknown-linux-gnu' run: | cargo install cross - env CROSS_PROFILE=${{ matrix.profile }} make build-x86_64 + env CROSS_PROFILE=${{ matrix.profile }} make build-x86_64-portable - name: Move cross-compiled binary - if: startsWith(matrix.arch, 'aarch64') - run: mv target/aarch64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ~/.cargo/bin/lighthouse + if: contains(matrix.arch, 'unknown-linux-gnu') + run: mv target/${{ 
matrix.arch }}/${{ matrix.profile }}/lighthouse ~/.cargo/bin/lighthouse - - name: Move cross-compiled binary - if: startsWith(matrix.arch, 'x86_64-unknown-linux-gnu') - run: mv target/x86_64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ~/.cargo/bin/lighthouse - - - name: Build Lighthouse for x86_64-apple-darwin portable - if: matrix.arch == 'x86_64-apple-darwin-portable' - run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }} - - - name: Build Lighthouse for x86_64-apple-darwin modern + - name: Build Lighthouse for x86_64-apple-darwin if: matrix.arch == 'x86_64-apple-darwin' - run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile ${{ matrix.profile }} - - - name: Build Lighthouse for Windows portable - if: matrix.arch == 'x86_64-windows-portable' run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }} - - name: Build Lighthouse for Windows modern + - name: Build Lighthouse for Windows if: matrix.arch == 'x86_64-windows' - run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile ${{ matrix.profile }} + run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }} - name: Configure GPG and create artifacts if: startsWith(matrix.arch, 'x86_64-windows') != true @@ -151,6 +111,11 @@ jobs: cd artifacts tar -czf lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz lighthouse echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz + for ext in "tar.gz" "tar.gz.asc";\ + do for f in *.$ext;\ + do cp $f "../${f%.$ext}-portable.$ext";\ + done;\ + done mv *tar.gz* .. 
- name: Configure GPG and create artifacts Windows @@ -179,6 +144,14 @@ jobs: path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz compression-level: 0 + - name: Upload artifact (copy) + if: startsWith(matrix.arch, 'x86_64-windows') != true + uses: actions/upload-artifact@v4 + with: + name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}-portable.tar.gz + path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}-portable.tar.gz + compression-level: 0 + - name: Upload signature uses: actions/upload-artifact@v4 with: @@ -186,6 +159,14 @@ jobs: path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc compression-level: 0 + - name: Upload signature (copy) + if: startsWith(matrix.arch, 'x86_64-windows') != true + uses: actions/upload-artifact@v4 + with: + name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}-portable.tar.gz.asc + path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}-portable.tar.gz.asc + compression-level: 0 + draft-release: name: Draft Release needs: [build, extract-version] @@ -229,9 +210,9 @@ jobs: ## Testing Checklist (DELETE ME) - - [ ] Run on synced Prater Sigma Prime nodes. + - [ ] Run on synced Holesky Sigma Prime nodes. - [ ] Run on synced Canary (mainnet) Sigma Prime nodes. - - [ ] Resync a Prater node. + - [ ] Resync a Holesky node. - [ ] Resync a mainnet node. 
## Release Checklist (DELETE ME) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 413dd2b95d..3049c6374d 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -29,6 +29,31 @@ env: # Enable portable to prevent issues with caching `blst` for the wrong CPU type TEST_FEATURES: portable jobs: + check-labels: + runs-on: ubuntu-latest + name: Check for 'skip-ci' label + outputs: + skip_ci: ${{ steps.set-output.outputs.SKIP_CI }} + steps: + - name: check for skip-ci label + id: set-output + env: + LABELS: ${{ toJson(github.event.pull_request.labels) }} + run: | + SKIP_CI="false" + if [ -z "${LABELS}" ]; then + LABELS="none"; + else + LABELS=$(echo ${LABELS} | jq -r '.[].name') + fi + for label in ${LABELS}; do + if [ "$label" = "skip-ci" ]; then + SKIP_CI="true" + break + fi + done + echo "::set-output name=skip_ci::$SKIP_CI" + target-branch-check: name: target-branch-check runs-on: ubuntu-latest @@ -38,6 +63,8 @@ jobs: run: test ${{ github.base_ref }} != "stable" release-tests-ubuntu: name: release-tests-ubuntu + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' # Use self-hosted runners only on the sigp repo. runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} steps: @@ -63,6 +90,8 @@ jobs: run: sccache --show-stats release-tests-windows: name: release-tests-windows + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "CI"]') || 'windows-2019' }} steps: - uses: actions/checkout@v4 @@ -97,6 +126,8 @@ jobs: run: sccache --show-stats beacon-chain-tests: name: beacon-chain-tests + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' # Use self-hosted runners only on the sigp repo. 
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} env: @@ -117,6 +148,8 @@ jobs: run: sccache --show-stats op-pool-tests: name: op-pool-tests + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -132,6 +165,8 @@ jobs: run: make test-op-pool network-tests: name: network-tests + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -147,6 +182,8 @@ jobs: run: make test-network slasher-tests: name: slasher-tests + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -162,6 +199,8 @@ jobs: run: make test-slasher debug-tests-ubuntu: name: debug-tests-ubuntu + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' # Use self-hosted runners only on the sigp repo. runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} env: @@ -186,6 +225,8 @@ jobs: run: sccache --show-stats state-transition-vectors-ubuntu: name: state-transition-vectors-ubuntu + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -198,6 +239,8 @@ jobs: run: make run-state-transition-tests ef-tests-ubuntu: name: ef-tests-ubuntu + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' # Use self-hosted runners only on the sigp repo. 
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} env: @@ -216,17 +259,10 @@ jobs: - name: Show cache stats if: env.SELF_HOSTED_RUNNERS == 'true' run: sccache --show-stats - dockerfile-ubuntu: - name: dockerfile-ubuntu - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Build the root Dockerfile - run: docker build --build-arg FEATURES=portable -t lighthouse:local . - - name: Test the built image - run: docker run -t lighthouse:local lighthouse --version basic-simulator-ubuntu: name: basic-simulator-ubuntu + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -239,6 +275,8 @@ jobs: run: cargo run --release --bin simulator basic-sim fallback-simulator-ubuntu: name: fallback-simulator-ubuntu + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -249,44 +287,10 @@ jobs: cache-target: release - name: Run a beacon chain sim which tests VC fallback behaviour run: cargo run --release --bin simulator fallback-sim - doppelganger-protection-test: - name: doppelganger-protection-test - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} - env: - # Enable portable to prevent issues with caching `blst` for the wrong CPU type - FEATURES: jemalloc,portable - steps: - - uses: actions/checkout@v4 - - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == 'false' - uses: moonrepo/setup-rust@v1 - with: - channel: stable - cache-target: release - - name: Install geth - if: env.SELF_HOSTED_RUNNERS == 'false' - run: | - sudo add-apt-repository -y ppa:ethereum/ethereum - sudo apt-get update - sudo apt-get install ethereum - - name: Install lighthouse - run: | - make - - name: Install lcli - # TODO: uncomment after the version of lcli 
in https://github.com/sigp/lighthouse/pull/5137 - # is installed on the runners - # if: env.SELF_HOSTED_RUNNERS == 'false' - run: make install-lcli - - name: Run the doppelganger protection failure test script - run: | - cd scripts/tests - ./doppelganger_protection.sh failure genesis.json - - name: Run the doppelganger protection success test script - run: | - cd scripts/tests - ./doppelganger_protection.sh success genesis.json execution-engine-integration-ubuntu: name: execution-engine-integration-ubuntu + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 @@ -332,6 +336,8 @@ jobs: run: make audit-CI - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose run: CARGO_HOME=$(readlink -f $HOME) make vendor + - name: Markdown-linter + run: make mdlint check-msrv: name: check-msrv runs-on: ubuntu-latest @@ -346,6 +352,8 @@ jobs: run: cargo check --workspace cargo-udeps: name: cargo-udeps + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -368,6 +376,8 @@ jobs: RUSTFLAGS: "" compile-with-beta-compiler: name: compile-with-beta-compiler + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -379,6 +389,8 @@ jobs: run: make cli-check: name: cli-check + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -393,8 +405,10 @@ jobs: # a PR is safe to merge. New jobs should be added here. 
test-suite-success: name: test-suite-success + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest needs: [ + 'check-labels', 'target-branch-check', 'release-tests-ubuntu', 'release-tests-windows', @@ -405,10 +419,8 @@ jobs: 'debug-tests-ubuntu', 'state-transition-vectors-ubuntu', 'ef-tests-ubuntu', - 'dockerfile-ubuntu', 'basic-simulator-ubuntu', 'fallback-simulator-ubuntu', - 'doppelganger-protection-test', 'execution-engine-integration-ubuntu', 'check-code', 'check-msrv', diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a408fcdd52..3c53558a10 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,12 +1,14 @@ # Contributors Guide + [![GitPOAP badge](https://public-api.gitpoap.io/v1/repo/sigp/lighthouse/badge)](https://www.gitpoap.io/gh/sigp/lighthouse) -Lighthouse is an open-source Ethereum 2.0 client. We're community driven and +Lighthouse is an open-source Ethereum consensus client. We're community driven and welcome all contribution. We aim to provide a constructive, respectful and fun environment for collaboration. -We are active contributors to the [Ethereum 2.0 specification](https://github.com/ethereum/eth2.0-specs) and attend all [Eth -2.0 implementers calls](https://github.com/ethereum/eth2.0-pm). +We are active contributors to +the [Ethereum Proof-of-Stake Consensus specification](https://github.com/ethereum/consensus-specs) and attend +all [Ethereum implementers calls](https://github.com/ethereum/pm/). This guide is geared towards beginners. If you're an open-source veteran feel free to just skim this document and get straight into crushing issues. @@ -41,7 +43,7 @@ We recommend the following work-flow for contributors: 1. **Find an issue** to work on, either because it's interesting or suitable to your skill-set. Use comments to communicate your intentions and ask -questions. + questions. 2. 
**Work in a feature branch** of your personal fork (github.com/YOUR_NAME/lighthouse) of the main repository (github.com/sigp/lighthouse). @@ -49,13 +51,13 @@ questions. `unstable` as the base branch to merge your changes into the main repository. 4. Wait for the repository maintainers to **review your changes** to ensure the issue is addressed satisfactorily. Optionally, mention your PR on -[discord](https://discord.gg/cyAszAh). + [discord](https://discord.gg/cyAszAh). 5. If the issue is addressed the repository maintainers will **merge your pull-request** and you'll be an official contributor! Generally, you find an issue you'd like to work on and announce your intentions to start work in a comment on the issue. Then, do your work on a separate -branch (a "feature branch") in your own fork of the main repository. Once +branch (a "feature branch") in your own fork of the main repository. Once you're happy and you think the issue has been addressed, create a pull request into the main repository. @@ -66,18 +68,20 @@ steps: 1. [Create a fork](https://help.github.com/articles/fork-a-repo/#fork-an-example-repository) -and [clone -it](https://help.github.com/articles/fork-a-repo/#step-2-create-a-local-clone-of-your-fork) -to your local machine. + and [clone + it](https://help.github.com/articles/fork-a-repo/#step-2-create-a-local-clone-of-your-fork) + to your local machine. 2. [Add an _"upstream"_ branch](https://help.github.com/articles/fork-a-repo/#step-3-configure-git-to-sync-your-fork-with-the-original-spoon-knife-repository) -that tracks github.com/sigp/lighthouse using `$ git remote add upstream -https://github.com/sigp/lighthouse.git` (pro-tip: [use SSH](https://help.github.com/articles/connecting-to-github-with-ssh/) instead of HTTPS). + that tracks github.com/sigp/lighthouse using `$ git remote add upstream + https://github.com/sigp/lighthouse.git` ( + pro-tip: [use SSH](https://help.github.com/articles/connecting-to-github-with-ssh/) instead of HTTPS). 3. 
Create a new feature branch with `$ git checkout -b your_feature_name`. The name of your branch isn't critical but it should be short and instructive. -E.g., if you're fixing a bug with serialization, you could name your branch -`fix_serialization_bug`. -4. Make sure you sign your commits. See [relevant doc](https://help.github.com/en/github/authenticating-to-github/about-commit-signature-verification). + E.g., if you're fixing a bug with serialization, you could name your branch + `fix_serialization_bug`. +4. Make sure you sign your commits. + See [relevant doc](https://help.github.com/en/github/authenticating-to-github/about-commit-signature-verification). 5. Commit your changes and push them to your fork with `$ git push origin your_feature_name`. 6. Go to your fork on github.com and use the web interface to create a pull @@ -92,22 +96,28 @@ by Rob Allen that provides much more detail on each of these steps, if you're having trouble. As always, jump on [discord](https://discord.gg/cyAszAh) if you get stuck. +Additionally, +the ["Contributing to Lighthouse" section](https://lighthouse-book.sigmaprime.io/contributing.html#contributing-to-lighthouse) +of the Lighthouse Book provides more details on the setup. ## FAQs ### I don't think I have anything to add There's lots to be done and there's all sorts of tasks. You can do anything -from correcting typos through to writing core consensus code. If you reach out, +from enhancing documentation through to writing core consensus code. If you reach out, we'll include you. +Please note, to maintain project quality, we may not accept PRs for small typos or changes +with minimal impact. + ### I'm not sure my Rust is good enough We're open to developers of all levels. If you create a PR and your code doesn't meet our standards, we'll help you fix it and we'll share the reasoning with you. Contributing to open-source is a great way to learn. 
-### I'm not sure I know enough about Ethereum 2.0 +### I'm not sure I know enough about Ethereum No problems, there's plenty of tasks that don't require extensive Ethereum knowledge. You can learn about Ethereum as you go. diff --git a/Cargo.lock b/Cargo.lock index eecc688e5f..8fefcb6a46 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -30,10 +30,7 @@ dependencies = [ "filesystem", "safe_arith", "sensitive_url", - "serde", - "serde_json", "slashing_protection", - "slog", "slot_clock", "tempfile", "tokio", @@ -284,12 +281,57 @@ dependencies = [ ] [[package]] -name = "ansi_term" -version = "0.12.1" +name = "anes" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "anstream" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540" dependencies = [ - "winapi", + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" + +[[package]] +name = "anstyle-parse" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +dependencies = [ + "anstyle", + "windows-sys 0.52.0", ] [[package]] @@ -533,7 +575,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.33", + "rustix 0.38.34", "slab", "tracing", "windows-sys 0.52.0", @@ -739,9 +781,9 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" @@ -781,7 +823,7 @@ dependencies = [ "merkle_proof", "oneshot_broadcast", "operation_pool", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "proto_array", "rand", "rayon", @@ -822,19 +864,15 @@ dependencies = [ "dirs", "environment", "eth2_config", - "eth2_network_config", "execution_layer", - "futures", "genesis", "hex", "http_api", "hyper 1.3.1", "lighthouse_network", - "lighthouse_version", "monitoring_api", "node_test_rig", "sensitive_url", - "serde", "serde_json", "slasher", "slog", @@ -849,18 +887,15 @@ dependencies = [ name = "beacon_processor" version = "0.1.0" dependencies = [ - "derivative", - "ethereum_ssz", "fnv", "futures", - "hex", "itertools", "lazy_static", "lighthouse_metrics", "lighthouse_network", "logging", "num_cpus", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "serde", "slog", "slot_clock", @@ -1037,12 +1072,9 @@ dependencies = [ "log", "logging", "serde", - "serde_json", - "serde_yaml", "slog", "slog-async", "slog-scope", - "slog-stdlog", "slog-term", "tokio", "types", @@ -1072,7 +1104,6 @@ dependencies = [ "reqwest", "sensitive_url", "serde", - "serde_json", ] [[package]] @@ -1191,9 +1222,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.95" +version = 
"1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" +checksum = "065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd" dependencies = [ "jobserver", "libc", @@ -1257,6 +1288,33 @@ dependencies = [ "windows-targets 0.52.5", ] +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + [[package]] name = "cipher" version = "0.3.0" @@ -1290,19 +1348,32 @@ dependencies = [ [[package]] name = "clap" -version = "2.34.0" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ - "ansi_term", - "atty", - "bitflags 1.3.2", - "strsim 0.8.0", - "textwrap", - "unicode-width", - "vec_map", + "clap_builder", ] +[[package]] +name = "clap_builder" +version = "4.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim 0.11.0", + "terminal_size", +] + +[[package]] +name = "clap_lex" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" + [[package]] name = "clap_utils" version = "0.1.0" @@ -1341,12 +1412,9 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "lighthouse_network", - "logging", "monitoring_api", "network", - "num_cpus", "operation_pool", - "parking_lot 0.12.1", "sensitive_url", "serde", "serde_yaml", @@ -1360,7 +1428,6 @@ dependencies = [ "time", "timer", "tokio", - "tree_hash", "types", ] @@ -1373,6 +1440,12 @@ dependencies = [ "cc", ] +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "compare_fields" version = "0.2.0" @@ -1391,9 +1464,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] @@ -1474,24 +1547,24 @@ dependencies = [ [[package]] name = "criterion" -version = "0.3.6" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" dependencies = [ - "atty", + "anes", "cast", + "ciborium", "clap", "criterion-plot", - "csv", + "is-terminal", "itertools", - "lazy_static", "num-traits", + "once_cell", "oorandom", "plotters", "rayon", "regex", "serde", - "serde_cbor", "serde_derive", "serde_json", "tinytemplate", @@ -1500,9 +1573,9 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.4.5" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", "itertools", @@ -1603,27 +1676,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "csv" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" -dependencies = [ - "csv-core", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "csv-core" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" -dependencies = [ - "memchr", -] - [[package]] name = "ctr" version = "0.7.0" @@ -1752,15 +1804,15 @@ checksum = "7762d17f1241643615821a8455a0b2c3e803784b058693d990b11f2dce25a0ca" [[package]] name = "data-encoding" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "data-encoding-macro" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20c01c06f5f429efdf2bae21eb67c28b3df3cf85b7dd2d8ef09c0838dac5d33e" +checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -1768,9 +1820,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0047d07f2c89b17dd631c80450d69841a6b5d7fb17278cbc43d7e4cfcf2576f3" +checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" dependencies = [ "data-encoding", "syn 1.0.109", @@ -1786,12 +1838,9 @@ dependencies = [ "clap_utils", 
"environment", "hex", - "logging", "slog", - "sloggers", "store", "strum", - "tempfile", "types", ] @@ -2132,13 +2181,11 @@ version = "0.2.0" dependencies = [ "beacon_chain", "bls", - "cached_tree_hash", "compare_fields", "compare_fields_derive", "derivative", "eth2_network_config", "ethereum-types 0.14.1", - "ethereum_serde_utils", "ethereum_ssz", "ethereum_ssz_derive", "execution_layer", @@ -2154,7 +2201,6 @@ dependencies = [ "serde_yaml", "snap", "state_processing", - "store", "swap_or_not_shuffle", "tree_hash", "tree_hash_derive", @@ -2240,7 +2286,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "syn 2.0.60", @@ -2336,15 +2382,12 @@ dependencies = [ "ethereum_ssz_derive", "execution_layer", "futures", - "hex", "lazy_static", "lighthouse_metrics", "merkle_proof", - "parking_lot 0.12.1", - "reqwest", + "parking_lot 0.12.2", "sensitive_url", "serde", - "serde_json", "serde_yaml", "slog", "sloggers", @@ -2387,7 +2430,6 @@ dependencies = [ "libsecp256k1", "lighthouse_network", "mediatype", - "mime", "pretty_reqwest_error", "procfs", "proto_array", @@ -2401,7 +2443,6 @@ dependencies = [ "ssz_types", "store", "tokio", - "tree_hash", "types", ] @@ -2473,7 +2514,6 @@ dependencies = [ "pretty_reqwest_error", "reqwest", "sensitive_url", - "serde_json", "serde_yaml", "sha2 0.9.9", "slog", @@ -2803,7 +2843,6 @@ version = "0.1.0" dependencies = [ "async-channel", "deposit_contract", - "environment", "ethers-core", "ethers-providers", "execution_layer", @@ -2828,7 +2867,6 @@ dependencies = [ "alloy-consensus", "alloy-rlp", "arc-swap", - "async-trait", "builder_client", "bytes", "environment", @@ -2838,7 +2876,6 @@ dependencies = [ "ethereum_ssz", "ethers-core", "fork_choice", - "futures", "hash-db", "hash256-std-hasher", "hex", @@ -2849,7 +2886,7 @@ dependencies = [ "lighthouse_metrics", 
"lighthouse_version", "lru", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pretty_reqwest_error", "rand", "reqwest", @@ -2898,9 +2935,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "fastrlp" @@ -2913,6 +2950,16 @@ dependencies = [ "bytes", ] +[[package]] +name = "fdlimit" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5" +dependencies = [ + "libc", + "thiserror", +] + [[package]] name = "ff" version = "0.12.1" @@ -2941,9 +2988,9 @@ checksum = "ec54ac60a7f2ee9a97cad9946f9bf629a3bc6a7ae59e68983dc9318f5a54b81a" [[package]] name = "fiat-crypto" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f" +checksum = "38793c55593b33412e3ae40c2c9781ffaa6f438f6f8c10f24e71846fbd7ae01e" [[package]] name = "field-offset" @@ -2996,9 +3043,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.28" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" dependencies = [ "crc32fast", "libz-sys", @@ -3159,7 +3206,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.21.11", + "rustls 0.21.12", ] [[package]] @@ -3331,7 +3378,6 @@ dependencies = [ "futures-timer", "getrandom", 
"hex_fmt", - "instant", "libp2p", "prometheus-client", "quick-protobuf", @@ -3341,9 +3387,9 @@ dependencies = [ "regex", "serde", "sha2 0.10.8", - "smallvec", "tracing", "void", + "web-time", ] [[package]] @@ -3389,9 +3435,13 @@ dependencies = [ [[package]] name = "half" -version = "1.8.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] [[package]] name = "hash-db" @@ -3416,18 +3466,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.13.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" -dependencies = [ - "ahash", -] - -[[package]] -name = "hashbrown" -version = "0.14.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", @@ -3448,7 +3489,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -3481,6 +3522,12 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "hermit-abi" version = "0.1.19" @@ -3534,7 +3581,7 @@ 
dependencies = [ "ipnet", "once_cell", "rand", - "socket2 0.5.6", + "socket2 0.5.7", "thiserror", "tinyvec", "tokio", @@ -3554,7 +3601,7 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "resolv-conf", "smallvec", @@ -3705,7 +3752,7 @@ dependencies = [ "lru", "network", "operation_pool", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "proto_array", "safe_arith", "sensitive_url", @@ -3782,7 +3829,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -3817,7 +3864,7 @@ dependencies = [ "futures-util", "http 0.2.12", "hyper 0.14.28", - "rustls 0.21.11", + "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", ] @@ -3847,7 +3894,7 @@ dependencies = [ "http-body 1.0.0", "hyper 1.3.1", "pin-project-lite", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", ] @@ -4027,7 +4074,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -4086,7 +4133,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.6", + "socket2 0.5.7", "widestring 1.1.0", "windows-sys 0.48.0", "winreg", @@ -4284,25 +4331,20 @@ dependencies = [ "clap", "clap_utils", "deposit_contract", - "directory", "env_logger 0.9.3", "environment", - "eth1_test_rig", "eth2", "eth2_network_config", "eth2_wallet", "ethereum_hashing", "ethereum_ssz", "execution_layer", - "genesis", "hex", - "int_to_bytes", "lighthouse_network", "lighthouse_version", "log", "malloc_utils", "rayon", - "sensitive_url", "serde", "serde_json", "serde_yaml", @@ -4339,15 +4381,15 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.153" +version = "0.2.154" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" [[package]] name = "libflate" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7d5654ae1795afc7ff76f4365c2c8791b0feb18e8996a96adad8ffd7c3b2bf" +checksum = "45d9dfdc14ea4ef0900c1cddbc8dcd553fbaacd8a4a282cf4018ae9dd04fb21e" dependencies = [ "adler32", "core2", @@ -4358,12 +4400,12 @@ dependencies = [ [[package]] name = "libflate_lz77" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be5f52fb8c451576ec6b79d3f4deb327398bc05bbdbd99021a6e77a4c855d524" +checksum = "e6e0d73b369f386f1c44abd9c570d5318f55ccde816ff4b562fa452e5182863d" dependencies = [ "core2", - "hashbrown 0.13.2", + "hashbrown 0.14.5", "rle-decode-fast", ] @@ -4394,7 +4436,7 @@ dependencies = [ "indexmap 1.9.3", "libc", "mdbx-sys", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "thiserror", ] @@ -4471,7 +4513,7 @@ dependencies = [ "multihash", "multistream-select", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "quick-protobuf", "rand", @@ -4494,16 +4536,16 @@ dependencies = [ "hickory-resolver", "libp2p-core", "libp2p-identity", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "smallvec", "tracing", ] [[package]] name = "libp2p-identify" -version = "0.44.1" +version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20499a945d2f0221fdc6269b3848892c0f370d2ee3e19c7f65a29d8f860f6126" +checksum = "b5d635ebea5ca0c3c3e77d414ae9b67eccf2a822be06091b9c1a0d13029a1e2f" dependencies = [ "asynchronous-codec 0.7.0", "either", @@ -4560,7 +4602,7 @@ dependencies = [ "libp2p-swarm", "rand", "smallvec", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tracing", "void", @@ -4594,7 +4636,7 @@ dependencies = [ 
"libp2p-core", "libp2p-identity", "nohash-hasher", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "smallvec", "tracing", @@ -4656,12 +4698,12 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-tls", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "quinn", "rand", "ring 0.16.20", - "rustls 0.21.11", - "socket2 0.5.6", + "rustls 0.21.12", + "socket2 0.5.7", "thiserror", "tokio", "tracing", @@ -4669,9 +4711,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.44.1" +version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e92532fc3c4fb292ae30c371815c9b10103718777726ea5497abc268a4761866" +checksum = "80cae6cb75f89dbca53862f9ebe0b9f463aa7b302762fcfaafb9e51dcc9b0f7e" dependencies = [ "either", "fnv", @@ -4681,6 +4723,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", + "lru", "multistream-select", "once_cell", "rand", @@ -4692,11 +4735,11 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.34.1" +version = "0.34.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b644268b4acfdaa6a6100b31226ee7a36d96ab4c43287d113bfd2308607d8b6f" +checksum = "5daceb9dd908417b6dfcfe8e94098bc4aac54500c282e78120b885dadc09b999" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.60", @@ -4714,7 +4757,7 @@ dependencies = [ "libc", "libp2p-core", "libp2p-identity", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tracing", ] @@ -4731,7 +4774,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.16.20", - "rustls 0.21.11", + "rustls 0.21.12", "rustls-webpki 0.101.7", "thiserror", "x509-parser", @@ -4740,9 +4783,9 @@ dependencies = [ [[package]] name = "libp2p-upnp" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b49cc89949bf0e06869297cd4fe2c132358c23fe93e76ad43950453df4da3d35" +checksum = 
"cccf04b0e3ff3de52d07d5fd6c3b061d0e7f908ffc683c32d9638caedce86fc8" dependencies = [ "futures", "futures-timer", @@ -4766,7 +4809,7 @@ dependencies = [ "thiserror", "tracing", "yamux 0.12.1", - "yamux 0.13.1", + "yamux 0.13.2", ] [[package]] @@ -4882,10 +4925,8 @@ dependencies = [ "slasher", "slashing_protection", "slog", - "sloggers", "task_executor", "tempfile", - "tracing-subscriber", "types", "unused_port", "validator_client", @@ -4897,7 +4938,6 @@ dependencies = [ name = "lighthouse_metrics" version = "0.2.0" dependencies = [ - "lazy_static", "prometheus", ] @@ -4906,8 +4946,6 @@ name = "lighthouse_network" version = "0.2.0" dependencies = [ "async-channel", - "base64 0.21.7", - "byteorder", "bytes", "delay_map", "directory", @@ -4919,12 +4957,8 @@ dependencies = [ "ethereum_ssz_derive", "fnv", "futures", - "futures-ticker", - "getrandom", "gossipsub", "hex", - "hex_fmt", - "instant", "lazy_static", "libp2p", "libp2p-mplex", @@ -4932,7 +4966,7 @@ dependencies = [ "lighthouse_version", "lru", "lru_cache", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "prometheus-client", "quickcheck", "quickcheck_macros", @@ -4954,9 +4988,6 @@ dependencies = [ "tokio", "tokio-io-timeout", "tokio-util 0.6.10", - "tracing", - "tree_hash", - "tree_hash_derive", "types", "unsigned-varint 0.6.0", "unused_port", @@ -5013,9 +5044,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -5042,11 +5073,10 @@ dependencies = [ "chrono", "lazy_static", "lighthouse_metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "serde", "serde_json", "slog", - "slog-async", "slog-term", "sloggers", "take_mut", @@ -5064,7 +5094,7 @@ version = "0.12.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -5102,7 +5132,7 @@ dependencies = [ "lazy_static", "libc", "lighthouse_metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", ] [[package]] @@ -5249,7 +5279,7 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "itertools", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rayon", "serde", "smallvec", @@ -5480,10 +5510,8 @@ dependencies = [ "beacon_processor", "delay_map", "derivative", - "environment", "error-chain", "eth2", - "ethereum-types 0.14.1", "ethereum_ssz", "execution_layer", "fnv", @@ -5497,12 +5525,10 @@ dependencies = [ "lighthouse_metrics", "lighthouse_network", "logging", - "lru", "lru_cache", "matches", - "num_cpus", "operation_pool", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "rlp", "slog", @@ -5517,7 +5543,6 @@ dependencies = [ "task_executor", "tokio", "tokio-stream", - "tokio-util 0.6.10", "types", ] @@ -5698,7 +5723,7 @@ checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" name = "oneshot_broadcast" version = "0.1.0" dependencies = [ - "parking_lot 0.12.1", + "parking_lot 0.12.2", ] [[package]] @@ -5805,7 +5830,7 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "maplit", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "rayon", "serde", @@ -5904,12 +5929,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" dependencies = [ "lock_api", - "parking_lot_core 0.9.9", + "parking_lot_core 0.9.10", ] [[package]] @@ -5928,15 +5953,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.9" +version = 
"0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.4.1", + "redox_syscall 0.5.1", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] [[package]] @@ -5998,7 +6023,7 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "serde", ] @@ -6019,9 +6044,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" +checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" dependencies = [ "memchr", "thiserror", @@ -6156,15 +6181,15 @@ dependencies = [ [[package]] name = "polling" -version = "3.6.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0c976a60b2d7e99d6f229e414670a9b85d13ac305cc6d1e9c134de58c5aaaf6" +checksum = "645493cf344456ef24219d02a768cf1fb92ddf8c92161679ae3d91b91a637be3" dependencies = [ "cfg-if", "concurrent-queue", "hermit-abi 0.3.9", "pin-project-lite", - "rustix 0.38.33", + "rustix 0.38.34", "tracing", "windows-sys 0.52.0", ] @@ -6351,7 +6376,7 @@ dependencies = [ "fnv", "lazy_static", "memchr", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "protobuf", "thiserror", ] @@ -6364,7 +6389,7 @@ checksum = "c1ca959da22a332509f2a73ae9e5f23f9dcfc31fd3a54d71f159495bd5909baa" dependencies = [ "dtoa", "itoa", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "prometheus-client-derive-encode", ] @@ -6512,7 +6537,7 @@ dependencies = [ "quinn-proto", "quinn-udp", 
"rustc-hash", - "rustls 0.21.11", + "rustls 0.21.12", "thiserror", "tokio", "tracing", @@ -6528,7 +6553,7 @@ dependencies = [ "rand", "ring 0.16.20", "rustc-hash", - "rustls 0.21.11", + "rustls 0.21.12", "slab", "thiserror", "tinyvec", @@ -6543,7 +6568,7 @@ checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" dependencies = [ "bytes", "libc", - "socket2 0.5.6", + "socket2 0.5.7", "tracing", "windows-sys 0.48.0", ] @@ -6564,7 +6589,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" dependencies = [ "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "scheduled-thread-pool", ] @@ -6679,6 +6704,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" +dependencies = [ + "bitflags 2.5.0", +] + [[package]] name = "redox_users" version = "0.4.5" @@ -6759,7 +6793,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.11", + "rustls 0.21.12", "rustls-pemfile 1.0.4", "serde", "serde_json", @@ -7007,9 +7041,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.33" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3cc72858054fcff6d7dea32df2aeaee6a7c24227366d7ea429aada2f26b16ad" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ "bitflags 2.5.0", "errno", @@ -7020,9 +7054,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.11" +version = "0.21.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring 0.17.8", @@ -7039,7 
+7073,7 @@ dependencies = [ "log", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.2", + "rustls-webpki 0.102.3", "subtle", "zeroize", ] @@ -7059,15 +7093,15 @@ version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" +checksum = "beb461507cee2c2ff151784c52762cf4d9ff6a61f3e80968600ed24fa837fa54" [[package]] name = "rustls-webpki" @@ -7081,9 +7115,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.2" +version = "0.102.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +checksum = "f3bce581c0dd41bce533ce695a1437fa16a7ab5ac3ccfa99fe1a620a7885eabf" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -7186,7 +7220,7 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ - "parking_lot 0.12.1", + "parking_lot 0.12.2", ] [[package]] @@ -7317,9 +7351,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.198" +version = "1.0.200" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" +checksum = "ddc6f9cc94d67c0e21aaf7eda3a010fd3af78ebf6e096aa6e2e13c79749cce4f" dependencies = [ "serde_derive", ] @@ -7334,21 +7368,11 @@ dependencies = [ "serde_urlencoded", ] -[[package]] -name = "serde_cbor" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" -dependencies = [ - "half", - "serde", -] - [[package]] name = "serde_derive" -version = "1.0.198" +version = "1.0.200" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" +checksum = "856f046b9400cee3c8c94ed572ecdb752444c24528c035cd35882aad6f492bcb" dependencies = [ "proc-macro2", "quote", @@ -7572,17 +7596,14 @@ version = "0.2.0" dependencies = [ "clap", "env_logger 0.9.3", - "eth1", "eth2_network_config", - "ethereum-types 0.14.1", "execution_layer", "futures", "node_test_rig", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rayon", "sensitive_url", "serde_json", - "ssz_types", "tokio", "types", ] @@ -7620,13 +7641,12 @@ dependencies = [ "logging", "lru", "maplit", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "rayon", "safe_arith", "serde", "slog", - "sloggers", "strum", "tempfile", "tree_hash", @@ -7777,7 +7797,7 @@ version = "0.2.0" dependencies = [ "lazy_static", "lighthouse_metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "types", ] @@ -7822,9 +7842,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", @@ -7936,7 +7956,6 @@ name = "store" version = "0.2.0" dependencies = [ "beacon_chain", - "bls", "db-key", "directory", "ethereum_ssz", @@ -7945,14 +7964,11 @@ dependencies = [ "lazy_static", "leveldb", "lighthouse_metrics", - "logging", "lru", - "parking_lot 0.12.1", - "safe_arith", + "parking_lot 0.12.2", "serde", "slog", "sloggers", - "smallvec", "state_processing", "strum", "tempfile", @@ -7970,18 +7986,18 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = 
"strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - [[package]] name = "strsim" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "strsim" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" + [[package]] name = "strum" version = "0.24.1" @@ -7997,7 +8013,7 @@ version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "rustversion", @@ -8120,9 +8136,8 @@ name = "system_health" version = "0.1.0" dependencies = [ "lighthouse_network", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "serde", - "serde_json", "sysinfo", "types", ] @@ -8174,7 +8189,7 @@ checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", "fastrand", - "rustix 0.38.33", + "rustix 0.38.34", "windows-sys 0.52.0", ] @@ -8198,6 +8213,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" +dependencies = [ + "rustix 0.38.34", + "windows-sys 0.48.0", +] + [[package]] name = "test-test_logger" version = "0.1.0" @@ -8231,15 +8256,6 @@ dependencies = [ "sha2 0.10.8", ] -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - [[package]] name = "thiserror" version = 
"1.0.59" @@ -8387,7 +8403,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.6", + "socket2 0.5.7", "tokio-macros", "windows-sys 0.48.0", ] @@ -8436,14 +8452,14 @@ dependencies = [ "futures-channel", "futures-util", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "percent-encoding", "phf", "pin-project-lite", "postgres-protocol", "postgres-types", "rand", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tokio-util 0.7.10", "whoami", @@ -8455,7 +8471,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.11", + "rustls 0.21.12", "tokio", ] @@ -8732,9 +8748,9 @@ dependencies = [ [[package]] name = "triomphe" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee8098afad3fb0c54a9007aab6804558410503ad676d4633f9c2559a00ac0f" +checksum = "859eb650cfee7434994602c3a68b25d77ad9e68c8a6cd491616ef86661382eb3" dependencies = [ "serde", "stable_deref_trait", @@ -8780,7 +8796,7 @@ dependencies = [ "merkle_proof", "metastruct", "milhouse", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "paste", "rand", "rand_xorshift", @@ -8796,7 +8812,6 @@ dependencies = [ "smallvec", "ssz_types", "state_processing", - "strum", "superstruct", "swap_or_not_shuffle", "tempfile", @@ -8867,12 +8882,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-width" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" - [[package]] name = "unicode-xid" version = "0.2.4" @@ -8949,7 +8958,7 @@ version = "0.1.0" dependencies = [ "lazy_static", "lru_cache", - "parking_lot 0.12.1", + "parking_lot 0.12.2", ] [[package]] @@ -8963,6 +8972,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "utf8parse" +version = "0.2.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + [[package]] name = "uuid" version = "0.8.2" @@ -8989,6 +9004,7 @@ dependencies = [ "eth2", "eth2_keystore", "ethereum_serde_utils", + "fdlimit", "filesystem", "futures", "hex", @@ -9002,7 +9018,7 @@ dependencies = [ "logging", "malloc_utils", "monitoring_api", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "reqwest", "ring 0.16.20", @@ -9051,12 +9067,10 @@ name = "validator_manager" version = "0.1.0" dependencies = [ "account_utils", - "bls", "clap", "clap_utils", "environment", "eth2", - "eth2_keystore", "eth2_network_config", "eth2_wallet", "ethereum_serde_utils", @@ -9292,13 +9306,12 @@ dependencies = [ "beacon_chain", "beacon_node", "bls", - "byteorder", "clap", + "clap_utils", "diesel", "diesel_migrations", "env_logger 0.9.3", "eth2", - "hex", "http_api", "hyper 1.3.1", "log", @@ -9329,6 +9342,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "web3signer_tests" version = "0.1.0" @@ -9340,7 +9363,7 @@ dependencies = [ "eth2_network_config", "futures", "lazy_static", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "reqwest", "serde", "serde_json", @@ -9402,11 +9425,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -9777,7 +9800,7 @@ dependencies = [ "futures", 
"log", "nohash-hasher", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand", "static_assertions", @@ -9785,14 +9808,15 @@ dependencies = [ [[package]] name = "yamux" -version = "0.13.1" -source = "git+https://github.com/sigp/rust-yamux.git#12a23aa0e34b7807c0c5f87f06b3438f7d6c2ed0" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f97202f6b125031b95d83e01dc57292b529384f80bfae4677e4bbc10178cf72" dependencies = [ "futures", "instant", "log", "nohash-hasher", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand", "static_assertions", diff --git a/Cargo.toml b/Cargo.toml index be2011ba28..9359d28721 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -102,12 +102,12 @@ bincode = "1" bitvec = "1" byteorder = "1" bytes = "1" +clap = { version = "4.5.4", features = ["cargo", "wrap_help"] } # Turn off c-kzg's default features which include `blst/portable`. We can turn on blst's portable # feature ourselves when desired. c-kzg = { version = "1", default-features = false } -clap = "2" compare_fields_derive = { path = "common/compare_fields_derive" } -criterion = "0.3" +criterion = "0.5" delay_map = "0.3" derivative = "2" dirs = "3" @@ -238,9 +238,6 @@ validator_client = { path = "validator_client" } validator_dir = { path = "common/validator_dir" } warp_utils = { path = "common/warp_utils" } -[patch.crates-io] -yamux = { git = "https://github.com/sigp/rust-yamux.git" } - [profile.maxperf] inherits = "release" lto = "fat" diff --git a/Makefile b/Makefile index 12d33cc3a8..3e6934e6b5 100644 --- a/Makefile +++ b/Makefile @@ -214,6 +214,10 @@ cli: cli-local: make && ./scripts/cli.sh +# Check for markdown files +mdlint: + ./scripts/mdlint.sh + # Runs the entire test suite, downloading test vectors if required. 
test-full: cargo-fmt test-release test-debug test-ef test-exec-engine diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 0fab7b31fe..7f2fa05a88 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -27,9 +27,6 @@ safe_arith = { workspace = true } slot_clock = { workspace = true } filesystem = { workspace = true } sensitive_url = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -slog = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/account_manager/src/lib.rs b/account_manager/src/lib.rs index ce7e8a42c2..f1160fff9c 100644 --- a/account_manager/src/lib.rs +++ b/account_manager/src/lib.rs @@ -2,8 +2,11 @@ mod common; pub mod validator; pub mod wallet; -use clap::App; +use clap::Arg; +use clap::ArgAction; use clap::ArgMatches; +use clap::Command; +use clap_utils::FLAG_HEADER; use environment::Environment; use types::EthSpec; @@ -13,25 +16,36 @@ pub const VALIDATOR_DIR_FLAG: &str = "validator-dir"; pub const VALIDATOR_DIR_FLAG_ALIAS: &str = "validators-dir"; pub const WALLETS_DIR_FLAG: &str = "wallets-dir"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) - .visible_aliases(&["a", "am", "account", CMD]) +pub fn cli_app() -> Command { + Command::new(CMD) + .visible_aliases(["a", "am", "account", CMD]) .about("Utilities for generating and managing Ethereum 2.0 accounts.") + .display_order(0) + .arg( + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) .subcommand(wallet::cli_app()) .subcommand(validator::cli_app()) } /// Run the account manager, returning an error if the operation did not succeed. 
-pub fn run(matches: &ArgMatches<'_>, env: Environment) -> Result<(), String> { +pub fn run(matches: &ArgMatches, env: Environment) -> Result<(), String> { match matches.subcommand() { - (wallet::CMD, Some(matches)) => wallet::cli_run(matches)?, - (validator::CMD, Some(matches)) => validator::cli_run(matches, env)?, - (unknown, _) => { + Some((wallet::CMD, matches)) => wallet::cli_run(matches)?, + Some((validator::CMD, matches)) => validator::cli_run(matches, env)?, + Some((unknown, _)) => { return Err(format!( "{} is not a valid {} command. See --help.", unknown, CMD )); } + _ => return Err("No subcommand provided, see --help for options".to_string()), } Ok(()) diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index 93b041c61c..cfe4d8e94a 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -4,7 +4,8 @@ use crate::{SECRETS_DIR_FLAG, WALLETS_DIR_FLAG}; use account_utils::{ random_password, read_password_from_user, strip_off_newlines, validator_definitions, PlainText, }; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use directory::{ ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_SECRET_DIR, DEFAULT_WALLET_DIR, }; @@ -26,73 +27,83 @@ pub const COUNT_FLAG: &str = "count"; pub const AT_MOST_FLAG: &str = "at-most"; pub const WALLET_PASSWORD_PROMPT: &str = "Enter your wallet's password:"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Creates new validators from an existing EIP-2386 wallet using the EIP-2333 HD key \ derivation scheme.", ) .arg( - Arg::with_name(WALLET_NAME_FLAG) + Arg::new(WALLET_NAME_FLAG) .long(WALLET_NAME_FLAG) .value_name("WALLET_NAME") .help("Use the wallet identified by this name") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(WALLET_PASSWORD_FLAG) + 
Arg::new(WALLET_PASSWORD_FLAG) .long(WALLET_PASSWORD_FLAG) .value_name("WALLET_PASSWORD_PATH") .help("A path to a file containing the password which will unlock the wallet.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(WALLETS_DIR_FLAG) + Arg::new(WALLETS_DIR_FLAG) .long(WALLETS_DIR_FLAG) .value_name(WALLETS_DIR_FLAG) .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/{network}/wallets") - .takes_value(true) - .conflicts_with("datadir"), + .action(ArgAction::Set) + .conflicts_with("datadir") + .display_order(0) ) .arg( - Arg::with_name(SECRETS_DIR_FLAG) + Arg::new(SECRETS_DIR_FLAG) .long(SECRETS_DIR_FLAG) .value_name("SECRETS_DIR") .help( "The path where the validator keystore passwords will be stored. \ Defaults to ~/.lighthouse/{network}/secrets", ) - .takes_value(true), + .conflicts_with("datadir") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(DEPOSIT_GWEI_FLAG) + Arg::new(DEPOSIT_GWEI_FLAG) .long(DEPOSIT_GWEI_FLAG) .value_name("DEPOSIT_GWEI") .help( "The GWEI value of the deposit amount. Defaults to the minimum amount \ required for an active validator (MAX_EFFECTIVE_BALANCE)", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(STORE_WITHDRAW_FLAG) + Arg::new(STORE_WITHDRAW_FLAG) .long(STORE_WITHDRAW_FLAG) .help( "If present, the withdrawal keystore will be stored alongside the voting \ keypair. 
It is generally recommended to *not* store the withdrawal key and \ instead generate them from the wallet seed when required.", - ), + ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name(COUNT_FLAG) + Arg::new(COUNT_FLAG) .long(COUNT_FLAG) .value_name("VALIDATOR_COUNT") .help("The number of validators to create, regardless of how many already exist") .conflicts_with("at-most") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(AT_MOST_FLAG) + Arg::new(AT_MOST_FLAG) .long(AT_MOST_FLAG) .value_name("AT_MOST_VALIDATORS") .help( @@ -100,14 +111,18 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { reach the given count. Never deletes an existing validator.", ) .conflicts_with("count") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0) + .action(ArgAction::SetTrue) ) } @@ -119,15 +134,15 @@ pub fn cli_run( let spec = env.core_context().eth2_config.spec; let name: Option = clap_utils::parse_optional(matches, WALLET_NAME_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); - let wallet_base_dir = if matches.value_of("datadir").is_some() { + let wallet_base_dir = if matches.get_one::("datadir").is_some() { let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; path.join(DEFAULT_WALLET_DIR) } else { parse_path_or_default_with_flag(matches, WALLETS_DIR_FLAG, DEFAULT_WALLET_DIR)? 
}; - let secrets_dir = if matches.value_of("datadir").is_some() { + let secrets_dir = if matches.get_one::("datadir").is_some() { let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; path.join(DEFAULT_SECRET_DIR) } else { @@ -144,7 +159,7 @@ pub fn cli_run( return Err(format!( "No wallet directory at {:?}. Use the `lighthouse --network {} {} {} {}` command to create a wallet", wallet_base_dir, - matches.value_of("network").unwrap_or(""), + matches.get_one::("network").unwrap_or(&String::from("")), crate::CMD, crate::wallet::CMD, crate::wallet::create::CMD @@ -245,7 +260,7 @@ pub fn cli_run( .voting_keystore(keystores.voting, voting_password.as_bytes()) .withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes()) .create_eth1_tx_data(deposit_gwei, &spec) - .store_withdrawal_keystore(matches.is_present(STORE_WITHDRAW_FLAG)) + .store_withdrawal_keystore(matches.get_flag(STORE_WITHDRAW_FLAG)) .build() .map_err(|e| format!("Unable to build validator directory: {:?}", e))?; diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index bc9e0ee1dd..277d2ae8ec 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -1,6 +1,7 @@ use crate::wallet::create::STDIN_INPUTS_FLAG; use bls::{Keypair, PublicKey}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use environment::Environment; use eth2::{ types::{GenesisData, StateId, ValidatorData, ValidatorId, ValidatorStatus}, @@ -28,48 +29,59 @@ pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/"; pub const CONFIRMATION_PHRASE: &str = "Exit my validator"; pub const WEBSITE_URL: &str = "https://lighthouse-book.sigmaprime.io/voluntary-exit.html"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("exit") +pub fn cli_app() -> Command { + Command::new("exit") .about("Submits a VoluntaryExit to the beacon chain for a given validator keystore.") .arg( - 
Arg::with_name(KEYSTORE_FLAG) + Arg::new(KEYSTORE_FLAG) .long(KEYSTORE_FLAG) .value_name("KEYSTORE_PATH") .help("The path to the EIP-2335 voting keystore for the validator") - .takes_value(true) - .required(true), + .action(ArgAction::Set) + .required(true) + .display_order(0) ) .arg( - Arg::with_name(PASSWORD_FILE_FLAG) + Arg::new(PASSWORD_FILE_FLAG) .long(PASSWORD_FILE_FLAG) .value_name("PASSWORD_FILE_PATH") .help("The path to the password file which unlocks the validator voting keystore") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(BEACON_SERVER_FLAG) + Arg::new(BEACON_SERVER_FLAG) .long(BEACON_SERVER_FLAG) .value_name("NETWORK_ADDRESS") .help("Address to a beacon node HTTP API") .default_value(DEFAULT_BEACON_NODE) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(NO_WAIT) + Arg::new(NO_WAIT) .long(NO_WAIT) .help("Exits after publishing the voluntary exit without waiting for confirmation that the exit was included in the beacon chain") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name(NO_CONFIRMATION) + Arg::new(NO_CONFIRMATION) .long(NO_CONFIRMATION) .help("Exits without prompting for confirmation that you understand the implications of a voluntary exit. 
This should be used with caution") + .display_order(0) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0) ) } @@ -78,9 +90,9 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< let password_file_path: Option = clap_utils::parse_optional(matches, PASSWORD_FILE_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); - let no_wait = matches.is_present(NO_WAIT); - let no_confirmation = matches.is_present(NO_CONFIRMATION); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); + let no_wait = matches.get_flag(NO_WAIT); + let no_confirmation = matches.get_flag(NO_CONFIRMATION); let spec = env.eth2_config().spec.clone(); let server_url: String = clap_utils::parse_required(matches, BEACON_SERVER_FLAG)?; diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index bf000385f3..a7c72679f7 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -9,7 +9,8 @@ use account_utils::{ }, ZeroizeString, }; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use std::fs; use std::path::PathBuf; @@ -25,8 +26,8 @@ pub const PASSWORD_PROMPT: &str = "Enter the keystore password, or press enter t pub const KEYSTORE_REUSE_WARNING: &str = "DO NOT USE THE ORIGINAL KEYSTORES TO VALIDATE WITH \ ANOTHER CLIENT, OR YOU WILL GET SLASHED."; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> 
Command { + Command::new(CMD) .about( "Imports one or more EIP-2335 passwords into a Lighthouse VC directory, \ requesting passwords interactively. The directory flag provides a convenient \ @@ -34,16 +35,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Python utility.", ) .arg( - Arg::with_name(KEYSTORE_FLAG) + Arg::new(KEYSTORE_FLAG) .long(KEYSTORE_FLAG) .value_name("KEYSTORE_PATH") .help("Path to a single keystore to be imported.") .conflicts_with(DIR_FLAG) - .required_unless(DIR_FLAG) - .takes_value(true), + .required_unless_present(DIR_FLAG) + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(DIR_FLAG) + Arg::new(DIR_FLAG) .long(DIR_FLAG) .value_name("KEYSTORES_DIRECTORY") .help( @@ -53,23 +55,29 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { has the '.json' extension will be attempted to be imported.", ) .conflicts_with(KEYSTORE_FLAG) - .required_unless(KEYSTORE_FLAG) - .takes_value(true), + .required_unless_present(KEYSTORE_FLAG) + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0), ) .arg( - Arg::with_name(REUSE_PASSWORD_FLAG) + Arg::new(REUSE_PASSWORD_FLAG) .long(REUSE_PASSWORD_FLAG) - .help("If present, the same password will be used for all imported keystores."), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .help("If present, the same password will be used for all imported keystores.") + .display_order(0), ) .arg( - Arg::with_name(PASSWORD_FLAG) + Arg::new(PASSWORD_FLAG) .long(PASSWORD_FLAG) .value_name("KEYSTORE_PASSWORD_PATH") .requires(REUSE_PASSWORD_FLAG) @@ -79,15 +87,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { The password will be 
copied to the `validator_definitions.yml` file, so after \ import we strongly recommend you delete the file at KEYSTORE_PASSWORD_PATH.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) } pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> { let keystore: Option = clap_utils::parse_optional(matches, KEYSTORE_FLAG)?; let keystores_dir: Option = clap_utils::parse_optional(matches, DIR_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); - let reuse_password = matches.is_present(REUSE_PASSWORD_FLAG); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); + let reuse_password = matches.get_flag(REUSE_PASSWORD_FLAG); let keystore_password_path: Option = clap_utils::parse_optional(matches, PASSWORD_FLAG)?; diff --git a/account_manager/src/validator/list.rs b/account_manager/src/validator/list.rs index 3385728369..d082a49590 100644 --- a/account_manager/src/validator/list.rs +++ b/account_manager/src/validator/list.rs @@ -1,11 +1,11 @@ use account_utils::validator_definitions::ValidatorDefinitions; -use clap::App; +use clap::Command; use std::path::PathBuf; pub const CMD: &str = "list"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD).about("Lists the public keys of all validators.") +pub fn cli_app() -> Command { + Command::new(CMD).about("Lists the public keys of all validators.") } pub fn cli_run(validator_dir: PathBuf) -> Result<(), String> { diff --git a/account_manager/src/validator/mod.rs b/account_manager/src/validator/mod.rs index af977dcf03..6616bb0c45 100644 --- a/account_manager/src/validator/mod.rs +++ b/account_manager/src/validator/mod.rs @@ -7,7 +7,8 @@ pub mod recover; pub mod slashing_protection; use crate::{VALIDATOR_DIR_FLAG, VALIDATOR_DIR_FLAG_ALIAS}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use directory::{parse_path_or_default_with_flag, DEFAULT_VALIDATOR_DIR}; 
use environment::Environment; use std::path::PathBuf; @@ -15,11 +16,21 @@ use types::EthSpec; pub const CMD: &str = "validator"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) + .display_order(0) .about("Provides commands for managing Eth2 validators.") .arg( - Arg::with_name(VALIDATOR_DIR_FLAG) + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) + .arg( + Arg::new(VALIDATOR_DIR_FLAG) .long(VALIDATOR_DIR_FLAG) .alias(VALIDATOR_DIR_FLAG_ALIAS) .value_name("VALIDATOR_DIRECTORY") @@ -27,7 +38,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { "The path to search for validator directories. \ Defaults to ~/.lighthouse/{network}/validators", ) - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("datadir"), ) .subcommand(create::cli_app()) @@ -40,7 +51,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { } pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result<(), String> { - let validator_base_dir = if matches.value_of("datadir").is_some() { + let validator_base_dir = if matches.get_one::("datadir").is_some() { let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; path.join(DEFAULT_VALIDATOR_DIR) } else { @@ -49,18 +60,19 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< eprintln!("validator-dir path: {:?}", validator_base_dir); match matches.subcommand() { - (create::CMD, Some(matches)) => create::cli_run::(matches, env, validator_base_dir), - (modify::CMD, Some(matches)) => modify::cli_run(matches, validator_base_dir), - (import::CMD, Some(matches)) => import::cli_run(matches, validator_base_dir), - (list::CMD, Some(_)) => list::cli_run(validator_base_dir), - (recover::CMD, Some(matches)) => recover::cli_run(matches, validator_base_dir), - (slashing_protection::CMD, Some(matches)) => { + Some((create::CMD, matches)) => create::cli_run::(matches, 
env, validator_base_dir), + Some((modify::CMD, matches)) => modify::cli_run(matches, validator_base_dir), + Some((import::CMD, matches)) => import::cli_run(matches, validator_base_dir), + Some((list::CMD, _)) => list::cli_run(validator_base_dir), + Some((recover::CMD, matches)) => recover::cli_run(matches, validator_base_dir), + Some((slashing_protection::CMD, matches)) => { slashing_protection::cli_run(matches, env, validator_base_dir) } - (exit::CMD, Some(matches)) => exit::cli_run(matches, env), - (unknown, _) => Err(format!( + Some((exit::CMD, matches)) => exit::cli_run(matches, env), + Some((unknown, _)) => Err(format!( "{} does not have a {} command. See --help", CMD, unknown )), + _ => Err(format!("No command provided for {}. See --help", CMD)), } } diff --git a/account_manager/src/validator/modify.rs b/account_manager/src/validator/modify.rs index bd4ae4d8f4..571cd28bf5 100644 --- a/account_manager/src/validator/modify.rs +++ b/account_manager/src/validator/modify.rs @@ -1,6 +1,7 @@ use account_utils::validator_definitions::ValidatorDefinitions; use bls::PublicKey; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use std::{collections::HashSet, path::PathBuf}; pub const CMD: &str = "modify"; @@ -10,43 +11,50 @@ pub const DISABLE: &str = "disable"; pub const PUBKEY_FLAG: &str = "pubkey"; pub const ALL: &str = "all"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about("Modify validator status in validator_definitions.yml.") + .display_order(0) .subcommand( - App::new(ENABLE) + Command::new(ENABLE) .about("Enable validator(s) in validator_definitions.yml.") .arg( - Arg::with_name(PUBKEY_FLAG) + Arg::new(PUBKEY_FLAG) .long(PUBKEY_FLAG) .value_name("PUBKEY") .help("Validator pubkey to enable") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(ALL) + Arg::new(ALL) .long(ALL) .help("Enable 
all validators in the validator directory") - .takes_value(false) - .conflicts_with(PUBKEY_FLAG), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .conflicts_with(PUBKEY_FLAG) + .display_order(0), ), ) .subcommand( - App::new(DISABLE) + Command::new(DISABLE) .about("Disable validator(s) in validator_definitions.yml.") .arg( - Arg::with_name(PUBKEY_FLAG) + Arg::new(PUBKEY_FLAG) .long(PUBKEY_FLAG) .value_name("PUBKEY") .help("Validator pubkey to disable") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(ALL) + Arg::new(ALL) .long(ALL) .help("Disable all validators in the validator directory") - .takes_value(false) - .conflicts_with(PUBKEY_FLAG), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .conflicts_with(PUBKEY_FLAG) + .display_order(0), ), ) } @@ -55,14 +63,15 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin // `true` implies we are setting `validator_definition.enabled = true` and // vice versa. let (enabled, sub_matches) = match matches.subcommand() { - (ENABLE, Some(sub_matches)) => (true, sub_matches), - (DISABLE, Some(sub_matches)) => (false, sub_matches), - (unknown, _) => { + Some((ENABLE, sub_matches)) => (true, sub_matches), + Some((DISABLE, sub_matches)) => (false, sub_matches), + Some((unknown, _)) => { return Err(format!( "{} does not have a {} command. See --help", CMD, unknown )) } + _ => return Err(format!("No command provided for {}. 
See --help", CMD)), }; let mut defs = ValidatorDefinitions::open(&validator_dir).map_err(|e| { format!( @@ -70,7 +79,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin validator_dir, e ) })?; - let pubkeys_to_modify = if sub_matches.is_present(ALL) { + let pubkeys_to_modify = if sub_matches.get_flag(ALL) { defs.as_slice() .iter() .map(|def| def.voting_public_key.clone()) diff --git a/account_manager/src/validator/recover.rs b/account_manager/src/validator/recover.rs index 33d3b18926..4677db18df 100644 --- a/account_manager/src/validator/recover.rs +++ b/account_manager/src/validator/recover.rs @@ -4,7 +4,8 @@ use crate::wallet::create::STDIN_INPUTS_FLAG; use crate::SECRETS_DIR_FLAG; use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder}; use account_utils::{random_password, read_mnemonic_from_cli}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use directory::ensure_dir_exists; use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR}; use eth2_wallet::bip39::Seed; @@ -15,70 +16,79 @@ pub const CMD: &str = "recover"; pub const FIRST_INDEX_FLAG: &str = "first-index"; pub const MNEMONIC_FLAG: &str = "mnemonic-path"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Recovers validator private keys given a BIP-39 mnemonic phrase. 
\ If you did not specify a `--first-index` or count `--count`, by default this will \ only recover the keys associated with the validator at index 0 for an HD wallet \ in accordance with the EIP-2333 spec.") .arg( - Arg::with_name(FIRST_INDEX_FLAG) + Arg::new(FIRST_INDEX_FLAG) .long(FIRST_INDEX_FLAG) .value_name("FIRST_INDEX") .help("The first of consecutive key indexes you wish to recover.") - .takes_value(true) + .action(ArgAction::Set) .required(false) - .default_value("0"), + .default_value("0") + .display_order(0) ) .arg( - Arg::with_name(COUNT_FLAG) + Arg::new(COUNT_FLAG) .long(COUNT_FLAG) .value_name("COUNT") .help("The number of validator keys you wish to recover. Counted consecutively from the provided `--first_index`.") - .takes_value(true) + .action(ArgAction::Set) .required(false) - .default_value("1"), + .default_value("1") + .display_order(0) ) .arg( - Arg::with_name(MNEMONIC_FLAG) + Arg::new(MNEMONIC_FLAG) .long(MNEMONIC_FLAG) .value_name("MNEMONIC_PATH") .help( "If present, the mnemonic will be read in from this file.", ) - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(SECRETS_DIR_FLAG) + Arg::new(SECRETS_DIR_FLAG) .long(SECRETS_DIR_FLAG) .value_name("SECRETS_DIR") .help( "The path where the validator keystore passwords will be stored. \ Defaults to ~/.lighthouse/{network}/secrets", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(STORE_WITHDRAW_FLAG) + Arg::new(STORE_WITHDRAW_FLAG) .long(STORE_WITHDRAW_FLAG) .help( "If present, the withdrawal keystore will be stored alongside the voting \ keypair. 
It is generally recommended to *not* store the withdrawal key and \ instead generate them from the wallet seed when required.", - ), + ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0) ) } pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> { - let secrets_dir = if matches.value_of("datadir").is_some() { + let secrets_dir = if matches.get_one::("datadir").is_some() { let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; path.join(DEFAULT_SECRET_DIR) } else { @@ -87,7 +97,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin let first_index: u32 = clap_utils::parse_required(matches, FIRST_INDEX_FLAG)?; let count: u32 = clap_utils::parse_required(matches, COUNT_FLAG)?; let mnemonic_path: Option = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); eprintln!("secrets-dir path: {:?}", secrets_dir); @@ -131,7 +141,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin .password_dir(secrets_dir.clone()) .voting_keystore(keystores.voting, voting_password.as_bytes()) .withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes()) - .store_withdrawal_keystore(matches.is_present(STORE_WITHDRAW_FLAG)) + .store_withdrawal_keystore(matches.get_flag(STORE_WITHDRAW_FLAG)) .build() .map_err(|e| format!("Unable to build validator directory: {:?}", e))?; diff --git 
a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index ff2eeb9cbf..bcd860a484 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -1,4 +1,4 @@ -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; use environment::Environment; use slashing_protection::{ interchange::Interchange, InterchangeError, InterchangeImportOutcome, SlashingDatabase, @@ -18,43 +18,47 @@ pub const EXPORT_FILE_ARG: &str = "EXPORT-FILE"; pub const PUBKEYS_FLAG: &str = "pubkeys"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about("Import or export slashing protection data to or from another client") + .display_order(0) .subcommand( - App::new(IMPORT_CMD) + Command::new(IMPORT_CMD) .about("Import an interchange file") .arg( - Arg::with_name(IMPORT_FILE_ARG) - .takes_value(true) + Arg::new(IMPORT_FILE_ARG) + .action(ArgAction::Set) .value_name("FILE") + .display_order(0) .help("The slashing protection interchange file to import (.json)"), ) ) .subcommand( - App::new(EXPORT_CMD) + Command::new(EXPORT_CMD) .about("Export an interchange file") .arg( - Arg::with_name(EXPORT_FILE_ARG) - .takes_value(true) + Arg::new(EXPORT_FILE_ARG) + .action(ArgAction::Set) .value_name("FILE") - .help("The filename to export the interchange file to"), + .help("The filename to export the interchange file to") + .display_order(0) ) .arg( - Arg::with_name(PUBKEYS_FLAG) + Arg::new(PUBKEYS_FLAG) .long(PUBKEYS_FLAG) - .takes_value(true) + .action(ArgAction::Set) .value_name("PUBKEYS") .help( "List of public keys to export history for. Keys should be 0x-prefixed, \ comma-separated. 
All known keys will be exported if omitted", - ), + ) + .display_order(0) ) ) } pub fn cli_run( - matches: &ArgMatches<'_>, + matches: &ArgMatches, env: Environment, validator_base_dir: PathBuf, ) -> Result<(), String> { @@ -68,7 +72,7 @@ pub fn cli_run( .ok_or_else(|| "Unable to get genesis state, has genesis occurred?".to_string())?; match matches.subcommand() { - (IMPORT_CMD, Some(matches)) => { + Some((IMPORT_CMD, matches)) => { let import_filename: PathBuf = clap_utils::parse_required(matches, IMPORT_FILE_ARG)?; let import_file = File::open(&import_filename).map_err(|e| { format!( @@ -168,7 +172,7 @@ pub fn cli_run( Ok(()) } - (EXPORT_CMD, Some(matches)) => { + Some((EXPORT_CMD, matches)) => { let export_filename: PathBuf = clap_utils::parse_required(matches, EXPORT_FILE_ARG)?; let selected_pubkeys = if let Some(pubkeys) = @@ -215,7 +219,7 @@ pub fn cli_run( Ok(()) } - ("", _) => Err("No subcommand provided, see --help for options".to_string()), - (command, _) => Err(format!("No such subcommand `{}`", command)), + Some((command, _)) => Err(format!("No such subcommand `{}`", command)), + _ => Err("No subcommand provided, see --help for options".to_string()), } } diff --git a/account_manager/src/wallet/create.rs b/account_manager/src/wallet/create.rs index accee11b5a..12aa5d3801 100644 --- a/account_manager/src/wallet/create.rs +++ b/account_manager/src/wallet/create.rs @@ -3,7 +3,7 @@ use crate::WALLETS_DIR_FLAG; use account_utils::{ is_password_sufficiently_complex, random_password, read_password_from_user, strip_off_newlines, }; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2_wallet::{ bip39::{Language, Mnemonic, MnemonicType}, PlainText, @@ -33,21 +33,22 @@ pub const NEW_WALLET_PASSWORD_PROMPT: &str = "Enter a password for your new wallet that is at least 12 characters long:"; pub const RETYPE_PASSWORD_PROMPT: &str = "Please re-enter your wallet's new password:"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - 
App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about("Creates a new HD (hierarchical-deterministic) EIP-2386 wallet.") .arg( - Arg::with_name(NAME_FLAG) + Arg::new(NAME_FLAG) .long(NAME_FLAG) .value_name("WALLET_NAME") .help( "The wallet will be created with this name. It is not allowed to \ create two wallets with the same name for the same --base-dir.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(PASSWORD_FLAG) + Arg::new(PASSWORD_FLAG) .long(PASSWORD_FLAG) .value_name("WALLET_PASSWORD_PATH") .help( @@ -56,49 +57,65 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { saved at that path. To avoid confusion, if the file does not already \ exist it must include a '.pass' suffix.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(TYPE_FLAG) + Arg::new(TYPE_FLAG) .long(TYPE_FLAG) .value_name("WALLET_TYPE") .help( "The type of wallet to create. Only HD (hierarchical-deterministic) \ wallets are supported presently..", ) - .takes_value(true) - .possible_values(&[HD_TYPE]) - .default_value(HD_TYPE), + .action(ArgAction::Set) + .value_parser([HD_TYPE]) + .default_value(HD_TYPE) + .display_order(0) ) .arg( - Arg::with_name(MNEMONIC_FLAG) + Arg::new(MNEMONIC_FLAG) .long(MNEMONIC_FLAG) .value_name("MNEMONIC_PATH") .help( "If present, the mnemonic will be saved to this file. 
DO NOT SHARE THE MNEMONIC.", ) - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0) ) .arg( - Arg::with_name(MNEMONIC_LENGTH_FLAG) + Arg::new(MNEMONIC_LENGTH_FLAG) .long(MNEMONIC_LENGTH_FLAG) .value_name("MNEMONIC_LENGTH") .help("The number of words to use for the mnemonic phrase.") - .takes_value(true) - .validator(|len| { - match len.parse::().ok().and_then(|words| MnemonicType::for_word_count(words).ok()) { - Some(_) => Ok(()), - None => Err(format!("Mnemonic length must be one of {}", MNEMONIC_TYPES.iter().map(|t| t.word_count().to_string()).collect::>().join(", "))), - } + .action(ArgAction::Set) + .value_parser(|len: &str| { + match len + .parse::() + .ok() + .and_then(|words| MnemonicType::for_word_count(words).ok()) + { + Some(_) => Ok(len.to_string()), + None => Err(format!( + "Mnemonic length must be one of {}", + MNEMONIC_TYPES + .iter() + .map(|t| t.word_count().to_string()) + .collect::>() + .join(", ") + )), + } }) - .default_value("24"), + .default_value("24") + .display_order(0) ) } @@ -153,7 +170,7 @@ pub fn create_wallet_from_mnemonic( let name: Option = clap_utils::parse_optional(matches, NAME_FLAG)?; let wallet_password_path: Option = clap_utils::parse_optional(matches, PASSWORD_FLAG)?; let type_field: String = clap_utils::parse_required(matches, TYPE_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); let wallet_type = match type_field.as_ref() { HD_TYPE => WalletType::Hd, unknown => return Err(format!("--{} {} is not supported", TYPE_FLAG, unknown)), diff --git 
a/account_manager/src/wallet/list.rs b/account_manager/src/wallet/list.rs index 9190de3915..a551ffae12 100644 --- a/account_manager/src/wallet/list.rs +++ b/account_manager/src/wallet/list.rs @@ -1,12 +1,12 @@ use crate::WALLETS_DIR_FLAG; -use clap::App; +use clap::Command; use eth2_wallet_manager::WalletManager; use std::path::PathBuf; pub const CMD: &str = "list"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD).about("Lists the names of all wallets.") +pub fn cli_app() -> Command { + Command::new(CMD).about("Lists the names of all wallets.") } pub fn cli_run(wallet_base_dir: PathBuf) -> Result<(), String> { diff --git a/account_manager/src/wallet/mod.rs b/account_manager/src/wallet/mod.rs index dfadebf57f..59f5f36252 100644 --- a/account_manager/src/wallet/mod.rs +++ b/account_manager/src/wallet/mod.rs @@ -3,21 +3,32 @@ pub mod list; pub mod recover; use crate::WALLETS_DIR_FLAG; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use directory::{ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_WALLET_DIR}; use std::path::PathBuf; pub const CMD: &str = "wallet"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about("Manage wallets, from which validator keys can be derived.") + .display_order(0) .arg( - Arg::with_name(WALLETS_DIR_FLAG) + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER) + ) + .arg( + Arg::new(WALLETS_DIR_FLAG) .long(WALLETS_DIR_FLAG) .value_name("WALLETS_DIRECTORY") .help("A path containing Eth2 EIP-2386 wallets. 
Defaults to ~/.lighthouse/{network}/wallets") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("datadir"), ) .subcommand(create::cli_app()) @@ -26,7 +37,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { } pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { - let wallet_base_dir = if matches.value_of("datadir").is_some() { + let wallet_base_dir = if matches.get_one::("datadir").is_some() { let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; path.join(DEFAULT_WALLET_DIR) } else { @@ -37,12 +48,13 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { eprintln!("wallet-dir path: {:?}", wallet_base_dir); match matches.subcommand() { - (create::CMD, Some(matches)) => create::cli_run(matches, wallet_base_dir), - (list::CMD, Some(_)) => list::cli_run(wallet_base_dir), - (recover::CMD, Some(matches)) => recover::cli_run(matches, wallet_base_dir), - (unknown, _) => Err(format!( + Some((create::CMD, matches)) => create::cli_run(matches, wallet_base_dir), + Some((list::CMD, _)) => list::cli_run(wallet_base_dir), + Some((recover::CMD, matches)) => recover::cli_run(matches, wallet_base_dir), + Some((unknown, _)) => Err(format!( "{} does not have a {} command. 
See --help", CMD, unknown )), + _ => Err("No subcommand provided, see --help for options".to_string()), } } diff --git a/account_manager/src/wallet/recover.rs b/account_manager/src/wallet/recover.rs index 6e047aca8d..b9641f1152 100644 --- a/account_manager/src/wallet/recover.rs +++ b/account_manager/src/wallet/recover.rs @@ -1,27 +1,28 @@ use crate::wallet::create::{create_wallet_from_mnemonic, STDIN_INPUTS_FLAG}; use crate::wallet::create::{HD_TYPE, NAME_FLAG, PASSWORD_FLAG, TYPE_FLAG}; use account_utils::read_mnemonic_from_cli; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; use std::path::PathBuf; pub const CMD: &str = "recover"; pub const MNEMONIC_FLAG: &str = "mnemonic-path"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about("Recovers an EIP-2386 wallet from a given a BIP-39 mnemonic phrase.") .arg( - Arg::with_name(NAME_FLAG) + Arg::new(NAME_FLAG) .long(NAME_FLAG) .value_name("WALLET_NAME") .help( "The wallet will be created with this name. It is not allowed to \ create two wallets with the same name for the same --base-dir.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(PASSWORD_FLAG) + Arg::new(PASSWORD_FLAG) .long(PASSWORD_FLAG) .value_name("PASSWORD_FILE_PATH") .help( @@ -31,39 +32,43 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { saved at that path. To avoid confusion, if the file does not already \ exist it must include a '.pass' suffix.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(MNEMONIC_FLAG) + Arg::new(MNEMONIC_FLAG) .long(MNEMONIC_FLAG) .value_name("MNEMONIC_PATH") .help("If present, the mnemonic will be read in from this file.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(TYPE_FLAG) + Arg::new(TYPE_FLAG) .long(TYPE_FLAG) .value_name("WALLET_TYPE") .help( "The type of wallet to create. 
Only HD (hierarchical-deterministic) \ wallets are supported presently..", ) - .takes_value(true) - .possible_values(&[HD_TYPE]) - .default_value(HD_TYPE), + .action(ArgAction::Set) + .value_parser([HD_TYPE]) + .default_value(HD_TYPE) + .display_order(0), ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0), ) } pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), String> { let mnemonic_path: Option = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); eprintln!(); eprintln!("WARNING: KEY RECOVERY CAN LEAD TO DUPLICATING VALIDATORS KEYS, WHICH CAN LEAD TO SLASHING."); diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 7cc6e2b6ae..e2f6c681c1 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -29,18 +29,14 @@ clap = { workspace = true } slog = { workspace = true } dirs = { workspace = true } directory = { workspace = true } -futures = { workspace = true } environment = { workspace = true } task_executor = { workspace = true } genesis = { workspace = true } -eth2_network_config = { workspace = true } execution_layer = { workspace = true } lighthouse_network = { workspace = true } -serde = { workspace = true } serde_json = { workspace = true } clap_utils = { workspace = true } hyper = { workspace = true } -lighthouse_version = { workspace = true } hex = { workspace = true } slasher = { workspace = true } monitoring_api = { workspace = true } diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs 
b/beacon_node/beacon_chain/src/attestation_verification.rs index 471c43d94f..b7019d79b4 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -381,6 +381,11 @@ fn process_slash_info( if let Some(slasher) = chain.slasher.as_ref() { let (indexed_attestation, check_signature, err) = match slash_info { SignatureNotChecked(attestation, err) => { + if let Error::UnknownHeadBlock { .. } = err { + if attestation.data.beacon_block_root == attestation.data.target.root { + return err; + } + } match obtain_indexed_attestation_and_committees_per_slot(chain, attestation) { Ok((indexed, _)) => (indexed, true, err), Err(e) => { diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index 0c92b7c1f6..f0a68b6be5 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -1,4 +1,4 @@ -use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes}; +use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes, BlockProcessStatus}; use execution_layer::{ExecutionLayer, ExecutionPayloadBodyV1}; use slog::{crit, debug, error, Logger}; use std::collections::HashMap; @@ -410,15 +410,14 @@ impl BeaconBlockStreamer { fn check_caches(&self, root: Hash256) -> Option>> { if self.check_caches == CheckCaches::Yes { - self.beacon_chain - .reqresp_pre_import_cache - .read() - .get(&root) - .map(|block| { + match self.beacon_chain.get_block_process_status(&root) { + BlockProcessStatus::Unknown => None, + BlockProcessStatus::NotValidated(block) + | BlockProcessStatus::ExecutionValidated(block) => { metrics::inc_counter(&metrics::BEACON_REQRESP_PRE_IMPORT_CACHE_HITS); - block.clone() - }) - .or(self.beacon_chain.early_attester_cache.get_block(root)) + Some(block) + } + } } else { None } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs 
b/beacon_node/beacon_chain/src/beacon_chain.rs index 9c7ded313b..77e1bc095e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -337,6 +337,20 @@ struct PartialBeaconBlock { bls_to_execution_changes: Vec, } +pub enum BlockProcessStatus { + /// Block is not in any pre-import cache. Block may be in the data-base or in the fork-choice. + Unknown, + /// Block is currently processing but not yet validated. + NotValidated(Arc>), + /// Block is fully valid, but not yet imported. It's cached in the da_checker while awaiting + /// missing block components. + ExecutionValidated(Arc>), +} + +pub struct BeaconChainMetrics { + pub reqresp_pre_import_cache_len: usize, +} + pub type LightClientProducerEvent = (Hash256, Slot, SyncAggregate); pub type BeaconForkChoice = ForkChoice< @@ -1237,6 +1251,27 @@ impl BeaconChain { Ok(self.store.get_blinded_block(block_root)?) } + /// Return the status of a block as it progresses through the various caches of the beacon + /// chain. Used by sync to learn the status of a block and prevent repeated downloads / + /// processing attempts. + pub fn get_block_process_status(&self, block_root: &Hash256) -> BlockProcessStatus { + if let Some(block) = self + .data_availability_checker + .get_execution_valid_block(block_root) + { + return BlockProcessStatus::ExecutionValidated(block); + } + + if let Some(block) = self.reqresp_pre_import_cache.read().get(block_root) { + // A block is on the `reqresp_pre_import_cache` but NOT in the + // `data_availability_checker` only if it is actively processing. We can expect a future + // event with the result of processing + return BlockProcessStatus::NotValidated(block.clone()); + } + + BlockProcessStatus::Unknown + } + /// Returns the state at the given root, if any. 
/// /// ## Errors @@ -2422,6 +2457,7 @@ impl BeaconChain { proposer_slashing: ProposerSlashing, ) -> Result, Error> { let wall_clock_state = self.wall_clock_state()?; + Ok(self.observed_proposer_slashings.lock().verify_and_observe( proposer_slashing, &wall_clock_state, @@ -2434,6 +2470,14 @@ impl BeaconChain { &self, proposer_slashing: SigVerifiedOp, ) { + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_proposer_slashing_subscribers() { + event_handler.register(EventKind::ProposerSlashing(Box::new( + proposer_slashing.clone().into_inner(), + ))); + } + } + if self.eth1_chain.is_some() { self.op_pool.insert_proposer_slashing(proposer_slashing) } @@ -2445,6 +2489,7 @@ impl BeaconChain { attester_slashing: AttesterSlashing, ) -> Result, T::EthSpec>, Error> { let wall_clock_state = self.wall_clock_state()?; + Ok(self.observed_attester_slashings.lock().verify_and_observe( attester_slashing, &wall_clock_state, @@ -2465,6 +2510,14 @@ impl BeaconChain { .fork_choice_write_lock() .on_attester_slashing(attester_slashing.as_inner()); + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_attester_slashing_subscribers() { + event_handler.register(EventKind::AttesterSlashing(Box::new( + attester_slashing.clone().into_inner(), + ))); + } + } + // Add to the op pool (if we have the ability to propose blocks). 
if self.eth1_chain.is_some() { self.op_pool.insert_attester_slashing(attester_slashing) @@ -2536,6 +2589,14 @@ impl BeaconChain { bls_to_execution_change: SigVerifiedOp, received_pre_capella: ReceivedPreCapella, ) -> bool { + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_bls_to_execution_change_subscribers() { + event_handler.register(EventKind::BlsToExecutionChange(Box::new( + bls_to_execution_change.clone().into_inner(), + ))); + } + } + if self.eth1_chain.is_some() { self.op_pool .insert_bls_to_execution_change(bls_to_execution_change, received_pre_capella) @@ -2756,6 +2817,7 @@ impl BeaconChain { signature_verified_block.block_root(), signature_verified_block, notify_execution_layer, + BlockImportSource::RangeSync, || Ok(()), ) .await @@ -2938,6 +3000,7 @@ impl BeaconChain { self: &Arc, block_root: Hash256, unverified_block: B, + block_source: BlockImportSource, notify_execution_layer: NotifyExecutionLayer, ) -> Result> { self.reqresp_pre_import_cache @@ -2945,9 +3008,13 @@ impl BeaconChain { .insert(block_root, unverified_block.block_cloned()); let r = self - .process_block(block_root, unverified_block, notify_execution_layer, || { - Ok(()) - }) + .process_block( + block_root, + unverified_block, + notify_execution_layer, + block_source, + || Ok(()), + ) .await; self.remove_notified(&block_root, r) } @@ -2970,6 +3037,7 @@ impl BeaconChain { block_root: Hash256, unverified_block: B, notify_execution_layer: NotifyExecutionLayer, + block_source: BlockImportSource, publish_fn: impl FnOnce() -> Result<(), BlockError> + Send + 'static, ) -> Result> { // Start the Prometheus timer. @@ -3030,6 +3098,7 @@ impl BeaconChain { "Beacon block imported"; "block_root" => ?block_root, "block_slot" => block_slot, + "source" => %block_source, ); // Increment the Prometheus counter for block processing successes. 
@@ -3267,6 +3336,20 @@ impl BeaconChain { "payload_verification_handle", ) .await??; + + // Remove block components from da_checker AFTER completing block import. Then we can assert + // the following invariant: + // > A valid unfinalized block is either in fork-choice or da_checker. + // + // If we remove the block when it becomes available, there's some time window during + // `import_block` where the block is nowhere. Consumers of the da_checker can handle the + // extend time a block may exist in the da_checker. + // + // If `import_block` errors (only errors with internal errors), the pending components will + // be pruned on data_availability_checker maintenance as finality advances. + self.data_availability_checker + .remove_pending_components(block_root); + Ok(AvailabilityProcessingStatus::Imported(block_root)) } @@ -4397,12 +4480,6 @@ impl BeaconChain { if cached_head.head_block_root() == parent_block_root { (Cow::Borrowed(head_state), cached_head.head_state_root()) } else { - info!( - self.log, - "Missed snapshot cache during withdrawals calculation"; - "slot" => proposal_slot, - "parent_block_root" => ?parent_block_root - ); let block = self .get_blinded_block(&parent_block_root)? .ok_or(Error::MissingBeaconBlock(parent_block_root))?; @@ -6365,9 +6442,8 @@ impl BeaconChain { /// account the current slot when accounting for skips. pub fn is_healthy(&self, parent_root: &Hash256) -> Result { let cached_head = self.canonical_head.cached_head(); - // Check if the merge has been finalized. 
- if let Some(finalized_hash) = cached_head.forkchoice_update_parameters().finalized_hash { - if ExecutionBlockHash::zero() == finalized_hash { + if let Some(head_hash) = cached_head.forkchoice_update_parameters().head_hash { + if ExecutionBlockHash::zero() == head_hash { return Ok(ChainHealth::PreMerge); } } else { @@ -6610,6 +6686,12 @@ impl BeaconChain { ForkName::Base => Err(Error::UnsupportedFork), } } + + pub fn metrics(&self) -> BeaconChainMetrics { + BeaconChainMetrics { + reqresp_pre_import_cache_len: self.reqresp_pre_import_cache.read().len(), + } + } } impl Drop for BeaconChain { diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 263b9f9e01..fdf8ee2b97 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -571,6 +571,14 @@ pub fn validate_blob_sidecar_for_gossip( }); } + // Kzg verification for gossip blob sidecar + let kzg = chain + .kzg + .as_ref() + .ok_or(GossipBlobError::KzgNotInitialized)?; + let kzg_verified_blob = KzgVerifiedBlob::new(blob_sidecar.clone(), kzg, seen_timestamp) + .map_err(GossipBlobError::KzgError)?; + chain .observed_slashable .write() @@ -605,14 +613,6 @@ pub fn validate_blob_sidecar_for_gossip( }); } - // Kzg verification for gossip blob sidecar - let kzg = chain - .kzg - .as_ref() - .ok_or(GossipBlobError::KzgNotInitialized)?; - let kzg_verified_blob = KzgVerifiedBlob::new(blob_sidecar, kzg, seen_timestamp) - .map_err(GossipBlobError::KzgError)?; - Ok(GossipVerifiedBlob { block_root, blob: kzg_verified_blob, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 866dde5a76..f4f6526a56 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1382,18 +1382,20 @@ impl ExecutionPendingBlock { let catchup_timer = 
metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE); // Stage a batch of operations to be completed atomically if this block is imported - // successfully. We include the state root of the pre-state, which may be an advanced state - // that was stored in the DB with a `temporary` flag. + // successfully. If there is a skipped slot, we include the state root of the pre-state, + // which may be an advanced state that was stored in the DB with a `temporary` flag. let mut state = parent.pre_state; - let mut confirmed_state_roots = if state.slot() > parent.beacon_block.slot() { - // Advanced pre-state. Delete its temporary flag. - let pre_state_root = state.update_tree_hash_cache()?; - vec![pre_state_root] - } else { - // Pre state is parent state. It is already stored in the DB without temporary status. - vec![] - }; + let mut confirmed_state_roots = + if block.slot() > state.slot() && state.slot() > parent.beacon_block.slot() { + // Advanced pre-state. Delete its temporary flag. + let pre_state_root = state.update_tree_hash_cache()?; + vec![pre_state_root] + } else { + // Pre state is either unadvanced, or should not be stored long-term because there + // is no skipped slot between `parent` and `block`. + vec![] + }; // The block must have a higher slot than its parent. if block.slot() <= parent.beacon_block.slot() { diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index fb0e0c965f..70f1e99ef7 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -96,13 +96,18 @@ impl RpcBlock { } /// Constructs a new `BlockAndBlobs` variant after making consistency - /// checks between the provided blocks and blobs. + /// checks between the provided blocks and blobs. This struct makes no + /// guarantees about whether blobs should be present, only that they are + /// consistent with the block. 
An empty list passed in for `blobs` is + /// viewed the same as `None` passed in. pub fn new( block_root: Option, block: Arc>, blobs: Option>, ) -> Result { let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); + // Treat empty blob lists as if they are missing. + let blobs = blobs.filter(|b| !b.is_empty()); if let (Some(blobs), Ok(block_commitments)) = ( blobs.as_ref(), @@ -309,6 +314,26 @@ pub struct BlockImportData { pub consensus_context: ConsensusContext, } +impl BlockImportData { + pub fn __new_for_test( + block_root: Hash256, + state: BeaconState, + parent_block: SignedBeaconBlock>, + ) -> Self { + Self { + block_root, + state, + parent_block, + parent_eth1_finalization_data: Eth1FinalizationData { + eth1_data: <_>::default(), + eth1_deposit_index: 0, + }, + confirmed_state_roots: vec![], + consensus_context: ConsensusContext::new(Slot::new(0)), + } + } +} + pub type GossipVerifiedBlockContents = (GossipVerifiedBlock, Option>); diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 376bc16c03..90461b8f03 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -708,8 +708,8 @@ where .ok_or("Cannot build without a genesis state root")?; let validator_monitor_config = self.validator_monitor_config.unwrap_or_default(); let head_tracker = Arc::new(self.head_tracker.unwrap_or_default()); - let beacon_proposer_cache: Arc> = <_>::default(); + let mut validator_monitor = ValidatorMonitor::new( validator_monitor_config, beacon_proposer_cache.clone(), diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index dd0d97b1da..e0347d81c3 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -2,14 +2,11 @@ use crate::blob_verification::{verify_kzg_for_blob_list, GossipVerifiedBlob, Kzg use 
crate::block_verification_types::{ AvailabilityPendingExecutedBlock, AvailableExecutedBlock, RpcBlock, }; -pub use crate::data_availability_checker::child_components::ChildComponents; use crate::data_availability_checker::overflow_lru_cache::OverflowLRUCache; use crate::{BeaconChain, BeaconChainTypes, BeaconStore}; use kzg::Kzg; -use slasher::test_utils::E; use slog::{debug, error, Logger}; use slot_clock::SlotClock; -use ssz_types::FixedVector; use std::fmt; use std::fmt::Debug; use std::num::NonZeroUsize; @@ -19,7 +16,6 @@ use task_executor::TaskExecutor; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; -mod child_components; mod error; mod overflow_lru_cache; mod state_lru_cache; @@ -88,74 +84,29 @@ impl DataAvailabilityChecker { }) } - /// Checks if the block root is currenlty in the availability cache awaiting processing because + /// Checks if the block root is currenlty in the availability cache awaiting import because /// of missing components. - pub fn has_block(&self, block_root: &Hash256) -> bool { - self.availability_cache.has_block(block_root) - } - - pub fn get_missing_blob_ids_with(&self, block_root: Hash256) -> MissingBlobs { - self.availability_cache - .with_pending_components(&block_root, |pending_components| match pending_components { - Some(pending_components) => self.get_missing_blob_ids( - block_root, - pending_components - .get_cached_block() - .as_ref() - .map(|b| b.as_block()), - &pending_components.verified_blobs, - ), - None => MissingBlobs::new_without_block(block_root, self.is_deneb()), - }) - } - - /// If there's no block, all possible ids will be returned that don't exist in the given blobs. - /// If there no blobs, all possible ids will be returned. 
- pub fn get_missing_blob_ids( + pub fn get_execution_valid_block( &self, - block_root: Hash256, - block: Option<&SignedBeaconBlock>, - blobs: &FixedVector, ::MaxBlobsPerBlock>, - ) -> MissingBlobs { - let Some(current_slot) = self.slot_clock.now_or_genesis() else { - error!( - self.log, - "Failed to read slot clock when checking for missing blob ids" - ); - return MissingBlobs::BlobsNotRequired; - }; + block_root: &Hash256, + ) -> Option>> { + self.availability_cache + .get_execution_valid_block(block_root) + } - let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - - if self.da_check_required_for_epoch(current_epoch) { - match block { - Some(cached_block) => { - let block_commitments_len = cached_block - .message() - .body() - .blob_kzg_commitments() - .map(|v| v.len()) - .unwrap_or(0); - let blob_ids = blobs + /// Return the set of imported blob indexes for `block_root`. Returns None if there is no block + /// component for `block_root`. + pub fn imported_blob_indexes(&self, block_root: &Hash256) -> Option> { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.map(|components| { + components + .get_cached_blobs() .iter() - .take(block_commitments_len) - .enumerate() - .filter_map(|(index, blob_commitment_opt)| { - blob_commitment_opt.is_none().then_some(BlobIdentifier { - block_root, - index: index as u64, - }) - }) - .collect(); - MissingBlobs::KnownMissing(blob_ids) - } - None => { - MissingBlobs::PossibleMissing(BlobIdentifier::get_all_blob_ids::(block_root)) - } - } - } else { - MissingBlobs::BlobsNotRequired - } + .filter_map(|blob| blob.as_ref().map(|blob| blob.blob_index())) + .collect::>() + }) + }) } /// Get a blob from the availability cache. 
@@ -213,6 +164,11 @@ impl DataAvailabilityChecker { .put_pending_executed_block(executed_block) } + pub fn remove_pending_components(&self, block_root: Hash256) { + self.availability_cache + .remove_pending_components(block_root) + } + /// Verifies kzg commitments for an RpcBlock, returns a `MaybeAvailableBlock` that may /// include the fully available block. /// @@ -351,6 +307,18 @@ impl DataAvailabilityChecker { .map_or(false, |da_epoch| block_epoch >= da_epoch) } + pub fn da_check_required_for_current_epoch(&self) -> bool { + let Some(current_slot) = self.slot_clock.now_or_genesis() else { + error!( + self.log, + "Failed to read slot clock when checking for missing blob ids" + ); + return false; + }; + + self.da_check_required_for_epoch(current_slot.epoch(T::EthSpec::slots_per_epoch())) + } + /// Returns `true` if the current epoch is greater than or equal to the `Deneb` epoch. pub fn is_deneb(&self) -> bool { self.slot_clock.now().map_or(false, |slot| { @@ -544,61 +512,3 @@ impl MaybeAvailableBlock { } } } - -#[derive(Debug, Clone)] -pub enum MissingBlobs { - /// We know for certain these blobs are missing. - KnownMissing(Vec), - /// We think these blobs might be missing. - PossibleMissing(Vec), - /// Blobs are not required. 
- BlobsNotRequired, -} - -impl MissingBlobs { - pub fn new_without_block(block_root: Hash256, is_deneb: bool) -> Self { - if is_deneb { - MissingBlobs::PossibleMissing(BlobIdentifier::get_all_blob_ids::(block_root)) - } else { - MissingBlobs::BlobsNotRequired - } - } - pub fn is_empty(&self) -> bool { - match self { - MissingBlobs::KnownMissing(v) => v.is_empty(), - MissingBlobs::PossibleMissing(v) => v.is_empty(), - MissingBlobs::BlobsNotRequired => true, - } - } - pub fn contains(&self, blob_id: &BlobIdentifier) -> bool { - match self { - MissingBlobs::KnownMissing(v) => v.contains(blob_id), - MissingBlobs::PossibleMissing(v) => v.contains(blob_id), - MissingBlobs::BlobsNotRequired => false, - } - } - pub fn remove(&mut self, blob_id: &BlobIdentifier) { - match self { - MissingBlobs::KnownMissing(v) => v.retain(|id| id != blob_id), - MissingBlobs::PossibleMissing(v) => v.retain(|id| id != blob_id), - MissingBlobs::BlobsNotRequired => {} - } - } - pub fn indices(&self) -> Vec { - match self { - MissingBlobs::KnownMissing(v) => v.iter().map(|id| id.index).collect(), - MissingBlobs::PossibleMissing(v) => v.iter().map(|id| id.index).collect(), - MissingBlobs::BlobsNotRequired => vec![], - } - } -} - -impl Into> for MissingBlobs { - fn into(self) -> Vec { - match self { - MissingBlobs::KnownMissing(v) => v, - MissingBlobs::PossibleMissing(v) => v, - MissingBlobs::BlobsNotRequired => vec![], - } - } -} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/child_components.rs b/beacon_node/beacon_chain/src/data_availability_checker/child_components.rs deleted file mode 100644 index 184dfc4500..0000000000 --- a/beacon_node/beacon_chain/src/data_availability_checker/child_components.rs +++ /dev/null @@ -1,69 +0,0 @@ -use crate::block_verification_types::RpcBlock; -use bls::Hash256; -use std::sync::Arc; -use types::blob_sidecar::FixedBlobSidecarList; -use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; - -/// For requests triggered by an 
`UnknownBlockParent` or `UnknownBlobParent`, this struct -/// is used to cache components as they are sent to the network service. We can't use the -/// data availability cache currently because any blocks or blobs without parents -/// won't pass validation and therefore won't make it into the cache. -pub struct ChildComponents { - pub block_root: Hash256, - pub downloaded_block: Option>>, - pub downloaded_blobs: FixedBlobSidecarList, -} - -impl From> for ChildComponents { - fn from(value: RpcBlock) -> Self { - let (block_root, block, blobs) = value.deconstruct(); - let fixed_blobs = blobs.map(|blobs| { - FixedBlobSidecarList::from(blobs.into_iter().map(Some).collect::>()) - }); - Self::new(block_root, Some(block), fixed_blobs) - } -} - -impl ChildComponents { - pub fn empty(block_root: Hash256) -> Self { - Self { - block_root, - downloaded_block: None, - downloaded_blobs: <_>::default(), - } - } - pub fn new( - block_root: Hash256, - block: Option>>, - blobs: Option>, - ) -> Self { - let mut cache = Self::empty(block_root); - if let Some(block) = block { - cache.merge_block(block); - } - if let Some(blobs) = blobs { - cache.merge_blobs(blobs); - } - cache - } - - pub fn merge_block(&mut self, block: Arc>) { - self.downloaded_block = Some(block); - } - - pub fn merge_blob(&mut self, blob: Arc>) { - if let Some(blob_ref) = self.downloaded_blobs.get_mut(blob.index as usize) { - *blob_ref = Some(blob); - } - } - - pub fn merge_blobs(&mut self, blobs: FixedBlobSidecarList) { - for blob in blobs.iter().flatten() { - self.merge_blob(blob.clone()); - } - } - - pub fn clear_blobs(&mut self) { - self.downloaded_blobs = FixedBlobSidecarList::default(); - } -} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index 6c524786bf..d22f6b2cc9 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ 
-22,6 +22,7 @@ pub enum Error { SlotClockError, } +#[derive(PartialEq, Eq)] pub enum ErrorCategory { /// Internal Errors (not caused by peers) Internal, diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 91c776adc1..adc1a1e202 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -44,7 +44,7 @@ use ssz_types::{FixedVector, VariableList}; use std::num::NonZeroUsize; use std::{collections::HashSet, sync::Arc}; use types::blob_sidecar::BlobIdentifier; -use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256}; +use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; /// This represents the components of a partially available block /// @@ -432,11 +432,6 @@ impl Critical { Ok(()) } - /// Returns true if the block root is known, without altering the LRU ordering - pub fn has_block(&self, block_root: &Hash256) -> bool { - self.in_memory.peek(block_root).is_some() || self.store_keys.contains(block_root) - } - /// This only checks for the blobs in memory pub fn peek_blob( &self, @@ -503,6 +498,21 @@ impl Critical { } } + /// Removes and returns the pending_components corresponding to + /// the `block_root` or `None` if it does not exist + pub fn remove_pending_components(&mut self, block_root: Hash256) { + match self.in_memory.pop_entry(&block_root) { + Some { .. } => {} + None => { + // not in memory, is it in the store? + // We don't need to remove the data from the store as we have removed it from + // `store_keys` so we won't go looking for it on disk. The maintenance thread + // will remove it from disk the next time it runs. + self.store_keys.remove(&block_root); + } + } + } + /// Returns the number of pending component entries in memory. 
pub fn num_blocks(&self) -> usize { self.in_memory.len() @@ -549,8 +559,19 @@ impl OverflowLRUCache { } /// Returns true if the block root is known, without altering the LRU ordering - pub fn has_block(&self, block_root: &Hash256) -> bool { - self.critical.read().has_block(block_root) + pub fn get_execution_valid_block( + &self, + block_root: &Hash256, + ) -> Option>> { + self.critical + .read() + .peek_pending_components(block_root) + .and_then(|pending_components| { + pending_components + .executed_block + .as_ref() + .map(|block| block.block_cloned()) + }) } /// Fetch a blob from the cache without affecting the LRU ordering @@ -569,7 +590,7 @@ impl OverflowLRUCache { } } - pub fn with_pending_components>) -> R>( + pub fn peek_pending_components>) -> R>( &self, block_root: &Hash256, f: F, @@ -601,6 +622,11 @@ impl OverflowLRUCache { pending_components.merge_blobs(fixed_blobs); if pending_components.is_available() { + write_lock.put_pending_components( + block_root, + pending_components.clone(), + &self.overflow_store, + )?; // No need to hold the write lock anymore drop(write_lock); pending_components.make_available(|diet_block| { @@ -640,6 +666,11 @@ impl OverflowLRUCache { // Check if we have all components and entire set is consistent. 
if pending_components.is_available() { + write_lock.put_pending_components( + block_root, + pending_components.clone(), + &self.overflow_store, + )?; // No need to hold the write lock anymore drop(write_lock); pending_components.make_available(|diet_block| { @@ -655,6 +686,10 @@ impl OverflowLRUCache { } } + pub fn remove_pending_components(&self, block_root: Hash256) { + self.critical.write().remove_pending_components(block_root); + } + /// write all in memory objects to disk pub fn write_all_to_disk(&self) -> Result<(), AvailabilityCheckError> { let maintenance_lock = self.maintenance_lock.lock(); @@ -1189,10 +1224,17 @@ mod test { matches!(availability, Availability::Available(_)), "block doesn't have blobs, should be available" ); + assert_eq!( + cache.critical.read().in_memory.len(), + 1, + "cache should still have block as it hasn't been imported yet" + ); + // remove the blob to simulate successful import + cache.remove_pending_components(root); assert_eq!( cache.critical.read().in_memory.len(), 0, - "cache should be empty because we don't have blobs" + "cache should be empty now that block has been imported" ); } else { assert!( @@ -1257,6 +1299,12 @@ mod test { "block should be available: {:?}", availability ); + assert!( + cache.critical.read().in_memory.len() == 1, + "cache should still have available block until import" + ); + // remove the blob to simulate successful import + cache.remove_pending_components(root); assert!( cache.critical.read().in_memory.is_empty(), "cache should be empty now that all components available" @@ -1372,6 +1420,8 @@ mod test { .expect("should put blob"); if blob_index == expected_blobs - 1 { assert!(matches!(availability, Availability::Available(_))); + // remove the block from the cache to simulate import + cache.remove_pending_components(roots[0]); } else { // the first block should be brought back into memory assert!( diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs 
b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index f8a243bd9e..9775d54c02 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -37,6 +37,10 @@ impl DietAvailabilityPendingExecutedBlock { &self.block } + pub fn block_cloned(&self) -> Arc> { + self.block.clone() + } + pub fn num_blobs_expected(&self) -> usize { self.block .message() diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index 1fdcfdf8d0..5f91fe5d0c 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -20,6 +20,9 @@ pub struct ServerSentEventHandler { light_client_finality_update_tx: Sender>, light_client_optimistic_update_tx: Sender>, block_reward_tx: Sender>, + proposer_slashing_tx: Sender>, + attester_slashing_tx: Sender>, + bls_to_execution_change_tx: Sender>, log: Logger, } @@ -45,6 +48,9 @@ impl ServerSentEventHandler { let (light_client_finality_update_tx, _) = broadcast::channel(capacity); let (light_client_optimistic_update_tx, _) = broadcast::channel(capacity); let (block_reward_tx, _) = broadcast::channel(capacity); + let (proposer_slashing_tx, _) = broadcast::channel(capacity); + let (attester_slashing_tx, _) = broadcast::channel(capacity); + let (bls_to_execution_change_tx, _) = broadcast::channel(capacity); Self { attestation_tx, @@ -60,6 +66,9 @@ impl ServerSentEventHandler { light_client_finality_update_tx, light_client_optimistic_update_tx, block_reward_tx, + proposer_slashing_tx, + attester_slashing_tx, + bls_to_execution_change_tx, log, } } @@ -126,6 +135,18 @@ impl ServerSentEventHandler { .block_reward_tx .send(kind) .map(|count| log_count("block reward", count)), + EventKind::ProposerSlashing(_) => self + .proposer_slashing_tx + .send(kind) + .map(|count| log_count("proposer slashing", count)), + EventKind::AttesterSlashing(_) => self + 
.attester_slashing_tx + .send(kind) + .map(|count| log_count("attester slashing", count)), + EventKind::BlsToExecutionChange(_) => self + .bls_to_execution_change_tx + .send(kind) + .map(|count| log_count("bls to execution change", count)), }; if let Err(SendError(event)) = result { trace!(self.log, "No receivers registered to listen for event"; "event" => ?event); @@ -184,6 +205,18 @@ impl ServerSentEventHandler { self.block_reward_tx.subscribe() } + pub fn subscribe_attester_slashing(&self) -> Receiver> { + self.attester_slashing_tx.subscribe() + } + + pub fn subscribe_proposer_slashing(&self) -> Receiver> { + self.proposer_slashing_tx.subscribe() + } + + pub fn subscribe_bls_to_execution_change(&self) -> Receiver> { + self.bls_to_execution_change_tx.subscribe() + } + pub fn has_attestation_subscribers(&self) -> bool { self.attestation_tx.receiver_count() > 0 } @@ -227,4 +260,16 @@ impl ServerSentEventHandler { pub fn has_block_reward_subscribers(&self) -> bool { self.block_reward_tx.receiver_count() > 0 } + + pub fn has_proposer_slashing_subscribers(&self) -> bool { + self.proposer_slashing_tx.receiver_count() > 0 + } + + pub fn has_attester_slashing_subscribers(&self) -> bool { + self.attester_slashing_tx.receiver_count() > 0 + } + + pub fn has_bls_to_execution_change_subscribers(&self) -> bool { + self.bls_to_execution_change_tx.receiver_count() > 0 + } } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 221bb8b292..f419429e09 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -62,9 +62,10 @@ pub mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconBlockResponse, - BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - ForkChoiceError, LightClientProducerEvent, OverrideForkchoiceUpdate, ProduceBlockVerification, - StateSkipConfig, WhenSlotSkipped, 
INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, BlockProcessStatus, + ChainSegmentResult, ForkChoiceError, LightClientProducerEvent, OverrideForkchoiceUpdate, + ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; pub use self::beacon_snapshot::BeaconSnapshot; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index fc3f032cdc..4ceaf675ce 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -32,18 +32,11 @@ lazy_static! { "beacon_block_processing_successes_total", "Count of blocks processed without error" ); + // Keeping the existing "snapshot_cache" metric name as it would break existing dashboards pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE: Result = try_create_int_gauge( "beacon_block_processing_snapshot_cache_size", "Count snapshots in the snapshot cache" ); - pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES: Result = try_create_int_counter( - "beacon_block_processing_snapshot_cache_misses", - "Count of snapshot cache misses" - ); - pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_CLONES: Result = try_create_int_counter( - "beacon_block_processing_snapshot_cache_clones", - "Count of snapshot cache clones" - ); pub static ref BLOCK_PROCESSING_TIMES: Result = try_create_histogram("beacon_block_processing_seconds", "Full runtime of block processing"); pub static ref BLOCK_PROCESSING_BLOCK_ROOT: Result = try_create_histogram( @@ -1199,6 +1192,7 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { } let attestation_stats = beacon_chain.op_pool.attestation_stats(); + let chain_metrics = beacon_chain.metrics(); set_gauge_by_usize( &BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE, @@ -1207,7 +1201,7 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { set_gauge_by_usize( 
&BEACON_REQRESP_PRE_IMPORT_CACHE_SIZE, - beacon_chain.reqresp_pre_import_cache.read().len(), + chain_metrics.reqresp_pre_import_cache_len, ); let da_checker_metrics = beacon_chain.data_availability_checker.metrics(); diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index ad597bf92a..08b2a51720 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -703,6 +703,11 @@ impl, Cold: ItemStore> BackgroundMigrator Duration { - // If the block arrived at the attestation deadline or later, it might get re-orged. - Duration::from_secs(seconds_per_slot) / 3 -} - -/// This snapshot is to be used for verifying a child of `self.beacon_block`. -#[derive(Debug)] -pub struct PreProcessingSnapshot { - /// This state is equivalent to the `self.beacon_block.state_root()` state that has been - /// advanced forward one slot using `per_slot_processing`. This state is "primed and ready" for - /// the application of another block. - pub pre_state: BeaconState, - /// This value is only set to `Some` if the `pre_state` was *not* advanced forward. 
- pub beacon_state_root: Option, - pub beacon_block: SignedBeaconBlock>, - pub beacon_block_root: Hash256, -} - -impl From> for PreProcessingSnapshot { - fn from(snapshot: BeaconSnapshot) -> Self { - let beacon_state_root = Some(snapshot.beacon_state_root()); - Self { - pre_state: snapshot.beacon_state, - beacon_state_root, - beacon_block: snapshot.beacon_block.clone_as_blinded(), - beacon_block_root: snapshot.beacon_block_root, - } - } -} - -impl CacheItem { - pub fn new_without_pre_state(snapshot: BeaconSnapshot) -> Self { - Self { - beacon_block: snapshot.beacon_block, - beacon_block_root: snapshot.beacon_block_root, - beacon_state: snapshot.beacon_state, - pre_state: None, - } - } - - fn clone_to_snapshot_with(&self, clone_config: CloneConfig) -> BeaconSnapshot { - BeaconSnapshot { - beacon_state: self.beacon_state.clone_with(clone_config), - beacon_block: self.beacon_block.clone(), - beacon_block_root: self.beacon_block_root, - } - } - - pub fn into_pre_state(self) -> PreProcessingSnapshot { - // Do not include the beacon state root if the state has been advanced. - let beacon_state_root = - Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none()); - - PreProcessingSnapshot { - beacon_block: self.beacon_block.clone_as_blinded(), - beacon_block_root: self.beacon_block_root, - pre_state: self.pre_state.unwrap_or(self.beacon_state), - beacon_state_root, - } - } - - pub fn clone_as_pre_state(&self) -> PreProcessingSnapshot { - // Do not include the beacon state root if the state has been advanced. - let beacon_state_root = - Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none()); - - PreProcessingSnapshot { - beacon_block: self.beacon_block.clone_as_blinded(), - beacon_block_root: self.beacon_block_root, - pre_state: self - .pre_state - .as_ref() - .map_or_else(|| self.beacon_state.clone(), |pre_state| pre_state.clone()), - beacon_state_root, - } - } -} - -/// The information required for block production. 
-pub struct BlockProductionPreState { - /// This state may or may not have been advanced forward a single slot. - /// - /// See the documentation in the `crate::state_advance_timer` module for more information. - pub pre_state: BeaconState, - /// This value will only be `Some` if `self.pre_state` was **not** advanced forward a single - /// slot. - /// - /// This value can be used to avoid tree-hashing the state during the first call to - /// `per_slot_processing`. - pub state_root: Option, -} - -pub enum StateAdvance { - /// The cache does not contain the supplied block root. - BlockNotFound, - /// The cache contains the supplied block root but the state has already been advanced. - AlreadyAdvanced, - /// The cache contains the supplied block root and the state has not yet been advanced. - State { - state: Box>, - state_root: Hash256, - block_slot: Slot, - }, -} - -/// The item stored in the `SnapshotCache`. -pub struct CacheItem { - beacon_block: Arc>, - beacon_block_root: Hash256, - /// This state is equivalent to `self.beacon_block.state_root()`. - beacon_state: BeaconState, - /// This state is equivalent to `self.beacon_state` that has had `per_slot_processing` applied - /// to it. This state assists in optimizing block processing. - pre_state: Option>, -} - -impl Into> for CacheItem { - fn into(self) -> BeaconSnapshot { - BeaconSnapshot { - beacon_state: self.beacon_state, - beacon_block: self.beacon_block, - beacon_block_root: self.beacon_block_root, - } - } -} - -/// Provides a cache of `BeaconSnapshot` that is intended primarily for block processing. -/// -/// ## Cache Queuing -/// -/// The cache has a non-standard queue mechanism (specifically, it is not LRU). -/// -/// The cache has a max number of elements (`max_len`). Until `max_len` is achieved, all snapshots -/// are simply added to the queue. Once `max_len` is achieved, adding a new snapshot will cause an -/// existing snapshot to be ejected. 
The ejected snapshot will: -/// -/// - Never be the `head_block_root`. -/// - Be the snapshot with the lowest `state.slot` (ties broken arbitrarily). -pub struct SnapshotCache { - max_len: usize, - head_block_root: Hash256, - snapshots: Vec>, -} - -impl SnapshotCache { - /// Instantiate a new cache which contains the `head` snapshot. - /// - /// Setting `max_len = 0` is equivalent to setting `max_len = 1`. - pub fn new(max_len: usize, head: BeaconSnapshot) -> Self { - Self { - max_len: cmp::max(max_len, 1), - head_block_root: head.beacon_block_root, - snapshots: vec![CacheItem::new_without_pre_state(head)], - } - } - - /// The block roots of all snapshots contained in `self`. - pub fn beacon_block_roots(&self) -> Vec { - self.snapshots.iter().map(|s| s.beacon_block_root).collect() - } - - #[allow(clippy::len_without_is_empty)] - /// The number of snapshots contained in `self`. - pub fn len(&self) -> usize { - self.snapshots.len() - } - - /// Insert a snapshot, potentially removing an existing snapshot if `self` is at capacity (see - /// struct-level documentation for more info). - pub fn insert( - &mut self, - snapshot: BeaconSnapshot, - pre_state: Option>, - spec: &ChainSpec, - ) { - let parent_root = snapshot.beacon_block.message().parent_root(); - let item = CacheItem { - beacon_block: snapshot.beacon_block.clone(), - beacon_block_root: snapshot.beacon_block_root, - beacon_state: snapshot.beacon_state, - pre_state, - }; - - // Remove the grandparent of the block that was just inserted. - // - // Assuming it's unlikely to see re-orgs deeper than one block, this method helps keep the - // cache small by removing any states that already have more than one descendant. - // - // Remove the grandparent first to free up room in the cache. 
- let grandparent_result = - process_results(item.beacon_state.rev_iter_block_roots(spec), |iter| { - iter.map(|(_slot, root)| root) - .find(|root| *root != item.beacon_block_root && *root != parent_root) - }); - if let Ok(Some(grandparent_root)) = grandparent_result { - let head_block_root = self.head_block_root; - self.snapshots.retain(|snapshot| { - let root = snapshot.beacon_block_root; - root == head_block_root || root != grandparent_root - }); - } - - if self.snapshots.len() < self.max_len { - self.snapshots.push(item); - } else { - let insert_at = self - .snapshots - .iter() - .enumerate() - .filter_map(|(i, snapshot)| { - if snapshot.beacon_block_root != self.head_block_root { - Some((i, snapshot.beacon_state.slot())) - } else { - None - } - }) - .min_by_key(|(_i, slot)| *slot) - .map(|(i, _slot)| i); - - if let Some(i) = insert_at { - self.snapshots[i] = item; - } - } - } - - /// If available, returns a `CacheItem` that should be used for importing/processing a block. - /// The method will remove the block from `self`, carrying across any caches that may or may not - /// be built. - /// - /// In the event the block being processed was observed late, clone the cache instead of - /// moving it. This allows us to process the next block quickly in the case of a re-org. - /// Additionally, if the slot was skipped, clone the cache. This ensures blocks that are - /// later than 1 slot still have access to the cache and can be processed quickly. - pub fn get_state_for_block_processing( - &mut self, - block_root: Hash256, - block_slot: Slot, - block_delay: Option, - spec: &ChainSpec, - ) -> Option<(PreProcessingSnapshot, bool)> { - self.snapshots - .iter() - .position(|snapshot| snapshot.beacon_block_root == block_root) - .map(|i| { - if let Some(cache) = self.snapshots.get(i) { - // Avoid cloning the block during sync (when the `block_delay` is `None`). 
- if let Some(delay) = block_delay { - if delay >= minimum_block_delay_for_clone(spec.seconds_per_slot) - && delay <= Duration::from_secs(spec.seconds_per_slot) * 4 - || block_slot > cache.beacon_block.slot() + 1 - { - return (cache.clone_as_pre_state(), true); - } - } - } - (self.snapshots.remove(i).into_pre_state(), false) - }) - } - - /// If available, obtains a clone of a `BeaconState` that should be used for block production. - /// The clone will use `CloneConfig:all()`, ensuring any tree-hash cache is cloned too. - /// - /// ## Note - /// - /// This method clones the `BeaconState` (instead of removing it) since we assume that any block - /// we produce will soon be pushed to the `BeaconChain` for importing/processing. Keeping a copy - /// of that `BeaconState` in `self` will greatly help with import times. - pub fn get_state_for_block_production( - &self, - block_root: Hash256, - ) -> Option> { - self.snapshots - .iter() - .find(|snapshot| snapshot.beacon_block_root == block_root) - .map(|snapshot| { - if let Some(pre_state) = &snapshot.pre_state { - BlockProductionPreState { - pre_state: pre_state.clone_with(CloneConfig::all()), - state_root: None, - } - } else { - BlockProductionPreState { - pre_state: snapshot.beacon_state.clone_with(CloneConfig::all()), - state_root: Some(snapshot.beacon_block.state_root()), - } - } - }) - } - - /// If there is a snapshot with `block_root`, clone it and return the clone. 
- pub fn get_cloned( - &self, - block_root: Hash256, - clone_config: CloneConfig, - ) -> Option> { - self.snapshots - .iter() - .find(|snapshot| snapshot.beacon_block_root == block_root) - .map(|snapshot| snapshot.clone_to_snapshot_with(clone_config)) - } - - pub fn get_for_state_advance(&mut self, block_root: Hash256) -> StateAdvance { - if let Some(snapshot) = self - .snapshots - .iter_mut() - .find(|snapshot| snapshot.beacon_block_root == block_root) - { - if snapshot.pre_state.is_some() { - StateAdvance::AlreadyAdvanced - } else { - let cloned = snapshot - .beacon_state - .clone_with(CloneConfig::committee_caches_only()); - - StateAdvance::State { - state: Box::new(std::mem::replace(&mut snapshot.beacon_state, cloned)), - state_root: snapshot.beacon_block.state_root(), - block_slot: snapshot.beacon_block.slot(), - } - } - } else { - StateAdvance::BlockNotFound - } - } - - pub fn update_pre_state(&mut self, block_root: Hash256, state: BeaconState) -> Option<()> { - self.snapshots - .iter_mut() - .find(|snapshot| snapshot.beacon_block_root == block_root) - .map(|snapshot| { - snapshot.pre_state = Some(state); - }) - } - - /// Removes all snapshots from the queue that are less than or equal to the finalized epoch. - pub fn prune(&mut self, finalized_epoch: Epoch) { - self.snapshots.retain(|snapshot| { - snapshot.beacon_state.slot() > finalized_epoch.start_slot(E::slots_per_epoch()) - }) - } - - /// Inform the cache that the head of the beacon chain has changed. - /// - /// The snapshot that matches this `head_block_root` will never be ejected from the cache - /// during `Self::insert`. 
- pub fn update_head(&mut self, head_block_root: Hash256) { - self.head_block_root = head_block_root - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType}; - use types::{test_utils::generate_deterministic_keypair, BeaconBlock, MainnetEthSpec}; - - fn get_harness() -> BeaconChainHarness> { - let harness = BeaconChainHarness::builder(MainnetEthSpec) - .default_spec() - .deterministic_keypairs(1) - .fresh_ephemeral_store() - .build(); - - harness.advance_slot(); - - harness - } - - const CACHE_SIZE: usize = 4; - - fn get_snapshot(i: u64) -> BeaconSnapshot { - let spec = MainnetEthSpec::default_spec(); - - let beacon_state = get_harness().chain.head_beacon_state_cloned(); - - let signed_beacon_block = SignedBeaconBlock::from_block( - BeaconBlock::empty(&spec), - generate_deterministic_keypair(0) - .sk - .sign(Hash256::from_low_u64_be(42)), - ); - - BeaconSnapshot { - beacon_state, - beacon_block: Arc::new(signed_beacon_block), - beacon_block_root: Hash256::from_low_u64_be(i), - } - } - - #[test] - fn insert_get_prune_update() { - let spec = MainnetEthSpec::default_spec(); - let mut cache = SnapshotCache::new(CACHE_SIZE, get_snapshot(0)); - - // Insert a bunch of entries in the cache. It should look like this: - // - // Index Root - // 0 0 <--head - // 1 1 - // 2 2 - // 3 3 - for i in 1..CACHE_SIZE as u64 { - let mut snapshot = get_snapshot(i); - - // Each snapshot should be one slot into an epoch, with each snapshot one epoch apart. - *snapshot.beacon_state.slot_mut() = - Slot::from(i * MainnetEthSpec::slots_per_epoch() + 1); - - cache.insert(snapshot, None, &spec); - - assert_eq!( - cache.snapshots.len(), - i as usize + 1, - "cache length should be as expected" - ); - assert_eq!(cache.head_block_root, Hash256::from_low_u64_be(0)); - } - - // Insert a new value in the cache. 
Afterwards it should look like: - // - // Index Root - // 0 0 <--head - // 1 42 - // 2 2 - // 3 3 - assert_eq!(cache.snapshots.len(), CACHE_SIZE); - cache.insert(get_snapshot(42), None, &spec); - assert_eq!(cache.snapshots.len(), CACHE_SIZE); - - assert!( - cache - .get_state_for_block_processing( - Hash256::from_low_u64_be(1), - Slot::new(0), - None, - &spec - ) - .is_none(), - "the snapshot with the lowest slot should have been removed during the insert function" - ); - assert!(cache - .get_cloned(Hash256::from_low_u64_be(1), CloneConfig::none()) - .is_none()); - - assert_eq!( - cache - .get_cloned(Hash256::from_low_u64_be(0), CloneConfig::none()) - .expect("the head should still be in the cache") - .beacon_block_root, - Hash256::from_low_u64_be(0), - "get_cloned should get the correct snapshot" - ); - assert_eq!( - cache - .get_state_for_block_processing( - Hash256::from_low_u64_be(0), - Slot::new(0), - None, - &spec - ) - .expect("the head should still be in the cache") - .0 - .beacon_block_root, - Hash256::from_low_u64_be(0), - "get_state_for_block_processing should get the correct snapshot" - ); - - assert_eq!( - cache.snapshots.len(), - CACHE_SIZE - 1, - "get_state_for_block_processing should shorten the cache" - ); - - // Prune the cache. Afterwards it should look like: - // - // Index Root - // 0 2 - // 1 3 - cache.prune(Epoch::new(2)); - - assert_eq!(cache.snapshots.len(), 2); - - cache.update_head(Hash256::from_low_u64_be(2)); - - // Over-fill the cache so it needs to eject some old values on insert. - for i in 0..CACHE_SIZE as u64 { - cache.insert(get_snapshot(u64::max_value() - i), None, &spec); - } - - // Ensure that the new head value was not removed from the cache. 
- assert_eq!( - cache - .get_state_for_block_processing( - Hash256::from_low_u64_be(2), - Slot::new(0), - None, - &spec - ) - .expect("the new head should still be in the cache") - .0 - .beacon_block_root, - Hash256::from_low_u64_be(2), - "get_state_for_block_processing should get the correct snapshot" - ); - } -} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 8fbd5d575f..465aa70782 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -684,6 +684,7 @@ where .set_builder_url( SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(), None, + None, ) .unwrap(); @@ -1881,6 +1882,7 @@ where block_root, RpcBlock::new(Some(block_root), block, sidecars).unwrap(), NotifyExecutionLayer::Yes, + BlockImportSource::RangeSync, || Ok(()), ) .await? @@ -1907,6 +1909,7 @@ where block_root, RpcBlock::new(Some(block_root), block, sidecars).unwrap(), NotifyExecutionLayer::Yes, + BlockImportSource::RangeSync, || Ok(()), ) .await? 
diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 98a112daff..9c196b12e1 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -473,6 +473,7 @@ async fn assert_invalid_signature( ) .unwrap(), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await; @@ -541,6 +542,7 @@ async fn invalid_signature_gossip_block() { signed_block.canonical_root(), Arc::new(signed_block), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await, @@ -875,6 +877,7 @@ async fn block_gossip_verification() { gossip_verified.block_root, gossip_verified, NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -1165,6 +1168,7 @@ async fn verify_block_for_gossip_slashing_detection() { verified_block.block_root, verified_block, NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -1196,6 +1200,7 @@ async fn verify_block_for_gossip_doppelganger_detection() { verified_block.block_root, verified_block, NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -1342,6 +1347,7 @@ async fn add_base_block_to_altair_chain() { base_block.canonical_root(), Arc::new(base_block.clone()), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -1477,6 +1483,7 @@ async fn add_altair_block_to_base_chain() { altair_block.canonical_root(), Arc::new(altair_block.clone()), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 0ef348319a..0c36d21f2e 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -702,6 +702,7 @@ async fn invalidates_all_descendants() { fork_block.canonical_root(), fork_block, 
NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -802,6 +803,7 @@ async fn switches_heads() { fork_block.canonical_root(), fork_block, NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -1061,7 +1063,7 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for import. assert!(matches!( - rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes, + rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), ).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) @@ -1352,6 +1354,7 @@ async fn build_optimistic_chain( block.canonical_root(), block, NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -1926,6 +1929,7 @@ async fn recover_from_invalid_head_by_importing_blocks() { fork_block.canonical_root(), fork_block.clone(), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index ba8a6bf701..5da92573f7 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2458,6 +2458,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { full_block.canonical_root(), RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -2676,6 +2677,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { invalid_fork_block.canonical_root(), invalid_fork_block.clone(), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -2689,6 +2691,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { valid_fork_block.canonical_root(), valid_fork_block.clone(), 
NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index e27180a002..2f496eecd7 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -12,7 +12,8 @@ use lazy_static::lazy_static; use operation_pool::PersistedOperationPool; use state_processing::{per_slot_processing, per_slot_processing::Error as SlotProcessingError}; use types::{ - BeaconState, BeaconStateError, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot, + BeaconState, BeaconStateError, BlockImportSource, EthSpec, Hash256, Keypair, MinimalEthSpec, + RelativeEpoch, Slot, }; // Should ideally be divisible by 3. @@ -686,6 +687,7 @@ async fn run_skip_slot_test(skip_slots: u64) { harness_a.chain.head_snapshot().beacon_block_root, harness_a.get_head_block(), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await diff --git a/beacon_node/beacon_processor/Cargo.toml b/beacon_node/beacon_processor/Cargo.toml index 6c49a28ec8..3373dd1c72 100644 --- a/beacon_node/beacon_processor/Cargo.toml +++ b/beacon_node/beacon_processor/Cargo.toml @@ -15,10 +15,7 @@ strum = { workspace = true } task_executor = { workspace = true } slot_clock = { workspace = true } lighthouse_network = { workspace = true } -hex = { workspace = true } -derivative = { workspace = true } types = { workspace = true } -ethereum_ssz = { workspace = true } lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } parking_lot = { workspace = true } diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index fee55b39ad..5bf13d82b7 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -60,7 +60,9 @@ use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::mpsc; use tokio::sync::mpsc::error::TrySendError; -use types::{Attestation, 
Hash256, SignedAggregateAndProof, SubnetId}; +use types::{ + Attestation, BeaconState, ChainSpec, Hash256, RelativeEpoch, SignedAggregateAndProof, SubnetId, +}; use types::{EthSpec, Slot}; use work_reprocessing_queue::IgnoredRpcBlock; use work_reprocessing_queue::{ @@ -85,123 +87,98 @@ const MAX_IDLE_QUEUE_LEN: usize = 16_384; /// The maximum size of the channel for re-processing work events. const DEFAULT_MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 3 * DEFAULT_MAX_WORK_EVENT_QUEUE_LEN / 4; -/// The maximum number of queued `Attestation` objects that will be stored before we start dropping -/// them. -const MAX_UNAGGREGATED_ATTESTATION_QUEUE_LEN: usize = 16_384; +/// Over-provision queues based on active validator count by some factor. The beacon chain has +/// strict churns that prevent the validator set size from changing rapidly. By over-provisioning +/// slightly, we don't need to adjust the queues during the lifetime of a process. +const ACTIVE_VALIDATOR_COUNT_OVERPROVISION_PERCENT: usize = 110; -/// The maximum number of queued `Attestation` objects that will be stored before we start dropping -/// them. 
-const MAX_UNAGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 8_192; +/// Maximum number of queued items that will be stored before dropping them +pub struct BeaconProcessorQueueLengths { + aggregate_queue: usize, + attestation_queue: usize, + unknown_block_aggregate_queue: usize, + unknown_block_attestation_queue: usize, + sync_message_queue: usize, + sync_contribution_queue: usize, + gossip_voluntary_exit_queue: usize, + gossip_proposer_slashing_queue: usize, + gossip_attester_slashing_queue: usize, + finality_update_queue: usize, + optimistic_update_queue: usize, + unknown_light_client_update_queue: usize, + rpc_block_queue: usize, + rpc_blob_queue: usize, + chain_segment_queue: usize, + backfill_chain_segment: usize, + gossip_block_queue: usize, + gossip_blob_queue: usize, + delayed_block_queue: usize, + status_queue: usize, + bbrange_queue: usize, + bbroots_queue: usize, + blbroots_queue: usize, + blbrange_queue: usize, + gossip_bls_to_execution_change_queue: usize, + lc_bootstrap_queue: usize, + lc_optimistic_update_queue: usize, + lc_finality_update_queue: usize, + api_request_p0_queue: usize, + api_request_p1_queue: usize, +} -/// The maximum number of queued `SignedAggregateAndProof` objects that will be stored before we -/// start dropping them. -const MAX_AGGREGATED_ATTESTATION_QUEUE_LEN: usize = 4_096; +impl BeaconProcessorQueueLengths { + pub fn from_state( + state: &BeaconState, + spec: &ChainSpec, + ) -> Result { + let active_validator_count = + match state.get_cached_active_validator_indices(RelativeEpoch::Current) { + Ok(indices) => indices.len(), + Err(_) => state + .get_active_validator_indices(state.current_epoch(), spec) + .map_err(|e| format!("Error computing active indices: {:?}", e))? 
+ .len(), + }; + let active_validator_count = + (ACTIVE_VALIDATOR_COUNT_OVERPROVISION_PERCENT * active_validator_count) / 100; + let slots_per_epoch = E::slots_per_epoch() as usize; -/// The maximum number of queued `SignedAggregateAndProof` objects that will be stored before we -/// start dropping them. -const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `SignedBeaconBlock` objects received on gossip that will be stored -/// before we start dropping them. -const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `BlobSidecar` objects received on gossip that -/// will be stored before we start dropping them. -const MAX_GOSSIP_BLOB_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but -/// within acceptable clock disparity) that will be queued before we start dropping them. -const MAX_DELAYED_BLOCK_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `SignedVoluntaryExit` objects received on gossip that will be stored -/// before we start dropping them. -const MAX_GOSSIP_EXIT_QUEUE_LEN: usize = 4_096; - -/// The maximum number of queued `ProposerSlashing` objects received on gossip that will be stored -/// before we start dropping them. -const MAX_GOSSIP_PROPOSER_SLASHING_QUEUE_LEN: usize = 4_096; - -/// The maximum number of queued `AttesterSlashing` objects received on gossip that will be stored -/// before we start dropping them. -const MAX_GOSSIP_ATTESTER_SLASHING_QUEUE_LEN: usize = 4_096; - -/// The maximum number of queued `LightClientFinalityUpdate` objects received on gossip that will be stored -/// before we start dropping them. -const MAX_GOSSIP_FINALITY_UPDATE_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `LightClientOptimisticUpdate` objects received on gossip that will be stored -/// before we start dropping them. 
-const MAX_GOSSIP_OPTIMISTIC_UPDATE_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `LightClientOptimisticUpdate` objects received on gossip that will be stored -/// for reprocessing before we start dropping them. -const MAX_GOSSIP_OPTIMISTIC_UPDATE_REPROCESS_QUEUE_LEN: usize = 128; - -/// The maximum number of queued `SyncCommitteeMessage` objects that will be stored before we start dropping -/// them. -const MAX_SYNC_MESSAGE_QUEUE_LEN: usize = 2048; - -/// The maximum number of queued `SignedContributionAndProof` objects that will be stored before we -/// start dropping them. -const MAX_SYNC_CONTRIBUTION_QUEUE_LEN: usize = 1024; - -/// The maximum number of queued `SignedBeaconBlock` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_RPC_BLOCK_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `BlobSidecar` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_RPC_BLOB_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `Vec` objects received during syncing that will -/// be stored before we start dropping them. -const MAX_CHAIN_SEGMENT_QUEUE_LEN: usize = 64; - -/// The maximum number of queued `StatusMessage` objects received from the network RPC that will be -/// stored before we start dropping them. -const MAX_STATUS_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `BlocksByRangeRequest` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `BlobsByRangeRequest` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_BLOBS_BY_RANGE_QUEUE_LEN: usize = 1024; - -/// The maximum number of queued `BlocksByRootRequest` objects received from the network RPC that -/// will be stored before we start dropping them. 
-const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `BlobsByRootRequest` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_BLOBS_BY_ROOTS_QUEUE_LEN: usize = 1_024; - -/// Maximum number of `SignedBlsToExecutionChange` messages to queue before dropping them. -/// -/// This value is set high to accommodate the large spike that is expected immediately after Capella -/// is activated. -const MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN: usize = 16_384; - -/// The maximum number of queued `LightClientBootstrapRequest` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `LightClientOptimisticUpdateRequest` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUEUE_LEN: usize = 512; - -/// The maximum number of queued `LightClientFinalityUpdateRequest` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_LIGHT_CLIENT_FINALITY_UPDATE_QUEUE_LEN: usize = 512; - -/// The maximum number of priority-0 (highest priority) messages that will be queued before -/// they begin to be dropped. -const MAX_API_REQUEST_P0_QUEUE_LEN: usize = 1_024; - -/// The maximum number of priority-1 (second-highest priority) messages that will be queued before -/// they begin to be dropped. 
-const MAX_API_REQUEST_P1_QUEUE_LEN: usize = 1_024; + Ok(Self { + aggregate_queue: 4096, + unknown_block_aggregate_queue: 1024, + // Capacity for a full slot's worth of attestations if subscribed to all subnets + attestation_queue: active_validator_count / slots_per_epoch, + // Capacity for a full slot's worth of attestations if subscribed to all subnets + unknown_block_attestation_queue: active_validator_count / slots_per_epoch, + sync_message_queue: 2048, + sync_contribution_queue: 1024, + gossip_voluntary_exit_queue: 4096, + gossip_proposer_slashing_queue: 4096, + gossip_attester_slashing_queue: 4096, + finality_update_queue: 1024, + optimistic_update_queue: 1024, + unknown_light_client_update_queue: 128, + rpc_block_queue: 1024, + rpc_blob_queue: 1024, + chain_segment_queue: 64, + backfill_chain_segment: 64, + gossip_block_queue: 1024, + gossip_blob_queue: 1024, + delayed_block_queue: 1024, + status_queue: 1024, + bbrange_queue: 1024, + bbroots_queue: 1024, + blbroots_queue: 1024, + blbrange_queue: 1024, + gossip_bls_to_execution_change_queue: 16384, + lc_bootstrap_queue: 1024, + lc_optimistic_update_queue: 512, + lc_finality_update_queue: 512, + api_request_p0_queue: 1024, + api_request_p1_queue: 1024, + }) + } +} /// The name of the manager tokio task. const MANAGER_TASK_NAME: &str = "beacon_processor_manager"; @@ -772,6 +749,7 @@ impl BeaconProcessor { /// /// The optional `work_journal_tx` allows for an outside process to receive a log of all work /// events processed by `self`. This should only be used during testing. + #[allow(clippy::too_many_arguments)] pub fn spawn_manager( mut self, event_rx: mpsc::Receiver>, @@ -780,6 +758,7 @@ impl BeaconProcessor { work_journal_tx: Option>, slot_clock: S, maximum_gossip_clock_disparity: Duration, + queue_lengths: BeaconProcessorQueueLengths, ) -> Result<(), String> { // Used by workers to communicate that they are finished a task. 
let (idle_tx, idle_rx) = mpsc::channel::<()>(MAX_IDLE_QUEUE_LEN); @@ -787,61 +766,61 @@ impl BeaconProcessor { // Using LIFO queues for attestations since validator profits rely upon getting fresh // attestations into blocks. Additionally, later attestations contain more information than // earlier ones, so we consider them more valuable. - let mut aggregate_queue = LifoQueue::new(MAX_AGGREGATED_ATTESTATION_QUEUE_LEN); + let mut aggregate_queue = LifoQueue::new(queue_lengths.aggregate_queue); let mut aggregate_debounce = TimeLatch::default(); - let mut attestation_queue = LifoQueue::new(MAX_UNAGGREGATED_ATTESTATION_QUEUE_LEN); + let mut attestation_queue = LifoQueue::new(queue_lengths.attestation_queue); let mut attestation_debounce = TimeLatch::default(); let mut unknown_block_aggregate_queue = - LifoQueue::new(MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN); + LifoQueue::new(queue_lengths.unknown_block_aggregate_queue); let mut unknown_block_attestation_queue = - LifoQueue::new(MAX_UNAGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN); + LifoQueue::new(queue_lengths.unknown_block_attestation_queue); - let mut sync_message_queue = LifoQueue::new(MAX_SYNC_MESSAGE_QUEUE_LEN); - let mut sync_contribution_queue = LifoQueue::new(MAX_SYNC_CONTRIBUTION_QUEUE_LEN); + let mut sync_message_queue = LifoQueue::new(queue_lengths.sync_message_queue); + let mut sync_contribution_queue = LifoQueue::new(queue_lengths.sync_contribution_queue); // Using a FIFO queue for voluntary exits since it prevents exit censoring. I don't have // a strong feeling about queue type for exits. - let mut gossip_voluntary_exit_queue = FifoQueue::new(MAX_GOSSIP_EXIT_QUEUE_LEN); + let mut gossip_voluntary_exit_queue = + FifoQueue::new(queue_lengths.gossip_voluntary_exit_queue); // Using a FIFO queue for slashing to prevent people from flushing their slashings from the // queues with lots of junk messages. 
let mut gossip_proposer_slashing_queue = - FifoQueue::new(MAX_GOSSIP_PROPOSER_SLASHING_QUEUE_LEN); + FifoQueue::new(queue_lengths.gossip_proposer_slashing_queue); let mut gossip_attester_slashing_queue = - FifoQueue::new(MAX_GOSSIP_ATTESTER_SLASHING_QUEUE_LEN); + FifoQueue::new(queue_lengths.gossip_attester_slashing_queue); // Using a FIFO queue for light client updates to maintain sequence order. - let mut finality_update_queue = FifoQueue::new(MAX_GOSSIP_FINALITY_UPDATE_QUEUE_LEN); - let mut optimistic_update_queue = FifoQueue::new(MAX_GOSSIP_OPTIMISTIC_UPDATE_QUEUE_LEN); + let mut finality_update_queue = FifoQueue::new(queue_lengths.finality_update_queue); + let mut optimistic_update_queue = FifoQueue::new(queue_lengths.optimistic_update_queue); let mut unknown_light_client_update_queue = - FifoQueue::new(MAX_GOSSIP_OPTIMISTIC_UPDATE_REPROCESS_QUEUE_LEN); + FifoQueue::new(queue_lengths.unknown_light_client_update_queue); // Using a FIFO queue since blocks need to be imported sequentially. 
- let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN); - let mut rpc_blob_queue = FifoQueue::new(MAX_RPC_BLOB_QUEUE_LEN); - let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); - let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); - let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); - let mut gossip_blob_queue = FifoQueue::new(MAX_GOSSIP_BLOB_QUEUE_LEN); - let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN); + let mut rpc_block_queue = FifoQueue::new(queue_lengths.rpc_block_queue); + let mut rpc_blob_queue = FifoQueue::new(queue_lengths.rpc_blob_queue); + let mut chain_segment_queue = FifoQueue::new(queue_lengths.chain_segment_queue); + let mut backfill_chain_segment = FifoQueue::new(queue_lengths.backfill_chain_segment); + let mut gossip_block_queue = FifoQueue::new(queue_lengths.gossip_block_queue); + let mut gossip_blob_queue = FifoQueue::new(queue_lengths.gossip_blob_queue); + let mut delayed_block_queue = FifoQueue::new(queue_lengths.delayed_block_queue); - let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); - let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN); - let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN); - let mut blbroots_queue = FifoQueue::new(MAX_BLOBS_BY_ROOTS_QUEUE_LEN); - let mut blbrange_queue = FifoQueue::new(MAX_BLOBS_BY_RANGE_QUEUE_LEN); + let mut status_queue = FifoQueue::new(queue_lengths.status_queue); + let mut bbrange_queue = FifoQueue::new(queue_lengths.bbrange_queue); + let mut bbroots_queue = FifoQueue::new(queue_lengths.bbroots_queue); + let mut blbroots_queue = FifoQueue::new(queue_lengths.blbroots_queue); + let mut blbrange_queue = FifoQueue::new(queue_lengths.blbrange_queue); let mut gossip_bls_to_execution_change_queue = - FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN); + FifoQueue::new(queue_lengths.gossip_bls_to_execution_change_queue); - let mut lc_bootstrap_queue = 
FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN); + let mut lc_bootstrap_queue = FifoQueue::new(queue_lengths.lc_bootstrap_queue); let mut lc_optimistic_update_queue = - FifoQueue::new(MAX_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUEUE_LEN); - let mut lc_finality_update_queue = - FifoQueue::new(MAX_LIGHT_CLIENT_FINALITY_UPDATE_QUEUE_LEN); + FifoQueue::new(queue_lengths.lc_optimistic_update_queue); + let mut lc_finality_update_queue = FifoQueue::new(queue_lengths.lc_finality_update_queue); - let mut api_request_p0_queue = FifoQueue::new(MAX_API_REQUEST_P0_QUEUE_LEN); - let mut api_request_p1_queue = FifoQueue::new(MAX_API_REQUEST_P1_QUEUE_LEN); + let mut api_request_p0_queue = FifoQueue::new(queue_lengths.api_request_p0_queue); + let mut api_request_p1_queue = FifoQueue::new(queue_lengths.api_request_p1_queue); // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index 21b9b84133..c3658f45c7 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -9,5 +9,4 @@ reqwest = { workspace = true } sensitive_url = { workspace = true } eth2 = { workspace = true } serde = { workspace = true } -serde_json = { workspace = true } lighthouse_version = { workspace = true } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 2b373292f3..91ee00a65f 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -29,10 +29,13 @@ pub struct Timeouts { get_builder_status: Duration, } -impl Default for Timeouts { - fn default() -> Self { +impl Timeouts { + fn new(get_header_timeout: Option) -> Self { + let get_header = + get_header_timeout.unwrap_or(Duration::from_millis(DEFAULT_GET_HEADER_TIMEOUT_MILLIS)); + Self { - get_header: Duration::from_millis(DEFAULT_GET_HEADER_TIMEOUT_MILLIS), + 
get_header, post_validators: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), post_blinded_blocks: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), get_builder_status: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), @@ -49,13 +52,17 @@ pub struct BuilderHttpClient { } impl BuilderHttpClient { - pub fn new(server: SensitiveUrl, user_agent: Option) -> Result { + pub fn new( + server: SensitiveUrl, + user_agent: Option, + builder_header_timeout: Option, + ) -> Result { let user_agent = user_agent.unwrap_or(DEFAULT_USER_AGENT.to_string()); let client = reqwest::Client::builder().user_agent(&user_agent).build()?; Ok(Self { client, server, - timeouts: Timeouts::default(), + timeouts: Timeouts::new(builder_header_timeout), user_agent, }) } diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 16c4a947a6..4ac035d17b 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -16,8 +16,6 @@ store = { workspace = true } network = { workspace = true } timer = { path = "../timer" } lighthouse_network = { workspace = true } -logging = { workspace = true } -parking_lot = { workspace = true } types = { workspace = true } eth2_config = { workspace = true } slot_clock = { workspace = true } @@ -44,6 +42,4 @@ slasher_service = { path = "../../slasher/service" } monitoring_api = { workspace = true } execution_layer = { workspace = true } beacon_processor = { workspace = true } -num_cpus = { workspace = true } ethereum_ssz = { workspace = true } -tree_hash = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 2af4e74c22..393ce35f00 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -19,8 +19,8 @@ use beacon_chain::{ store::{HotColdDB, ItemStore, LevelDB, StoreConfig}, BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler, }; -use beacon_processor::BeaconProcessorConfig; use beacon_processor::{BeaconProcessor, 
BeaconProcessorChannels}; +use beacon_processor::{BeaconProcessorConfig, BeaconProcessorQueueLengths}; use environment::RuntimeContext; use eth1::{Config as Eth1Config, Service as Eth1Service}; use eth2::{ @@ -884,6 +884,14 @@ where None, beacon_chain.slot_clock.clone(), beacon_chain.spec.maximum_gossip_clock_disparity(), + BeaconProcessorQueueLengths::from_state( + &beacon_chain + .canonical_head + .cached_head() + .snapshot + .beacon_state, + &beacon_chain.spec, + )?, )?; } diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index a6fd07789d..0a2f24748d 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -434,11 +434,9 @@ async fn capella_readiness_logging( .canonical_head .cached_head() .snapshot - .beacon_block - .message() - .body() - .execution_payload() - .map_or(false, |payload| payload.withdrawals_root().is_ok()); + .beacon_state + .fork_name_unchecked() + >= ForkName::Capella; let has_execution_layer = beacon_chain.execution_layer.is_some(); @@ -496,11 +494,9 @@ async fn deneb_readiness_logging( .canonical_head .cached_head() .snapshot - .beacon_block - .message() - .body() - .execution_payload() - .map_or(false, |payload| payload.blob_gas_used().is_ok()); + .beacon_state + .fork_name_unchecked() + >= ForkName::Deneb; let has_execution_layer = beacon_chain.execution_layer.is_some(); @@ -549,17 +545,13 @@ async fn electra_readiness_logging( beacon_chain: &BeaconChain, log: &Logger, ) { - // TODO(electra): Once Electra has features, this code can be swapped back. 
- let electra_completed = false; - //let electra_completed = beacon_chain - // .canonical_head - // .cached_head() - // .snapshot - // .beacon_block - // .message() - // .body() - // .execution_payload() - // .map_or(false, |payload| payload.electra_placeholder().is_ok()); + let electra_completed = beacon_chain + .canonical_head + .cached_head() + .snapshot + .beacon_state + .fork_name_unchecked() + >= ForkName::Electra; let has_execution_layer = beacon_chain.execution_layer.is_some(); diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 2f716cd19b..2ffca4a571 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -11,12 +11,9 @@ sloggers = { workspace = true } environment = { workspace = true } [dependencies] -reqwest = { workspace = true } execution_layer = { workspace = true } futures = { workspace = true } -serde_json = { workspace = true } serde = { workspace = true } -hex = { workspace = true } types = { workspace = true } merkle_proof = { workspace = true } ethereum_ssz = { workspace = true } diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 31082394ba..d68a8b6f28 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -23,7 +23,7 @@ use tokio::time::{interval_at, Duration, Instant}; use types::{ChainSpec, DepositTreeSnapshot, Eth1Data, EthSpec, Unsigned}; /// Indicates the default eth1 chain id we use for the deposit contract. -pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Goerli; +pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Mainnet; /// Indicates the default eth1 endpoint. pub const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545"; @@ -266,7 +266,7 @@ pub struct Config { pub endpoint: Eth1Endpoint, /// The address the `BlockCache` and `DepositCache` should assume is the canonical deposit contract. pub deposit_contract_address: String, - /// The eth1 chain id where the deposit contract is deployed (Goerli/Mainnet). 
+ /// The eth1 chain id where the deposit contract is deployed (Holesky/Mainnet). pub chain_id: Eth1Id, /// Defines the first block that the `DepositCache` will start searching for deposit logs. /// @@ -450,11 +450,6 @@ impl Service { /// Returns the follow distance that has been shortened to accommodate for differences in the /// spacing between blocks. - /// - /// ## Notes - /// - /// This is useful since the spec declares `SECONDS_PER_ETH1_BLOCK` to be `14`, whilst it is - /// actually `15` on Goerli. pub fn cache_follow_distance(&self) -> u64 { self.config().cache_follow_distance() } diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 28cd16e4ef..ff147ad3b4 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -8,9 +8,7 @@ edition = { workspace = true } [dependencies] types = { workspace = true } tokio = { workspace = true } -async-trait = "0.1.51" slog = { workspace = true } -futures = { workspace = true } sensitive_url = { workspace = true } reqwest = { workspace = true } ethereum_serde_utils = { workspace = true } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index f3f059a435..ce1e0fec5d 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -632,6 +632,9 @@ impl ExecutionPayloadBodyV1 { withdrawals, blob_gas_used: header.blob_gas_used, excess_blob_gas: header.excess_blob_gas, + // TODO(electra) + deposit_receipts: <_>::default(), + withdrawal_requests: <_>::default(), })) } else { Err(format!( diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 93705a1692..9c8a91909c 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -238,7 +238,6 @@ pub mod deposit_methods { /// Represents an eth1 chain/network id. 
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub enum Eth1Id { - Goerli, Mainnet, Custom(u64), } @@ -262,7 +261,6 @@ pub mod deposit_methods { fn into(self) -> u64 { match self { Eth1Id::Mainnet => 1, - Eth1Id::Goerli => 5, Eth1Id::Custom(id) => id, } } @@ -273,7 +271,6 @@ pub mod deposit_methods { let into = |x: Eth1Id| -> u64 { x.into() }; match id { id if id == into(Eth1Id::Mainnet) => Eth1Id::Mainnet, - id if id == into(Eth1Id::Goerli) => Eth1Id::Goerli, id => Eth1Id::Custom(id), } } diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 5c4d6ab1ac..306972ada2 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -319,6 +319,9 @@ impl From> for ExecutionPayloadElectra .into(), blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, + // TODO(electra) + deposit_receipts: Default::default(), + withdrawal_requests: Default::default(), } } } @@ -752,6 +755,9 @@ pub mod serde_logs_bloom { #[serde(rename_all = "camelCase")] pub struct JsonClientVersionV1 { pub code: String, + // This `default` is required until Geth v1.13.x is no longer supported on mainnet. + // See: https://github.com/ethereum/go-ethereum/pull/29351 + #[serde(default)] pub name: String, pub version: String, pub commit: String, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 668a5ce84b..f8806bcd32 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -370,6 +370,9 @@ pub struct Config { pub execution_endpoint: Option, /// Endpoint urls for services providing the builder api. pub builder_url: Option, + /// The timeout value used when making a request to fetch a block header + /// from the builder api. 
+ pub builder_header_timeout: Option, /// User agent to send with requests to the builder API. pub builder_user_agent: Option, /// JWT secret for the above endpoint running the engine api. @@ -400,6 +403,7 @@ impl ExecutionLayer { execution_endpoint: url, builder_url, builder_user_agent, + builder_header_timeout, secret_file, suggested_fee_recipient, jwt_id, @@ -469,7 +473,7 @@ impl ExecutionLayer { }; if let Some(builder_url) = builder_url { - el.set_builder_url(builder_url, builder_user_agent)?; + el.set_builder_url(builder_url, builder_user_agent, builder_header_timeout)?; } Ok(el) @@ -491,9 +495,14 @@ impl ExecutionLayer { &self, builder_url: SensitiveUrl, builder_user_agent: Option, + builder_header_timeout: Option, ) -> Result<(), Error> { - let builder_client = BuilderHttpClient::new(builder_url.clone(), builder_user_agent) - .map_err(Error::Builder)?; + let builder_client = BuilderHttpClient::new( + builder_url.clone(), + builder_user_agent, + builder_header_timeout, + ) + .map_err(Error::Builder)?; info!( self.log(), "Using external block builder"; @@ -2003,6 +2012,11 @@ impl ExecutionLayer { withdrawals, blob_gas_used: electra_block.blob_gas_used, excess_blob_gas: electra_block.excess_blob_gas, + // TODO(electra) + // deposit_receipts: electra_block.deposit_receipts, + // withdrawal_requests: electra_block.withdrawal_requests, + deposit_receipts: <_>::default(), + withdrawal_requests: <_>::default(), }) } }; diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 77e12bcef6..e80c6b2370 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -659,6 +659,8 @@ impl ExecutionBlockGenerator { withdrawals: pa.withdrawals.clone().into(), blob_gas_used: 0, excess_blob_gas: 0, + deposit_receipts: vec![].into(), + withdrawal_requests: vec![].into(), 
}), _ => unreachable!(), }, diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index f99fcb55bf..1252e0100b 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -1,7 +1,3 @@ -//! NOTE: These tests will not pass unless an anvil is running on `ENDPOINT` (see below). -//! -//! You can start a suitable instance using the `anvil_test_node.sh` script in the `scripts` -//! dir in the root of the `lighthouse` repo. #![cfg(test)] use environment::{Environment, EnvironmentBuilder}; use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5f4620589e..02db6b6a05 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2121,14 +2121,7 @@ pub fn serve( task_spawner: TaskSpawner, eth1_service: eth1::Service| { task_spawner.blocking_response_task(Priority::P1, move || match accept_header { - Some(api_types::Accept::Json) | None => { - let snapshot = eth1_service.get_deposit_snapshot(); - Ok( - warp::reply::json(&api_types::GenericResponse::from(snapshot)) - .into_response(), - ) - } - _ => eth1_service + Some(api_types::Accept::Ssz) => eth1_service .get_deposit_snapshot() .map(|snapshot| { Response::builder() @@ -2154,6 +2147,13 @@ pub fn serve( )) }) }), + _ => { + let snapshot = eth1_service.get_deposit_snapshot(); + Ok( + warp::reply::json(&api_types::GenericResponse::from(snapshot)) + .into_response(), + ) + } }) }, ); @@ -4358,6 +4358,15 @@ pub fn serve( api_types::EventTopic::BlockReward => { event_handler.subscribe_block_reward() } + api_types::EventTopic::AttesterSlashing => { + event_handler.subscribe_attester_slashing() + } + api_types::EventTopic::ProposerSlashing => { + event_handler.subscribe_proposer_slashing() + } + api_types::EventTopic::BlsToExecutionChange => { + event_handler.subscribe_bls_to_execution_change() + } }; receivers.push( diff --git a/beacon_node/http_api/src/metrics.rs 
b/beacon_node/http_api/src/metrics.rs index 26ee183c83..3eada3a3d4 100644 --- a/beacon_node/http_api/src/metrics.rs +++ b/beacon_node/http_api/src/metrics.rs @@ -31,7 +31,13 @@ lazy_static::lazy_static! { ); pub static ref HTTP_API_BLOCK_BROADCAST_DELAY_TIMES: Result = try_create_histogram_vec( "http_api_block_broadcast_delay_times", - "Time between start of the slot and when the block was broadcast", + "Time between start of the slot and when the block completed broadcast and processing", + &["provenance"] + ); + pub static ref HTTP_API_BLOCK_GOSSIP_TIMES: Result = try_create_histogram_vec_with_buckets( + "http_api_block_gossip_times", + "Time between receiving the block on HTTP and publishing it on gossip", + decimal_buckets(-2, 2), &["provenance"] ); pub static ref HTTP_API_BLOCK_PUBLISHED_LATE_TOTAL: Result = try_create_int_counter( diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 0d176e6a53..10d000ef6f 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -19,8 +19,8 @@ use std::time::Duration; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ - AbstractExecPayload, BeaconBlockRef, BlobSidecarList, EthSpec, ExecPayload, ExecutionBlockHash, - ForkName, FullPayload, FullPayloadBellatrix, Hash256, SignedBeaconBlock, + AbstractExecPayload, BeaconBlockRef, BlobSidecarList, BlockImportSource, EthSpec, ExecPayload, + ExecutionBlockHash, ForkName, FullPayload, FullPayloadBellatrix, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, VariableList, }; use warp::http::StatusCode; @@ -60,6 +60,11 @@ pub async fn publish_block (block_contents, true), ProvenancedBlock::Builder(block_contents, _) => (block_contents, false), }; + let provenance = if is_locally_built_block { + "local" + } else { + "builder" + }; let block = block_contents.inner_block().clone(); let delay = get_block_delay_ms(seen_timestamp, block.message(), 
&chain.slot_clock); debug!(log, "Signed block received in HTTP API"; "slot" => block.slot()); @@ -75,7 +80,18 @@ pub async fn publish_block block.slot(), "publish_delay" => ?publish_delay); + metrics::observe_timer_vec( + &metrics::HTTP_API_BLOCK_GOSSIP_TIMES, + &[provenance], + publish_delay, + ); + + info!( + log, + "Signed block published to network via HTTP API"; + "slot" => block.slot(), + "publish_delay_ms" => publish_delay.as_millis() + ); match block.as_ref() { SignedBeaconBlock::Base(_) @@ -214,6 +230,7 @@ pub async fn publish_block( None, chain.slot_clock.clone(), chain.spec.maximum_gossip_clock_disparity(), + BeaconProcessorQueueLengths::from_state( + &chain.canonical_head.cached_head().snapshot.beacon_state, + &chain.spec, + ) + .unwrap(), ) .unwrap(); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index d44b9a688c..2828b15a93 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -70,6 +70,7 @@ struct ApiTester { attester_slashing: AttesterSlashing, proposer_slashing: ProposerSlashing, voluntary_exit: SignedVoluntaryExit, + bls_to_execution_change: SignedBlsToExecutionChange, network_rx: NetworkReceivers, local_enr: Enr, external_peer_id: PeerId, @@ -128,6 +129,7 @@ impl ApiTester { }) .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) + .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .mock_execution_layer_with_config() .build(); @@ -223,6 +225,7 @@ impl ApiTester { let attester_slashing = harness.make_attester_slashing(vec![0, 1]); let proposer_slashing = harness.make_proposer_slashing(2); let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap()); + let bls_to_execution_change = harness.make_bls_to_execution_change(4, Address::zero()); let chain = harness.chain.clone(); @@ -289,6 +292,7 @@ impl ApiTester { attester_slashing, proposer_slashing, voluntary_exit, + bls_to_execution_change, network_rx, 
local_enr, external_peer_id, @@ -301,6 +305,7 @@ impl ApiTester { BeaconChainHarness::builder(MainnetEthSpec) .default_spec() .deterministic_keypairs(VALIDATOR_COUNT) + .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .build(), ); @@ -336,6 +341,7 @@ impl ApiTester { let attester_slashing = harness.make_attester_slashing(vec![0, 1]); let proposer_slashing = harness.make_proposer_slashing(2); let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap()); + let bls_to_execution_change = harness.make_bls_to_execution_change(4, Address::zero()); let chain = harness.chain.clone(); @@ -373,6 +379,7 @@ impl ApiTester { attester_slashing, proposer_slashing, voluntary_exit, + bls_to_execution_change, network_rx, local_enr, external_peer_id, @@ -5214,6 +5221,9 @@ impl ApiTester { EventTopic::Block, EventTopic::Head, EventTopic::FinalizedCheckpoint, + EventTopic::AttesterSlashing, + EventTopic::ProposerSlashing, + EventTopic::BlsToExecutionChange, ]; let mut events_future = self .client @@ -5256,6 +5266,20 @@ impl ApiTester { &[EventKind::VoluntaryExit(self.voluntary_exit.clone())] ); + // Produce a BLS to execution change event + self.client + .post_beacon_pool_bls_to_execution_changes(&[self.bls_to_execution_change.clone()]) + .await + .unwrap(); + + let bls_events = poll_events(&mut events_future, 1, Duration::from_millis(10000)).await; + assert_eq!( + bls_events.as_slice(), + &[EventKind::BlsToExecutionChange(Box::new( + self.bls_to_execution_change.clone() + ))] + ); + // Submit the next block, which is on an epoch boundary, so this will produce a finalized // checkpoint event, head event, and block event let block_root = self.next_block.signed_block().canonical_root(); @@ -5353,6 +5377,42 @@ impl ApiTester { .await; assert_eq!(reorg_event.as_slice(), &[expected_reorg]); + // Test attester slashing event + let mut attester_slashing_event_future = self + .client + .get_events::(&[EventTopic::AttesterSlashing]) + .await + 
.unwrap(); + + self.harness.add_attester_slashing(vec![1, 2, 3]).unwrap(); + + let attester_slashing_event = poll_events( + &mut attester_slashing_event_future, + 1, + Duration::from_millis(10000), + ) + .await; + + assert!(attester_slashing_event.len() == 1); + + // Test proposer slashing event + let mut proposer_slashing_event_future = self + .client + .get_events::(&[EventTopic::ProposerSlashing]) + .await + .unwrap(); + + self.harness.add_proposer_slashing(1).unwrap(); + + let proposer_slashing_event = poll_events( + &mut proposer_slashing_event_future, + 1, + Duration::from_millis(10000), + ) + .await; + + assert!(proposer_slashing_event.len() == 1); + self } diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 1617c0bd6c..b318bd4fb3 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -13,8 +13,6 @@ types = { workspace = true } serde = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -tree_hash = { workspace = true } -tree_hash_derive = { workspace = true } slog = { workspace = true } lighthouse_version = { workspace = true } tokio = { workspace = true } @@ -43,18 +41,11 @@ superstruct = { workspace = true } prometheus-client = "0.22.0" unused_port = { workspace = true } delay_map = { workspace = true } -tracing = { workspace = true } -byteorder = { workspace = true } bytes = { workspace = true } either = { workspace = true } # Local dependencies -futures-ticker = "0.0.3" -getrandom = "0.2.11" -hex_fmt = "0.3.0" -instant = "0.1.12" void = "1.0.2" -base64 = "0.21.5" libp2p-mplex = "0.41" [dependencies.libp2p] @@ -72,4 +63,3 @@ async-channel = { workspace = true } [features] libp2p-websocket = [] - diff --git a/beacon_node/lighthouse_network/gossipsub/Cargo.toml b/beacon_node/lighthouse_network/gossipsub/Cargo.toml index 871955c059..d8fa445e63 100644 --- a/beacon_node/lighthouse_network/gossipsub/Cargo.toml 
+++ b/beacon_node/lighthouse_network/gossipsub/Cargo.toml @@ -10,7 +10,7 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [features] -wasm-bindgen = ["getrandom/js", "instant/wasm-bindgen"] +wasm-bindgen = ["getrandom/js"] [dependencies] async-channel = { workspace = true } @@ -25,7 +25,6 @@ futures-ticker = "0.0.3" futures-timer = "3.0.2" getrandom = "0.2.12" hex_fmt = "0.3.0" -instant = "0.1.12" libp2p = { version = "0.53", default-features = false } quick-protobuf = "0.8" quick-protobuf-codec = "0.3" @@ -33,11 +32,10 @@ rand = "0.8" regex = "1.10.3" serde = { version = "1", optional = true, features = ["derive"] } sha2 = "0.10.8" -smallvec = "1.13.1" tracing = "0.1.37" void = "1.0.2" - prometheus-client = "0.22.0" +web-time = "1.1.0" [dev-dependencies] quickcheck = { workspace = true } diff --git a/beacon_node/lighthouse_network/gossipsub/src/backoff.rs b/beacon_node/lighthouse_network/gossipsub/src/backoff.rs index 2567a3691e..f83a24baaf 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/backoff.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/backoff.rs @@ -20,13 +20,13 @@ //! Data structure for efficiently storing known back-off's when pruning peers. 
use crate::topic::TopicHash; -use instant::Instant; use libp2p::identity::PeerId; use std::collections::{ hash_map::{Entry, HashMap}, HashSet, }; use std::time::Duration; +use web_time::Instant; #[derive(Copy, Clone)] struct HeartbeatIndex(usize); diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index ce0437342e..ccebb4e267 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ -34,7 +34,6 @@ use futures_ticker::Ticker; use prometheus_client::registry::Registry; use rand::{seq::SliceRandom, thread_rng}; -use instant::Instant; use libp2p::core::{multiaddr::Protocol::Ip4, multiaddr::Protocol::Ip6, Endpoint, Multiaddr}; use libp2p::identity::Keypair; use libp2p::identity::PeerId; @@ -44,6 +43,7 @@ use libp2p::swarm::{ ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; +use web_time::{Instant, SystemTime}; use super::gossip_promises::GossipPromises; use super::handler::{Handler, HandlerEvent, HandlerIn}; @@ -67,7 +67,6 @@ use super::{ types::RpcOut, }; use super::{PublishError, SubscriptionError, TopicScoreParams, ValidationError}; -use instant::SystemTime; use quick_protobuf::{MessageWrite, Writer}; use std::{cmp::Ordering::Equal, fmt::Debug}; diff --git a/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs b/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs index 43ca178556..2bfb20595a 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs @@ -21,9 +21,9 @@ use super::peer_score::RejectReason; use super::MessageId; use super::ValidationError; -use instant::Instant; use libp2p::identity::PeerId; use std::collections::HashMap; +use web_time::Instant; /// Tracks recently sent `IWANT` messages and checks if peers respond to 
them. #[derive(Default)] diff --git a/beacon_node/lighthouse_network/gossipsub/src/handler.rs b/beacon_node/lighthouse_network/gossipsub/src/handler.rs index 298570955f..359bf8da42 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/handler.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/handler.rs @@ -26,7 +26,6 @@ use asynchronous_codec::Framed; use futures::future::Either; use futures::prelude::*; use futures::StreamExt; -use instant::Instant; use libp2p::core::upgrade::DeniedUpgrade; use libp2p::swarm::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, @@ -37,6 +36,7 @@ use std::{ pin::Pin, task::{Context, Poll}, }; +use web_time::Instant; /// The event emitted by the Handler. This informs the behaviour of various events created /// by the handler. diff --git a/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs b/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs index 4d609434f1..fa02f06f69 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs @@ -24,11 +24,11 @@ use super::metrics::{Metrics, Penalty}; use super::time_cache::TimeCache; use super::{MessageId, TopicHash}; -use instant::Instant; use libp2p::identity::PeerId; use std::collections::{hash_map, HashMap, HashSet}; use std::net::IpAddr; use std::time::Duration; +use web_time::Instant; mod params; use super::ValidationError; diff --git a/beacon_node/lighthouse_network/gossipsub/src/time_cache.rs b/beacon_node/lighthouse_network/gossipsub/src/time_cache.rs index 89fd4afee0..a3e5c01ac4 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/time_cache.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/time_cache.rs @@ -21,13 +21,13 @@ //! This implements a time-based LRU cache for checking gossipsub message duplicates. 
use fnv::FnvHashMap; -use instant::Instant; use std::collections::hash_map::{ self, Entry::{Occupied, Vacant}, }; use std::collections::VecDeque; use std::time::Duration; +use web_time::Instant; struct ExpiringElement { /// The element that expires diff --git a/beacon_node/lighthouse_network/gossipsub/src/types.rs b/beacon_node/lighthouse_network/gossipsub/src/types.rs index 712698b42a..84bdfb786f 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/types.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/types.rs @@ -25,7 +25,6 @@ use async_channel::{Receiver, Sender}; use futures::stream::Peekable; use futures::{Future, Stream, StreamExt}; use futures_timer::Delay; -use instant::Duration; use libp2p::identity::PeerId; use libp2p::swarm::ConnectionId; use prometheus_client::encoding::EncodeLabelValue; @@ -36,6 +35,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::task::{Context, Poll}; use std::{fmt, pin::Pin}; +use web_time::Duration; use crate::rpc_proto::proto; #[cfg(feature = "serde")] diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 8cc2ea86c0..5c937a1e0b 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -398,16 +398,32 @@ impl Discovery { /// automatically update the external address. /// /// If the external address needs to be modified, use `update_enr_udp_socket. - pub fn update_enr_tcp_port(&mut self, port: u16) -> Result<(), String> { + /// + /// This returns Ok(true) if the ENR was updated, otherwise Ok(false) if nothing was done. 
+ pub fn update_enr_tcp_port(&mut self, port: u16, v6: bool) -> Result { + let enr_field = if v6 { + if self.discv5.external_enr().read().tcp6() == Some(port) { + // The field is already set to the same value, nothing to do + return Ok(false); + } + "tcp6" + } else { + if self.discv5.external_enr().read().tcp4() == Some(port) { + // The field is already set to the same value, nothing to do + return Ok(false); + } + "tcp" + }; + self.discv5 - .enr_insert("tcp", &port) + .enr_insert(enr_field, &port) .map_err(|e| format!("{:?}", e))?; // replace the global version *self.network_globals.local_enr.write() = self.discv5.local_enr(); // persist modified enr to disk enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), &self.log); - Ok(()) + Ok(true) } // TODO: Group these functions here once the ENR is shared across discv5 and lighthouse and @@ -415,16 +431,35 @@ impl Discovery { // This currently doesn't support ipv6. All of these functions should be removed and // addressed properly in the following issue. // https://github.com/sigp/lighthouse/issues/4706 - pub fn update_enr_quic_port(&mut self, port: u16) -> Result<(), String> { + pub fn update_enr_quic_port(&mut self, port: u16, v6: bool) -> Result { + let enr_field = if v6 { + if self.discv5.external_enr().read().quic6() == Some(port) { + // The field is already set to the same value, nothing to do + return Ok(false); + } + "quic6" + } else { + if self.discv5.external_enr().read().quic4() == Some(port) { + // The field is already set to the same value, nothing to do + return Ok(false); + } + "quic" + }; + let current_field = self.discv5.external_enr().read().quic4(); + if current_field == Some(port) { + // The current field is already set, no need to update. 
+ return Ok(false); + } + self.discv5 - .enr_insert("quic", &port) + .enr_insert(enr_field, &port) .map_err(|e| format!("{:?}", e))?; // replace the global version *self.network_globals.local_enr.write() = self.discv5.local_enr(); // persist modified enr to disk enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), &self.log); - Ok(()) + Ok(true) } /// Updates the local ENR UDP socket. @@ -1057,7 +1092,7 @@ impl NetworkBehaviour for Discovery { return; } - self.update_enr_tcp_port(port) + self.update_enr_tcp_port(port, false) } (Some(Protocol::Udp(port)), Some(Protocol::QuicV1)) => { if !self.update_ports.quic4 { @@ -1065,7 +1100,7 @@ impl NetworkBehaviour for Discovery { return; } - self.update_enr_quic_port(port) + self.update_enr_quic_port(port, false) } _ => { debug!(self.log, "Encountered unacceptable multiaddr for listening (unsupported transport)"; "addr" => ?addr); @@ -1079,7 +1114,7 @@ impl NetworkBehaviour for Discovery { return; } - self.update_enr_tcp_port(port) + self.update_enr_tcp_port(port, true) } (Some(Protocol::Udp(port)), Some(Protocol::QuicV1)) => { if !self.update_ports.quic6 { @@ -1087,7 +1122,7 @@ impl NetworkBehaviour for Discovery { return; } - self.update_enr_quic_port(port) + self.update_enr_quic_port(port, true) } _ => { debug!(self.log, "Encountered unacceptable multiaddr for listening (unsupported transport)"; "addr" => ?addr); @@ -1103,9 +1138,10 @@ impl NetworkBehaviour for Discovery { let local_enr: Enr = self.discv5.local_enr(); match attempt_enr_update { - Ok(_) => { + Ok(true) => { info!(self.log, "Updated local ENR"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6()) } + Ok(false) => {} // Nothing to do, ENR already configured Err(e) => warn!(self.log, "Failed to update ENR"; "error" => ?e), } } diff --git 
a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index df5bbba99c..daf95fb8c9 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -352,6 +352,31 @@ where !matches!(self.state, HandlerState::Deactivated) } + // NOTE: This function gets polled to completion upon a connection close. + fn poll_close(&mut self, _: &mut Context<'_>) -> Poll> { + // Inform the network behaviour of any failed requests + + while let Some(substream_id) = self.outbound_substreams.keys().next().cloned() { + let outbound_info = self + .outbound_substreams + .remove(&substream_id) + .expect("The value must exist for a key"); + // If the state of the connection is closing, we do not need to report this case to + // the behaviour, as the connection has just closed non-gracefully + if matches!(outbound_info.state, OutboundSubstreamState::Closing(_)) { + continue; + } + + // Register this request as an RPC Error + return Poll::Ready(Some(HandlerEvent::Err(HandlerErr::Outbound { + error: RPCError::Disconnected, + proto: outbound_info.proto, + id: outbound_info.req_id, + }))); + } + Poll::Ready(None) + } + fn poll( &mut self, cx: &mut Context<'_>, diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index f91a5b471a..86086feda3 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -972,6 +972,12 @@ impl Network { .goodbye_peer(peer_id, reason, source); } + /// Hard (ungraceful) disconnect for testing purposes only + /// Use goodbye_peer for disconnections, do not use this function. + pub fn __hard_disconnect_testing_only(&mut self, peer_id: PeerId) { + let _ = self.swarm.disconnect_peer_id(peer_id); + } + /// Returns an iterator over all enr entries in the DHT. 
pub fn enr_entries(&self) -> Vec { self.discovery().table_entries_enr() @@ -1373,12 +1379,25 @@ impl Network { let peer_id = event.peer_id; if !self.peer_manager().is_connected(&peer_id) { - debug!( - self.log, - "Ignoring rpc message of disconnecting peer"; - event - ); - return None; + // Sync expects a RPCError::Disconnected to drop associated lookups with this peer. + // Silencing this event breaks the API contract with RPC where every request ends with + // - A stream termination event, or + // - An RPCError event + return if let HandlerEvent::Err(HandlerErr::Outbound { + id: RequestId::Application(id), + error, + .. + }) = event.event + { + Some(NetworkEvent::RPCFailed { peer_id, id, error }) + } else { + debug!( + self.log, + "Ignoring rpc message of disconnecting peer"; + event + ); + None + }; } let handler_id = event.conn_id; @@ -1681,12 +1700,16 @@ impl Network { libp2p::upnp::Event::NewExternalAddr(addr) => { info!(self.log, "UPnP route established"; "addr" => %addr); let mut iter = addr.iter(); - // Skip Ip address. 
- iter.next(); + let is_ip6 = { + let addr = iter.next(); + matches!(addr, Some(MProtocol::Ip6(_))) + }; match iter.next() { Some(multiaddr::Protocol::Udp(udp_port)) => match iter.next() { Some(multiaddr::Protocol::QuicV1) => { - if let Err(e) = self.discovery_mut().update_enr_quic_port(udp_port) { + if let Err(e) = + self.discovery_mut().update_enr_quic_port(udp_port, is_ip6) + { warn!(self.log, "Failed to update ENR"; "error" => e); } } @@ -1695,7 +1718,7 @@ impl Network { } }, Some(multiaddr::Protocol::Tcp(tcp_port)) => { - if let Err(e) = self.discovery_mut().update_enr_tcp_port(tcp_port) { + if let Err(e) = self.discovery_mut().update_enr_tcp_port(tcp_port, is_ip6) { warn!(self.log, "Failed to update ENR"; "error" => e); } } diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index a60af4db3d..e2b72f8673 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -3,7 +3,7 @@ mod common; use common::Protocol; -use lighthouse_network::rpc::methods::*; +use lighthouse_network::rpc::{methods::*, RPCError}; use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Request, Response}; use slog::{debug, warn, Level}; use ssz::Encode; @@ -996,6 +996,96 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { }) } +#[test] +fn test_disconnect_triggers_rpc_error() { + // set up the logging. 
The level and enabled logging or not + let log_level = Level::Debug; + let enable_logging = false; + + let log = common::build_log(log_level, enable_logging); + let spec = E::default_spec(); + + let rt = Arc::new(Runtime::new().unwrap()); + // get sender/receiver + rt.block_on(async { + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + &log, + ForkName::Base, + &spec, + Protocol::Tcp, + ) + .await; + + // BlocksByRoot Request + let rpc_request = Request::BlocksByRoot(BlocksByRootRequest::new( + // Must have at least one root for the request to create a stream + vec![Hash256::from_low_u64_be(0)], + &spec, + )); + + // build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { + // Send a STATUS message + debug!(log, "Sending RPC"); + sender.send_request(peer_id, 42, rpc_request.clone()); + } + NetworkEvent::RPCFailed { error, id: 42, .. } => match error { + RPCError::Disconnected => return, + other => panic!("received unexpected error {:?}", other), + }, + other => { + warn!(log, "Ignoring other event {:?}", other); + } + } + } + }; + + // determine messages to send (PeerId, RequestId). If some, indicates we still need to send + // messages + let mut sending_peer = None; + let receiver_future = async { + loop { + // this future either drives the sending/receiving or times out allowing messages to be + // sent in the timeout + match futures::future::select( + Box::pin(receiver.next_event()), + Box::pin(tokio::time::sleep(Duration::from_secs(1))), + ) + .await + { + futures::future::Either::Left((ev, _)) => match ev { + NetworkEvent::RequestReceived { peer_id, .. } => { + sending_peer = Some(peer_id); + } + other => { + warn!(log, "Ignoring other event {:?}", other); + } + }, + futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required + } + + // if we need to send messages send them here. 
This will happen after a delay + if let Some(peer_id) = sending_peer.take() { + warn!(log, "Receiver got request, disconnecting peer"); + receiver.__hard_disconnect_testing_only(peer_id); + } + } + }; + + tokio::select! { + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } + } + }) +} + /// Establishes a pair of nodes and disconnects the pair based on the selected protocol via an RPC /// Goodbye message. fn goodbye_test(log_level: Level, enable_logging: bool, protocol: Protocol) { diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 406015360e..0ad7f53ee7 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -39,19 +39,14 @@ logging = { workspace = true } task_executor = { workspace = true } igd-next = "0.14" itertools = { workspace = true } -num_cpus = { workspace = true } lru_cache = { workspace = true } -lru = { workspace = true } strum = { workspace = true } -tokio-util = { workspace = true } derivative = { workspace = true } delay_map = { workspace = true } -ethereum-types = { workspace = true } operation_pool = { workspace = true } execution_layer = { workspace = true } beacon_processor = { workspace = true } parking_lot = { workspace = true } -environment = { workspace = true } [features] # NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index d3804fbed8..bf4cbd09ab 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -244,6 +244,23 @@ lazy_static! 
{ "sync_parent_block_lookups", "Number of parent block lookups underway" ); + pub static ref SYNC_LOOKUP_CREATED: Result = try_create_int_counter( + "sync_lookups_created_total", + "Total count of sync lookups created", + ); + pub static ref SYNC_LOOKUP_DROPPED: Result = try_create_int_counter_vec( + "sync_lookups_dropped_total", + "Total count of sync lookups dropped by reason", + &["reason"] + ); + pub static ref SYNC_LOOKUP_COMPLETED: Result = try_create_int_counter( + "sync_lookups_completed_total", + "Total count of sync lookups completed", + ); + pub static ref SYNC_LOOKUPS_STUCK: Result = try_create_int_counter( + "sync_lookups_stuck_total", + "Total count of sync lookups that are stuck and dropped", + ); /* * Block Delay Metrics diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 7b8826bd85..374dca2a5a 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -31,8 +31,8 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, BlobSidecar, EthSpec, Hash256, IndexedAttestation, - LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, + beacon_block::BlockImportSource, Attestation, AttesterSlashing, BlobSidecar, EthSpec, Hash256, + IndexedAttestation, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, @@ -753,7 +753,9 @@ impl NetworkBeaconProcessor { let blob_slot = verified_blob.slot(); let blob_index = verified_blob.id().index; - match self.chain.process_gossip_blob(verified_blob).await { + let result = 
self.chain.process_gossip_blob(verified_blob).await; + + match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { // Note: Reusing block imported metric here metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); @@ -778,26 +780,40 @@ impl NetworkBeaconProcessor { "block_root" => %block_root, ); } + Err(BlockError::BlockIsAlreadyKnown(_)) => { + debug!( + self.log, + "Ignoring gossip blob already imported"; + "block_root" => ?block_root, + "blob_index" => blob_index, + ); + } Err(err) => { debug!( self.log, "Invalid gossip blob"; "outcome" => ?err, - "block root" => ?block_root, - "block slot" => blob_slot, - "blob index" => blob_index, + "block_root" => ?block_root, + "block_slot" => blob_slot, + "blob_index" => blob_index, ); self.gossip_penalize_peer( peer_id, PeerAction::MidToleranceError, "bad_gossip_blob_ssz", ); - trace!( - self.log, - "Invalid gossip blob ssz"; - ); } } + + // If a block is in the da_checker, sync may be awaiting an event for when the block is finally + // imported. A block can become imported after processing either a block or a blob. If + // importing a block results in `Imported`, notify. Do not notify of blob errors.
+ if matches!(result, Ok(AvailabilityProcessingStatus::Imported(_))) { + self.send_sync_message(SyncMessage::GossipBlockProcessResult { + block_root, + imported: true, + }); + } } /// Process the beacon block received from the gossip network and: @@ -1137,9 +1153,16 @@ impl NetworkBeaconProcessor { let block = verified_block.block.block_cloned(); let block_root = verified_block.block_root; + // TODO(block source) + let result = self .chain - .process_block_with_early_caching(block_root, verified_block, NotifyExecutionLayer::Yes) + .process_block_with_early_caching( + block_root, + verified_block, + BlockImportSource::Gossip, + NotifyExecutionLayer::Yes, + ) .await; match &result { @@ -1183,19 +1206,18 @@ impl NetworkBeaconProcessor { "block_root" => %block_root, ); } - Err(BlockError::ParentUnknown(block)) => { - // Inform the sync manager to find parents for this block - // This should not occur. It should be checked by `should_forward_block` + Err(BlockError::ParentUnknown(_)) => { + // This should not occur. It should be checked by `should_forward_block`. + // Do not send sync message UnknownParentBlock to prevent conflicts with the + // BlockComponentProcessed message below. If this error ever happens, lookup sync + // can recover by receiving another block / blob / attestation referencing the + // chain that includes this block. 
error!( self.log, "Block with unknown parent attempted to be processed"; + "block_root" => %block_root, "peer_id" => %peer_id ); - self.send_sync_message(SyncMessage::UnknownParentBlock( - peer_id, - block.clone(), - block_root, - )); } Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { debug!( @@ -1259,6 +1281,11 @@ impl NetworkBeaconProcessor { &self.log, ); } + + self.send_sync_message(SyncMessage::GossipBlockProcessResult { + block_root, + imported: matches!(result, Ok(AvailabilityProcessingStatus::Imported(_))), + }); } pub fn process_gossip_voluntary_exit( diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index f10646c741..cabe39f929 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -1,7 +1,5 @@ -use crate::{ - service::NetworkMessage, - sync::{manager::BlockProcessType, SyncMessage}, -}; +use crate::sync::manager::BlockProcessType; +use crate::{service::NetworkMessage, sync::manager::SyncMessage}; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{builder::Witness, eth1_chain::CachingEth1Backend, BeaconChain}; use beacon_chain::{BeaconChainTypes, NotifyExecutionLayer}; diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 887974c6e0..acd02ab6ad 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -24,6 +24,7 @@ use store::KzgCommitment; use tokio::sync::mpsc; use types::beacon_block_body::format_kzg_commitments; use types::blob_sidecar::FixedBlobSidecarList; +use types::BlockImportSource; use types::{Epoch, Hash256}; /// Id associated to a batch processing request, either a sync batch or a parent lookup. 
@@ -33,8 +34,6 @@ pub enum ChainSegmentProcessId { RangeBatchId(ChainId, Epoch), /// Processing ID for a backfill syncing batch. BackSyncBatchId(Epoch), - /// Processing Id of the parent lookup of a block. - ParentLookup(Hash256), } /// Returned when a chain segment import fails. @@ -155,7 +154,12 @@ impl NetworkBeaconProcessor { let result = self .chain - .process_block_with_early_caching(block_root, block, NotifyExecutionLayer::Yes) + .process_block_with_early_caching( + block_root, + block, + BlockImportSource::Lookup, + NotifyExecutionLayer::Yes, + ) .await; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); @@ -172,17 +176,15 @@ impl NetworkBeaconProcessor { if reprocess_tx.try_send(reprocess_msg).is_err() { error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %hash) }; - if matches!(process_type, BlockProcessType::SingleBlock { .. }) { - self.chain.block_times_cache.write().set_time_observed( - hash, - slot, - seen_timestamp, - None, - None, - ); + self.chain.block_times_cache.write().set_time_observed( + hash, + slot, + seen_timestamp, + None, + None, + ); - self.chain.recompute_head_at_current_slot().await; - } + self.chain.recompute_head_at_current_slot().await; } // Sync handles these results self.send_sync_message(SyncMessage::BlockComponentProcessed { @@ -396,41 +398,6 @@ impl NetworkBeaconProcessor { } } } - // this is a parent lookup request from the sync manager - ChainSegmentProcessId::ParentLookup(chain_head) => { - debug!( - self.log, "Processing parent lookup"; - "chain_hash" => %chain_head, - "blocks" => downloaded_blocks.len() - ); - // parent blocks are ordered from highest slot to lowest, so we need to process in - // reverse - match self - .process_blocks(downloaded_blocks.iter().rev(), notify_execution_layer) - .await - { - (imported_blocks, Err(e)) => { - debug!(self.log, "Parent lookup failed"; "error" => %e.message); - match e.peer_action { - Some(penalty) => 
BatchProcessResult::FaultyFailure { - imported_blocks: imported_blocks > 0, - penalty, - }, - None => BatchProcessResult::NonFaultyFailure, - } - } - (imported_blocks, Ok(_)) => { - debug!( - self.log, "Parent lookup processed successfully"; - "chain_hash" => %chain_head, - "imported_blocks" => imported_blocks - ); - BatchProcessResult::Success { - was_non_empty: imported_blocks > 0, - } - } - } - } }; self.send_sync_message(SyncMessage::BatchProcessed { sync_type, result }); diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index dd58eb8355..06b12c14ae 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -239,6 +239,11 @@ impl TestRig { Some(work_journal_tx), harness.chain.slot_clock.clone(), chain.spec.maximum_gossip_clock_disparity(), + BeaconProcessorQueueLengths::from_state( + &chain.canonical_head.cached_head().snapshot.beacon_state, + &chain.spec, + ) + .unwrap(), ); assert!(beacon_processor.is_ok()); @@ -311,9 +316,7 @@ impl TestRig { block_root, RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone()), std::time::Duration::default(), - BlockProcessType::ParentLookup { - chain_hash: Hash256::random(), - }, + BlockProcessType::SingleBlock { id: 0 }, ) .unwrap(); } diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index ab9ffb95a6..92c6bb6c3e 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -29,6 +29,10 @@ pub(crate) const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; /// Currently a whole slot ahead. const ADVANCE_SUBSCRIBE_SLOT_FRACTION: u32 = 1; +/// The number of slots after an aggregator duty where we remove the entry from +/// `aggregate_validators_on_subnet` delay map. 
+const UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY: u32 = 2; + #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub(crate) enum SubscriptionKind { /// Long lived subscriptions. @@ -462,23 +466,27 @@ impl AttestationService { ) -> Result<(), &'static str> { let slot_duration = self.beacon_chain.slot_clock.slot_duration(); - // Calculate how long before we need to subscribe to the subnet. - let time_to_subscription_start = { - // The short time we schedule the subscription before it's actually required. This - // ensures we are subscribed on time, and allows consecutive subscriptions to the same - // subnet to overlap, reducing subnet churn. - let advance_subscription_duration = slot_duration / ADVANCE_SUBSCRIBE_SLOT_FRACTION; - // The time to the required slot. - let time_to_subscription_slot = self - .beacon_chain - .slot_clock - .duration_to_slot(slot) - .unwrap_or_default(); // If this is a past slot we will just get a 0 duration. - time_to_subscription_slot.saturating_sub(advance_subscription_duration) - }; + // The short time we schedule the subscription before it's actually required. This + // ensures we are subscribed on time, and allows consecutive subscriptions to the same + // subnet to overlap, reducing subnet churn. + let advance_subscription_duration = slot_duration / ADVANCE_SUBSCRIBE_SLOT_FRACTION; + // The time to the required slot. + let time_to_subscription_slot = self + .beacon_chain + .slot_clock + .duration_to_slot(slot) + .unwrap_or_default(); // If this is a past slot we will just get a 0 duration. + // Calculate how long before we need to subscribe to the subnet. + let time_to_subscription_start = + time_to_subscription_slot.saturating_sub(advance_subscription_duration); + + // The time after a duty slot where we no longer need it in the `aggregate_validators_on_subnet` + // delay map. 
+ let time_to_unsubscribe = + time_to_subscription_slot + UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY * slot_duration; if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { - tracked_vals.insert(ExactSubnet { subnet_id, slot }); + tracked_vals.insert_at(ExactSubnet { subnet_id, slot }, time_to_unsubscribe); } // If the subscription should be done in the future, schedule it. Otherwise subscribe diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 67fe871acc..ce7d04ac0a 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -87,16 +87,17 @@ pub enum ProcessResult { } /// The ways a backfill sync can fail. +// The info in the enum variants is displayed in logging, clippy thinks it's dead code. #[derive(Debug)] pub enum BackFillError { /// A batch failed to be downloaded. - BatchDownloadFailed(BatchId), + BatchDownloadFailed(#[allow(dead_code)] BatchId), /// A batch could not be processed. - BatchProcessingFailed(BatchId), + BatchProcessingFailed(#[allow(dead_code)] BatchId), /// A batch entered an invalid state. - BatchInvalidState(BatchId, String), + BatchInvalidState(#[allow(dead_code)] BatchId, #[allow(dead_code)] String), /// The sync algorithm entered an invalid state. - InvalidSyncState(String), + InvalidSyncState(#[allow(dead_code)] String), /// The chain became paused. Paused, } @@ -306,11 +307,7 @@ impl BackFillSync { /// A peer has disconnected. /// If the peer has active batches, those are considered failed and re-requested. 
#[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"] - pub fn peer_disconnected( - &mut self, - peer_id: &PeerId, - network: &mut SyncNetworkContext, - ) -> Result<(), BackFillError> { + pub fn peer_disconnected(&mut self, peer_id: &PeerId) -> Result<(), BackFillError> { if matches!( self.state(), BackFillState::Failed | BackFillState::NotRequired @@ -318,37 +315,7 @@ impl BackFillSync { return Ok(()); } - if let Some(batch_ids) = self.active_requests.remove(peer_id) { - // fail the batches - for id in batch_ids { - if let Some(batch) = self.batches.get_mut(&id) { - match batch.download_failed(false) { - Ok(BatchOperationOutcome::Failed { blacklist: _ }) => { - self.fail_sync(BackFillError::BatchDownloadFailed(id))?; - } - Ok(BatchOperationOutcome::Continue) => {} - Err(e) => { - self.fail_sync(BackFillError::BatchInvalidState(id, e.0))?; - } - } - // If we have run out of peers in which to retry this batch, the backfill state - // transitions to a paused state. 
- // We still need to reset the state for all the affected batches, so we should not - // short circuit early - if self.retry_batch_download(network, id).is_err() { - debug!( - self.log, - "Batch could not be retried"; - "batch_id" => id, - "error" => "no synced peers" - ); - } - } else { - debug!(self.log, "Batch not found while removing peer"; - "peer" => %peer_id, "batch" => id) - } - } - } + self.active_requests.remove(peer_id); // Remove the peer from the participation list self.participating_peers.remove(peer_id); @@ -985,7 +952,7 @@ impl BackFillSync { Err(e) => { // NOTE: under normal conditions this shouldn't happen but we handle it anyway warn!(self.log, "Could not send batch request"; - "batch_id" => batch_id, "error" => e, &batch); + "batch_id" => batch_id, "error" => ?e, &batch); // register the failed download and check if the batch can be retried if let Err(e) = batch.start_downloading_from_peer(peer, 1) { return self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0)); diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index 7193dd6e21..aef76fb0da 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -1,21 +1,17 @@ -use crate::sync::block_lookups::parent_lookup::PARENT_FAIL_TOLERANCE; use crate::sync::block_lookups::single_block_lookup::{ LookupRequestError, SingleBlockLookup, SingleLookupRequestState, }; -use crate::sync::block_lookups::{ - BlobRequestState, BlockLookups, BlockRequestState, PeerId, SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, -}; -use crate::sync::manager::{BlockProcessType, Id, SingleLookupReqId}; -use crate::sync::network_context::{ - BlobsByRootSingleBlockRequest, BlocksByRootSingleRequest, SyncNetworkContext, -}; +use crate::sync::block_lookups::{BlobRequestState, BlockRequestState, PeerId}; +use crate::sync::manager::Id; +use crate::sync::network_context::{LookupRequestResult, 
SyncNetworkContext}; use beacon_chain::block_verification_types::RpcBlock; -use beacon_chain::data_availability_checker::ChildComponents; use beacon_chain::BeaconChainTypes; use std::sync::Arc; -use std::time::Duration; use types::blob_sidecar::FixedBlobSidecarList; -use types::{Hash256, SignedBeaconBlock}; +use types::SignedBeaconBlock; + +use super::single_block_lookup::DownloadResult; +use super::SingleLookupId; #[derive(Debug, Copy, Clone)] pub enum ResponseType { @@ -23,21 +19,6 @@ pub enum ResponseType { Blob, } -#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] -pub enum LookupType { - Current, - Parent, -} - -impl LookupType { - fn max_attempts(&self) -> u8 { - match self { - LookupType::Current => SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, - LookupType::Parent => PARENT_FAIL_TOLERANCE, - } - } -} - /// This trait unifies common single block lookup functionality across blocks and blobs. This /// includes making requests, verifying responses, and handling processing results. A /// `SingleBlockLookup` includes both a `BlockRequestState` and a `BlobRequestState`, this trait is @@ -47,121 +28,29 @@ impl LookupType { /// safety when handling a block/blob response ensuring we only mutate the correct corresponding /// state. pub trait RequestState { - /// The type of the request . - type RequestType; - /// The type created after validation. type VerifiedResponseType: Clone; - /// We convert a `VerifiedResponseType` to this type prior to sending it to the beacon processor. - type ReconstructedResponseType; - - /* Request building methods */ - - /// Construct a new request. - fn build_request( - &mut self, - lookup_type: LookupType, - ) -> Result<(PeerId, Self::RequestType), LookupRequestError> { - // Verify and construct request. - self.too_many_attempts(lookup_type)?; - let peer = self.get_peer()?; - let request = self.new_request(); - Ok((peer, request)) - } - - /// Construct a new request and send it. 
- fn build_request_and_send( - &mut self, - id: Id, - lookup_type: LookupType, - cx: &mut SyncNetworkContext, - ) -> Result<(), LookupRequestError> { - // Check if request is necessary. - if !self.get_state().is_awaiting_download() { - return Ok(()); - } - - // Construct request. - let (peer_id, request) = self.build_request(lookup_type)?; - - // Update request state. - let req_counter = self.get_state_mut().on_download_start(peer_id); - - // Make request - let id = SingleLookupReqId { - id, - req_counter, - lookup_type, - }; - Self::make_request(id, peer_id, request, cx) - } - - /// Verify the current request has not exceeded the maximum number of attempts. - fn too_many_attempts(&self, lookup_type: LookupType) -> Result<(), LookupRequestError> { - let request_state = self.get_state(); - - if request_state.failed_attempts() >= lookup_type.max_attempts() { - let cannot_process = request_state.more_failed_processing_attempts(); - Err(LookupRequestError::TooManyAttempts { cannot_process }) - } else { - Ok(()) - } - } - - /// Get the next peer to request. Draws from the set of peers we think should have both the - /// block and blob first. If that fails, we draw from the set of peers that may have either. - fn get_peer(&mut self) -> Result { - self.get_state_mut() - .use_rand_available_peer() - .ok_or(LookupRequestError::NoPeers) - } - - /// Initialize `Self::RequestType`. - fn new_request(&self) -> Self::RequestType; - - /// Send the request to the network service. + /// Request the network context to prepare a request of a component of `block_root`. If the + /// request is not necessary because the component is already known / processed, return false. + /// Return true if it sent a request and we can expect an event back from the network. 
fn make_request( - id: SingleLookupReqId, + &self, + id: Id, peer_id: PeerId, - request: Self::RequestType, + downloaded_block_expected_blobs: Option, cx: &mut SyncNetworkContext, - ) -> Result<(), LookupRequestError>; + ) -> Result; /* Response handling methods */ - /// A getter for the parent root of the response. Returns an `Option` because we won't know - /// the blob parent if we don't end up getting any blobs in the response. - fn get_parent_root(verified_response: &Self::VerifiedResponseType) -> Option; - - /// Caches the verified response in the lookup if necessary. This is only necessary for lookups - /// triggered by `UnknownParent` errors. - fn add_to_child_components( - verified_response: Self::VerifiedResponseType, - components: &mut ChildComponents, - ); - - /// Convert a verified response to the type we send to the beacon processor. - fn verified_to_reconstructed( - block_root: Hash256, - verified: Self::VerifiedResponseType, - ) -> Self::ReconstructedResponseType; - /// Send the response to the beacon processor. - fn send_reconstructed_for_processing( + fn send_for_processing( id: Id, - bl: &BlockLookups, - block_root: Hash256, - verified: Self::ReconstructedResponseType, - duration: Duration, + result: DownloadResult, cx: &SyncNetworkContext, ) -> Result<(), LookupRequestError>; - /// Register a failure to process the block or blob. - fn register_failure_downloading(&mut self) { - self.get_state_mut().on_download_failure() - } - /* Utility methods */ /// Returns the `ResponseType` associated with this trait implementation. Useful in logging. @@ -171,64 +60,44 @@ pub trait RequestState { fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self; /// A getter for a reference to the `SingleLookupRequestState` associated with this trait. - fn get_state(&self) -> &SingleLookupRequestState; + fn get_state(&self) -> &SingleLookupRequestState; /// A getter for a mutable reference to the SingleLookupRequestState associated with this trait. 
- fn get_state_mut(&mut self) -> &mut SingleLookupRequestState; + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState; } -impl RequestState for BlockRequestState { - type RequestType = BlocksByRootSingleRequest; +impl RequestState for BlockRequestState { type VerifiedResponseType = Arc>; - type ReconstructedResponseType = RpcBlock; - - fn new_request(&self) -> Self::RequestType { - BlocksByRootSingleRequest(self.requested_block_root) - } fn make_request( - id: SingleLookupReqId, + &self, + id: SingleLookupId, peer_id: PeerId, - request: Self::RequestType, + _: Option, cx: &mut SyncNetworkContext, - ) -> Result<(), LookupRequestError> { - cx.block_lookup_request(id, peer_id, request) - .map_err(LookupRequestError::SendFailed) + ) -> Result { + cx.block_lookup_request(id, peer_id, self.requested_block_root) + .map_err(LookupRequestError::SendFailedNetwork) } - fn get_parent_root(verified_response: &Arc>) -> Option { - Some(verified_response.parent_root()) - } - - fn add_to_child_components( - verified_response: Arc>, - components: &mut ChildComponents, - ) { - components.merge_block(verified_response); - } - - fn verified_to_reconstructed( - block_root: Hash256, - block: Arc>, - ) -> RpcBlock { - RpcBlock::new_without_blobs(Some(block_root), block) - } - - fn send_reconstructed_for_processing( - id: Id, - bl: &BlockLookups, - block_root: Hash256, - constructed: RpcBlock, - duration: Duration, + fn send_for_processing( + id: SingleLookupId, + download_result: DownloadResult, cx: &SyncNetworkContext, ) -> Result<(), LookupRequestError> { - bl.send_block_for_processing( + let DownloadResult { + value, block_root, - constructed, - duration, - BlockProcessType::SingleBlock { id }, - cx, + seen_timestamp, + peer_id: _, + } = download_result; + cx.send_block_for_processing( + id, + block_root, + RpcBlock::new_without_blobs(Some(block_root), value), + seen_timestamp, ) + .map_err(LookupRequestError::SendFailedProcessor) } fn response_type() -> ResponseType { @@ 
-237,73 +106,46 @@ impl RequestState for BlockRequestState { fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { &mut request.block_request_state } - fn get_state(&self) -> &SingleLookupRequestState { + fn get_state(&self) -> &SingleLookupRequestState { &self.state } - fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { &mut self.state } } impl RequestState for BlobRequestState { - type RequestType = BlobsByRootSingleBlockRequest; type VerifiedResponseType = FixedBlobSidecarList; - type ReconstructedResponseType = FixedBlobSidecarList; - - fn new_request(&self) -> Self::RequestType { - BlobsByRootSingleBlockRequest { - block_root: self.block_root, - indices: self.requested_ids.indices(), - } - } fn make_request( - id: SingleLookupReqId, - peer_id: PeerId, - request: Self::RequestType, - cx: &mut SyncNetworkContext, - ) -> Result<(), LookupRequestError> { - cx.blob_lookup_request(id, peer_id, request) - .map_err(LookupRequestError::SendFailed) - } - - fn get_parent_root(verified_response: &FixedBlobSidecarList) -> Option { - verified_response - .into_iter() - .filter_map(|blob| blob.as_ref()) - .map(|blob| blob.block_parent_root()) - .next() - } - - fn add_to_child_components( - verified_response: FixedBlobSidecarList, - components: &mut ChildComponents, - ) { - components.merge_blobs(verified_response); - } - - fn verified_to_reconstructed( - _block_root: Hash256, - blobs: FixedBlobSidecarList, - ) -> FixedBlobSidecarList { - blobs - } - - fn send_reconstructed_for_processing( + &self, id: Id, - bl: &BlockLookups, - block_root: Hash256, - verified: FixedBlobSidecarList, - duration: Duration, + peer_id: PeerId, + downloaded_block_expected_blobs: Option, + cx: &mut SyncNetworkContext, + ) -> Result { + cx.blob_lookup_request( + id, + peer_id, + self.block_root, + downloaded_block_expected_blobs, + ) + .map_err(LookupRequestError::SendFailedNetwork) + } + + fn 
send_for_processing( + id: Id, + download_result: DownloadResult, cx: &SyncNetworkContext, ) -> Result<(), LookupRequestError> { - bl.send_blobs_for_processing( + let DownloadResult { + value, block_root, - verified, - duration, - BlockProcessType::SingleBlob { id }, - cx, - ) + seen_timestamp, + peer_id: _, + } = download_result; + cx.send_blobs_for_processing(id, block_root, value, seen_timestamp) + .map_err(LookupRequestError::SendFailedProcessor) } fn response_type() -> ResponseType { @@ -312,10 +154,10 @@ impl RequestState for BlobRequestState { fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { &mut request.blob_request_state } - fn get_state(&self) -> &SingleLookupRequestState { + fn get_state(&self) -> &SingleLookupRequestState { &self.state } - fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { &mut self.state } } diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index a2909b49dd..f685b7e59d 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -1,756 +1,483 @@ -use self::single_block_lookup::SingleBlockLookup; -use super::manager::BlockProcessingResult; -use super::network_context::{LookupFailure, LookupVerifyError}; -use super::BatchProcessResult; -use super::{manager::BlockProcessType, network_context::SyncNetworkContext}; +use self::parent_chain::{compute_parent_chains, NodeChain}; +pub use self::single_block_lookup::DownloadResult; +use self::single_block_lookup::{LookupRequestError, LookupResult, SingleBlockLookup}; +use super::manager::{BlockProcessType, BlockProcessingResult, SLOT_IMPORT_TOLERANCE}; +use super::network_context::{RpcResponseResult, SyncNetworkContext}; use crate::metrics; -use crate::network_beacon_processor::ChainSegmentProcessId; -use crate::sync::block_lookups::common::LookupType; -use 
crate::sync::block_lookups::parent_lookup::{ParentLookup, RequestError}; -use crate::sync::block_lookups::single_block_lookup::{CachedChild, LookupRequestError}; +use crate::sync::block_lookups::common::ResponseType; +use crate::sync::block_lookups::parent_chain::find_oldest_fork_ancestor; use crate::sync::manager::{Id, SingleLookupReqId}; -use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; -pub use beacon_chain::data_availability_checker::ChildComponents; -use beacon_chain::data_availability_checker::{ - AvailabilityCheckErrorCategory, DataAvailabilityChecker, -}; -use beacon_chain::validator_monitor::timestamp_now; +use beacon_chain::block_verification_types::AsBlock; +use beacon_chain::data_availability_checker::AvailabilityCheckErrorCategory; use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError}; pub use common::RequestState; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; pub use single_block_lookup::{BlobRequestState, BlockRequestState}; -use slog::{debug, error, trace, warn, Logger}; -use smallvec::SmallVec; -use std::collections::{HashMap, VecDeque}; +use slog::{debug, error, warn, Logger}; +use std::collections::hash_map::Entry; use std::sync::Arc; use std::time::Duration; use store::Hash256; -use types::blob_sidecar::FixedBlobSidecarList; -use types::Slot; +use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; pub mod common; -mod parent_lookup; +pub mod parent_chain; mod single_block_lookup; #[cfg(test)] mod tests; -pub type DownloadedBlock = (Hash256, RpcBlock); +/// The maximum depth we will search for a parent block. In principle we should have sync'd any +/// canonical chain to its head once the peer connects. A chain should not appear where its depth
+pub(crate) const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; -pub const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; +pub const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 4; + +/// Maximum time we allow a lookup to exist before assuming it is stuck and will never make +/// progress. Assume the worst-case processing time per block component set times the max depth. +/// 15 * 2 * 32 = 16 minutes. +const LOOKUP_MAX_DURATION_STUCK_SECS: u64 = 15 * PARENT_DEPTH_TOLERANCE as u64; +/// The most common case of child-lookup without peers is receiving block components before the +/// attestation deadline when the node is lagging behind. Once peers start attesting for the child +/// lookup at most after 4 seconds, the lookup should gain peers. +const LOOKUP_MAX_DURATION_NO_PEERS_SECS: u64 = 10; + +pub enum BlockComponent { + Block(DownloadResult>>), + Blob(DownloadResult>>), +} + +impl BlockComponent { + fn parent_root(&self) -> Hash256 { + match self { + BlockComponent::Block(block) => block.value.parent_root(), + BlockComponent::Blob(blob) => blob.value.block_parent_root(), + } + } + fn get_type(&self) -> &'static str { + match self { + BlockComponent::Block(_) => "block", + BlockComponent::Blob(_) => "blob", + } + } +} + +pub type SingleLookupId = u32; enum Action { Retry, - ParentUnknown { parent_root: Hash256, slot: Slot }, + ParentUnknown { parent_root: Hash256 }, Drop, Continue, } pub struct BlockLookups { - /// Parent chain lookups being downloaded. - parent_lookups: SmallVec<[ParentLookup; 3]>, - - processing_parent_lookups: HashMap, SingleBlockLookup)>, - /// A cache of failed chain lookups to prevent duplicate searches. failed_chains: LRUTimeCache, - single_block_lookups: FnvHashMap>, - - pub(crate) da_checker: Arc>, + // TODO: Why not index lookups by block_root? + single_block_lookups: FnvHashMap>, /// The logger for the import manager.
log: Logger, } +#[cfg(test)] +/// Tuple of `SingleLookupId`, requested block root, awaiting parent block root (if any), +/// and list of peers that claim to have imported this set of block components. +pub(crate) type BlockLookupSummary = (Id, Hash256, Option, Vec); + impl BlockLookups { - pub fn new(da_checker: Arc>, log: Logger) -> Self { + pub fn new(log: Logger) -> Self { Self { - parent_lookups: Default::default(), - processing_parent_lookups: Default::default(), failed_chains: LRUTimeCache::new(Duration::from_secs( FAILED_CHAINS_CACHE_EXPIRY_SECONDS, )), single_block_lookups: Default::default(), - da_checker, log, } } #[cfg(test)] - pub(crate) fn active_single_lookups(&self) -> Vec { - self.single_block_lookups.keys().cloned().collect() + pub(crate) fn insert_failed_chain(&mut self, block_root: Hash256) { + self.failed_chains.insert(block_root); } #[cfg(test)] - pub(crate) fn active_parent_lookups(&self) -> Vec { - self.parent_lookups + pub(crate) fn get_failed_chains(&mut self) -> Vec { + self.failed_chains.keys().cloned().collect() + } + + #[cfg(test)] + pub(crate) fn active_single_lookups(&self) -> Vec { + self.single_block_lookups .iter() - .map(|r| r.chain_hash()) - .collect::>() + .map(|(id, l)| { + ( + *id, + l.block_root(), + l.awaiting_parent(), + l.all_peers().copied().collect(), + ) + }) + .collect() } - #[cfg(test)] - pub(crate) fn failed_chains_contains(&mut self, chain_hash: &Hash256) -> bool { - self.failed_chains.contains(chain_hash) + /// Returns a vec of all parent lookup chains by tip, in descending slot order (tip first) + pub(crate) fn active_parent_lookups(&self) -> Vec { + compute_parent_chains( + &self + .single_block_lookups + .values() + .map(|lookup| lookup.into()) + .collect::>(), + ) } /* Lookup requests */ - /// Creates a lookup for the block with the given `block_root` and immediately triggers it. - pub fn search_block( + /// Creates a parent lookup for the block with the given `block_root` and immediately triggers it. 
+ /// If a parent lookup exists or is triggered, a current lookup will be created. + pub fn search_child_and_parent( &mut self, block_root: Hash256, - peer_source: &[PeerId], + block_component: BlockComponent, + peer_id: PeerId, cx: &mut SyncNetworkContext, ) { - self.new_current_lookup(block_root, None, peer_source, cx) - } + let parent_root = block_component.parent_root(); - /// Creates a lookup for the block with the given `block_root`, while caching other block - /// components we've already received. The block components are cached here because we haven't - /// imported its parent and therefore can't fully validate it and store it in the data - /// availability cache. - /// - /// The request is immediately triggered. - pub fn search_child_block( - &mut self, - block_root: Hash256, - child_components: ChildComponents, - peer_source: &[PeerId], - cx: &mut SyncNetworkContext, - ) { - self.new_current_lookup(block_root, Some(child_components), peer_source, cx) - } - - /// Attempts to trigger the request matching the given `block_root`. - pub fn trigger_single_lookup( - &mut self, - mut single_block_lookup: SingleBlockLookup, - cx: &mut SyncNetworkContext, - ) { - let block_root = single_block_lookup.block_root(); - match single_block_lookup.request_block_and_blobs(cx) { - Ok(()) => self.add_single_lookup(single_block_lookup), - Err(e) => { - debug!(self.log, "Single block lookup failed"; - "error" => ?e, - "block_root" => ?block_root, - ); - } + let parent_lookup_exists = + self.search_parent_of_child(parent_root, block_root, &[peer_id], cx); + // Only create the child lookup if the parent exists + if parent_lookup_exists { + // `search_parent_of_child` ensures that parent root is not a failed chain + self.new_current_lookup( + block_root, + Some(block_component), + Some(parent_root), + // On a `UnknownParentBlock` or `UnknownParentBlob` event the peer is not required + // to have the rest of the block components (refer to decoupled blob gossip). 
Create + // the lookup with zero peers to house the block components. + &[], + cx, + ); } } - /// Adds a lookup to the `single_block_lookups` map. - pub fn add_single_lookup(&mut self, single_block_lookup: SingleBlockLookup) { - self.single_block_lookups - .insert(single_block_lookup.id, single_block_lookup); + /// Seach a block whose parent root is unknown. + /// Returns true if the lookup is created or already exists + pub fn search_unknown_block( + &mut self, + block_root: Hash256, + peer_source: &[PeerId], + cx: &mut SyncNetworkContext, + ) { + self.new_current_lookup(block_root, None, None, peer_source, cx); + } - metrics::set_gauge( - &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, - self.single_block_lookups.len() as i64, - ); + /// A block or blob triggers the search of a parent. + /// Check if this new lookup extends a bad chain: + /// - Extending `child_block_root_trigger` would exceed the max depth + /// - `block_root_to_search` is a failed chain + /// Returns true if the lookup is created or already exists + pub fn search_parent_of_child( + &mut self, + block_root_to_search: Hash256, + child_block_root_trigger: Hash256, + peers: &[PeerId], + cx: &mut SyncNetworkContext, + ) -> bool { + let parent_chains = self.active_parent_lookups(); + + for (chain_idx, parent_chain) in parent_chains.iter().enumerate() { + if parent_chain.ancestor() == child_block_root_trigger + && parent_chain.len() >= PARENT_DEPTH_TOLERANCE + { + debug!(self.log, "Parent lookup chain too long"; "block_root" => ?block_root_to_search); + + // Searching for this parent would extend a parent chain over the max + // Insert the tip only to failed chains + self.failed_chains.insert(parent_chain.tip); + + // Note: Drop only the chain that's too long until it merges with another chain + // that's not too long. Consider this attack: there's a chain of valid unknown + // blocks A -> B. A malicious peer builds `PARENT_DEPTH_TOLERANCE` garbage + // blocks on top of A forming A -> C. 
The malicious peer forces us to fetch C + // from it, which will result in parent A hitting the chain_too_long error. Then + // the valid chain A -> B is dropped too. + if let Ok(block_to_drop) = find_oldest_fork_ancestor(parent_chains, chain_idx) { + // Drop all lookups descending from the child of the too long parent chain + if let Some((lookup_id, lookup)) = self + .single_block_lookups + .iter() + .find(|(_, l)| l.block_root() == block_to_drop) + { + for &peer_id in lookup.all_peers() { + cx.report_peer( + peer_id, + PeerAction::LowToleranceError, + "chain_too_long", + ); + } + self.drop_lookup_and_children(*lookup_id); + } + } + + return false; + } + } + + // `block_root_to_search` is a failed chain check happens inside new_current_lookup + self.new_current_lookup(block_root_to_search, None, None, peers, cx) } /// Searches for a single block hash. If the blocks parent is unknown, a chain of blocks is /// constructed. - pub fn new_current_lookup( + /// Returns true if the lookup is created or already exists + fn new_current_lookup( &mut self, block_root: Hash256, - child_components: Option>, + block_component: Option>, + awaiting_parent: Option, peers: &[PeerId], cx: &mut SyncNetworkContext, - ) { + ) -> bool { + // If this block or it's parent is part of a known failed chain, ignore it. + if self.failed_chains.contains(&block_root) { + debug!(self.log, "Block is from a past failed chain. 
Dropping"; "block_root" => ?block_root); + for peer_id in peers { + cx.report_peer(*peer_id, PeerAction::MidToleranceError, "failed_chain"); + } + return false; + } + // Do not re-request a block that is already being requested - if let Some((_, lookup)) = self + if let Some((&lookup_id, lookup)) = self .single_block_lookups .iter_mut() .find(|(_id, lookup)| lookup.is_for_block(block_root)) { - lookup.add_peers(peers); - if let Some(components) = child_components { - lookup.add_child_components(components); + if let Some(block_component) = block_component { + let component_type = block_component.get_type(); + let imported = lookup.add_child_components(block_component); + if !imported { + debug!(self.log, "Lookup child component ignored"; "block_root" => ?block_root, "type" => component_type); + } } - return; + + if let Err(e) = self.add_peers_to_lookup_and_ancestors(lookup_id, peers) { + warn!(self.log, "Error adding peers to ancestor lookup"; "error" => ?e); + } + + return true; } - if let Some(parent_lookup) = self.parent_lookups.iter_mut().find(|parent_req| { - parent_req.is_for_block(block_root) || parent_req.contains_block(&block_root) - }) { - parent_lookup.add_peers(peers); - - // If the block was already downloaded, or is being downloaded in this moment, do not - // request it. - trace!(self.log, "Already searching for block in a parent lookup request"; "block_root" => ?block_root); - return; + // Ensure that awaiting parent exists, otherwise this lookup won't be able to make progress + if let Some(awaiting_parent) = awaiting_parent { + if !self + .single_block_lookups + .iter() + .any(|(_, lookup)| lookup.is_for_block(awaiting_parent)) + { + warn!(self.log, "Ignoring child lookup parent lookup not found"; "block_root" => ?awaiting_parent); + return false; + } } - if self - .processing_parent_lookups - .values() - .any(|(hashes, _last_parent_request)| hashes.contains(&block_root)) - { - // we are already processing this block, ignore it. 
- trace!(self.log, "Already processing block in a parent request"; "block_root" => ?block_root); - return; - } + // If we know that this lookup has unknown parent (is awaiting a parent lookup to resolve), + // signal here to hold processing downloaded data. + let mut lookup = SingleBlockLookup::new(block_root, peers, cx.next_id(), awaiting_parent); - let msg = if child_components.is_some() { + let msg = if block_component.is_some() { "Searching for components of a block with unknown parent" } else { "Searching for block components" }; - - let lookup = SingleBlockLookup::new( - block_root, - child_components, - peers, - self.da_checker.clone(), - cx.next_id(), - LookupType::Current, - ); - debug!( self.log, "{}", msg; "peer_ids" => ?peers, - "block" => ?block_root, + "block_root" => ?block_root, + "id" => lookup.id, ); - self.trigger_single_lookup(lookup, cx); - } + metrics::inc_counter(&metrics::SYNC_LOOKUP_CREATED); - /// If a block is attempted to be processed but we do not know its parent, this function is - /// called in order to find the block's parent. - pub fn search_parent( - &mut self, - slot: Slot, - block_root: Hash256, - parent_root: Hash256, - peer_id: PeerId, - cx: &mut SyncNetworkContext, - ) { - // If this block or it's parent is part of a known failed chain, ignore it. - if self.failed_chains.contains(&parent_root) || self.failed_chains.contains(&block_root) { - debug!(self.log, "Block is from a past failed chain. Dropping"; - "block_root" => ?block_root, "block_slot" => slot); - return; + // Add block components to the new request + if let Some(block_component) = block_component { + lookup.add_child_components(block_component); } - // Make sure this block is not already downloaded, and that neither it or its parent is - // being searched for. 
- if let Some(parent_lookup) = self.parent_lookups.iter_mut().find(|parent_req| { - parent_req.contains_block(&parent_root) || parent_req.is_for_block(parent_root) - }) { - parent_lookup.add_peer(peer_id); - // we are already searching for this block, ignore it - debug!(self.log, "Already searching for parent block"; - "block_root" => ?block_root, "parent_root" => ?parent_root); - return; + let id = lookup.id; + let lookup = match self.single_block_lookups.entry(id) { + Entry::Vacant(entry) => entry.insert(lookup), + Entry::Occupied(_) => { + // Should never happen + warn!(self.log, "Lookup exists with same id"; "id" => id); + return false; + } + }; + + let result = lookup.continue_requests(cx); + if self.on_lookup_result(id, result, "new_current_lookup", cx) { + self.update_metrics(); + true + } else { + false } - - if self - .processing_parent_lookups - .iter() - .any(|(chain_hash, (hashes, _peers))| { - chain_hash == &block_root - || hashes.contains(&block_root) - || hashes.contains(&parent_root) - }) - { - // we are already processing this block, ignore it. - debug!(self.log, "Already processing parent block"; - "block_root" => ?block_root, "parent_root" => ?parent_root); - return; - } - let parent_lookup = ParentLookup::new( - block_root, - parent_root, - peer_id, - self.da_checker.clone(), - cx, - ); - - debug!(self.log, "Created new parent lookup"; "block_root" => ?block_root, "parent_root" => ?parent_root); - - self.request_parent(parent_lookup, cx); } /* Lookup responses */ - /// Get a single block lookup by its ID. This method additionally ensures the `req_counter` - /// matches the current `req_counter` for the lookup. This ensures any stale responses from requests - /// that have been retried are ignored. - fn get_single_lookup>( + /// Process a block or blob response received from a single lookup request. 
+ pub fn on_download_response>( &mut self, id: SingleLookupReqId, - ) -> Option> { - let mut lookup = self.single_block_lookups.remove(&id.id)?; - - let request_state = R::request_state_mut(&mut lookup); - if request_state - .get_state() - .is_current_req_counter(id.req_counter) - { - Some(lookup) - } else { - // We don't want to drop the lookup, just ignore the old response. - self.single_block_lookups.insert(id.id, lookup); - None - } - } - - /// Checks whether a single block lookup is waiting for a parent lookup to complete. This is - /// necessary because we want to make sure all parents are processed before sending a child - /// for processing, otherwise the block will fail validation and will be returned to the network - /// layer with an `UnknownParent` error. - pub fn has_pending_parent_request(&self, block_root: Hash256) -> bool { - self.parent_lookups - .iter() - .any(|parent_lookup| parent_lookup.chain_hash() == block_root) + peer_id: PeerId, + response: RpcResponseResult, + cx: &mut SyncNetworkContext, + ) { + let result = self.on_download_response_inner::(id, peer_id, response, cx); + self.on_lookup_result(id.lookup_id, result, "download_response", cx); } /// Process a block or blob response received from a single lookup request. - pub fn single_lookup_response>( + pub fn on_download_response_inner>( &mut self, - lookup_id: SingleLookupReqId, + id: SingleLookupReqId, peer_id: PeerId, - response: R::VerifiedResponseType, - seen_timestamp: Duration, + response: RpcResponseResult, cx: &mut SyncNetworkContext, - ) { - let id = lookup_id.id; - let response_type = R::response_type(); + ) -> Result { + // Note: no need to downscore peers here, already downscored on network context - let Some(mut lookup) = self.get_single_lookup::(lookup_id) else { + let response_type = R::response_type(); + let Some(lookup) = self.single_block_lookups.get_mut(&id.lookup_id) else { // We don't have the ability to cancel in-flight RPC requests. 
So this can happen // if we started this RPC request, and later saw the block/blobs via gossip. - debug!( - self.log, - "Block returned for single block lookup not present"; - "response_type" => ?response_type, - ); - return; + debug!(self.log, "Block returned for single block lookup not present"; "id" => ?id); + return Err(LookupRequestError::UnknownLookup); }; - let expected_block_root = lookup.block_root(); - debug!(self.log, - "Peer returned response for single lookup"; - "peer_id" => %peer_id , - "id" => ?id, - "block_root" => ?expected_block_root, - "response_type" => ?response_type, - ); + let block_root = lookup.block_root(); + let request_state = R::request_state_mut(lookup).get_state_mut(); - match self.handle_verified_response::( - seen_timestamp, - cx, - BlockProcessType::SingleBlock { id: lookup.id }, - response, - &mut lookup, - ) { - Ok(_) => { - self.single_block_lookups.insert(id, lookup); + match response { + Ok((response, seen_timestamp)) => { + debug!(self.log, + "Received lookup download success"; + "block_root" => ?block_root, + "id" => ?id, + "peer_id" => %peer_id, + "response_type" => ?response_type, + ); + + // Register the download peer here. Once we have received some data over the wire we + // attribute it to this peer for scoring latter regardless of how the request was + // done. 
+ request_state.on_download_success( + id.req_id, + DownloadResult { + value: response, + block_root, + seen_timestamp, + peer_id, + }, + )?; + // continue_request will send for processing as the request state is AwaitingProcessing } Err(e) => { debug!(self.log, - "Single lookup request failed"; - "error" => ?e, - "block_root" => ?expected_block_root, + "Received lookup download failure"; + "block_root" => ?block_root, + "id" => ?id, + "peer_id" => %peer_id, + "response_type" => ?response_type, + "error" => %e, ); + + request_state.on_download_failure(id.req_id)?; + // continue_request will retry a download as the request state is AwaitingDownload } } - metrics::set_gauge( - &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, - self.single_block_lookups.len() as i64, - ); - } - - /// Consolidates error handling for `single_lookup_response`. An `Err` here should always mean - /// the lookup is dropped. - fn handle_verified_response>( - &self, - seen_timestamp: Duration, - cx: &mut SyncNetworkContext, - process_type: BlockProcessType, - verified_response: R::VerifiedResponseType, - lookup: &mut SingleBlockLookup, - ) -> Result<(), LookupRequestError> { - let id = lookup.id; - let block_root = lookup.block_root(); - - let cached_child = lookup.add_response::(verified_response.clone()); - match cached_child { - CachedChild::Ok(block) => { - // If we have an outstanding parent request for this block, delay sending the response until - // all parent blocks have been processed, otherwise we will fail validation with an - // `UnknownParent`. - let delay_send = match lookup.lookup_type { - LookupType::Parent => false, - LookupType::Current => self.has_pending_parent_request(lookup.block_root()), - }; - - if !delay_send { - R::request_state_mut(lookup) - .get_state_mut() - .on_download_success() - .map_err(LookupRequestError::BadState)?; - self.send_block_for_processing( - block_root, - block, - seen_timestamp, - process_type, - cx, - )? 
- } - } - CachedChild::DownloadIncomplete => { - R::request_state_mut(lookup) - .get_state_mut() - .on_download_success() - .map_err(LookupRequestError::BadState)?; - // If this was the result of a block request, we can't determine if the block peer - // did anything wrong. If we already had both a block and blobs response processed, - // we should penalize the blobs peer because they did not provide all blobs on the - // initial request. - if lookup.both_components_downloaded() { - lookup.penalize_blob_peer(cx); - lookup.blob_request_state.state.on_download_failure(); - } - lookup.request_block_and_blobs(cx)?; - } - CachedChild::NotRequired => { - R::request_state_mut(lookup) - .get_state_mut() - .on_download_success() - .map_err(LookupRequestError::BadState)?; - - R::send_reconstructed_for_processing( - id, - self, - block_root, - R::verified_to_reconstructed(block_root, verified_response), - seen_timestamp, - cx, - )? - } - CachedChild::Err(e) => { - warn!(self.log, "Consistency error in cached block"; - "error" => ?e, - "block_root" => ?block_root - ); - lookup.handle_consistency_failure(cx); - lookup.request_block_and_blobs(cx)?; - } - } - Ok(()) - } - - /// Get a parent block lookup by its ID. This method additionally ensures the `req_counter` - /// matches the current `req_counter` for the lookup. This any stale responses from requests - /// that have been retried are ignored. - fn get_parent_lookup>( - &mut self, - id: SingleLookupReqId, - ) -> Option> { - let mut parent_lookup = if let Some(pos) = self - .parent_lookups - .iter() - .position(|request| request.current_parent_request.id == id.id) - { - self.parent_lookups.remove(pos) - } else { - return None; - }; - - if R::request_state_mut(&mut parent_lookup.current_parent_request) - .get_state() - .is_current_req_counter(id.req_counter) - { - Some(parent_lookup) - } else { - self.parent_lookups.push(parent_lookup); - None - } - } - - /// Process a response received from a parent lookup request. 
- pub fn parent_lookup_response>( - &mut self, - id: SingleLookupReqId, - peer_id: PeerId, - response: R::VerifiedResponseType, - seen_timestamp: Duration, - cx: &mut SyncNetworkContext, - ) { - let Some(mut parent_lookup) = self.get_parent_lookup::(id) else { - debug!(self.log, "Response for a parent lookup request that was not found"; "peer_id" => %peer_id); - return; - }; - - debug!(self.log, - "Peer returned response for parent lookup"; - "peer_id" => %peer_id , - "id" => ?id, - "block_root" => ?parent_lookup.current_parent_request.block_request_state.requested_block_root, - "response_type" => ?R::response_type(), - ); - - match self.parent_lookup_response_inner::( - peer_id, - response, - seen_timestamp, - cx, - &mut parent_lookup, - ) { - Ok(()) => { - self.parent_lookups.push(parent_lookup); - } - Err(e) => { - self.handle_parent_request_error(&mut parent_lookup, cx, e); - } - } - - metrics::set_gauge( - &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_lookups.len() as i64, - ); - } - - /// Consolidates error handling for `parent_lookup_response`. An `Err` here should always mean - /// the lookup is dropped. - fn parent_lookup_response_inner>( - &mut self, - peer_id: PeerId, - response: R::VerifiedResponseType, - seen_timestamp: Duration, - cx: &mut SyncNetworkContext, - parent_lookup: &mut ParentLookup, - ) -> Result<(), RequestError> { - // check if the parent of this block isn't in the failed cache. If it is, this chain should - // be dropped and the peer downscored. 
- if let Some(parent_root) = R::get_parent_root(&response) { - if self.failed_chains.contains(&parent_root) { - let request_state = R::request_state_mut(&mut parent_lookup.current_parent_request); - request_state.register_failure_downloading(); - debug!( - self.log, - "Parent chain ignored due to past failure"; - "block" => %parent_root, - ); - // Add the root block to failed chains - self.failed_chains.insert(parent_lookup.chain_hash()); - - cx.report_peer( - peer_id, - PeerAction::MidToleranceError, - "bbroot_failed_chains", - ); - return Ok(()); - } - } - - self.handle_verified_response::( - seen_timestamp, - cx, - BlockProcessType::ParentLookup { - chain_hash: parent_lookup.chain_hash(), - }, - response, - &mut parent_lookup.current_parent_request, - )?; - - Ok(()) - } - - /// Handle logging and peer scoring for `RequestError`s during parent lookup requests. - fn handle_parent_request_error( - &mut self, - parent_lookup: &mut ParentLookup, - cx: &SyncNetworkContext, - e: RequestError, - ) { - debug!(self.log, "Failed to request parent"; "error" => e.as_static()); - match e { - RequestError::SendFailed(_) => { - // Probably shutting down, nothing to do here. Drop the request - } - RequestError::ChainTooLong => { - self.failed_chains.insert(parent_lookup.chain_hash()); - // This indicates faulty peers. - for &peer_id in parent_lookup.all_used_peers() { - cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) - } - } - RequestError::TooManyAttempts { cannot_process } => { - // We only consider the chain failed if we were unable to process it. - // We could have failed because one peer continually failed to send us - // bad blocks. We still allow other peers to send us this chain. Note - // that peers that do this, still get penalised. - if cannot_process { - self.failed_chains.insert(parent_lookup.chain_hash()); - } - // This indicates faulty peers. 
- for &peer_id in parent_lookup.all_used_peers() { - cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) - } - } - RequestError::NoPeers => { - // This happens if the peer disconnects while the block is being - // processed. Drop the request without extra penalty - } - RequestError::BadState(..) => { - warn!(self.log, "Failed to request parent"; "error" => e.as_static()); - } - } + lookup.continue_requests(cx) } /* Error responses */ - pub fn peer_disconnected(&mut self, peer_id: &PeerId, cx: &mut SyncNetworkContext) { - /* Check disconnection for single lookups */ - self.single_block_lookups.retain(|_, req| { - let should_drop_lookup = - req.should_drop_lookup_on_disconnected_peer(peer_id, cx, &self.log); + pub fn peer_disconnected(&mut self, peer_id: &PeerId) { + self.single_block_lookups.retain(|_, lookup| { + lookup.remove_peer(peer_id); - !should_drop_lookup + // Note: this condition should be removed in the future. It's not strictly necessary to drop a + // lookup if there are no peers left. Lookup should only be dropped if it can not make progress + if lookup.has_no_peers() { + debug!(self.log, + "Dropping single lookup after peer disconnection"; + "block_root" => ?lookup.block_root() + ); + false + } else { + true + } }); - - /* Check disconnection for parent lookups */ - while let Some(pos) = self - .parent_lookups - .iter_mut() - .position(|req| req.check_peer_disconnected(peer_id).is_err()) - { - let parent_lookup = self.parent_lookups.remove(pos); - debug!(self.log, "Dropping parent lookup after peer disconnected"; &parent_lookup); - self.request_parent(parent_lookup, cx); - } - } - - /// An RPC error has occurred during a parent lookup. This function handles this case. - pub fn parent_lookup_failed>( - &mut self, - id: SingleLookupReqId, - peer_id: &PeerId, - cx: &mut SyncNetworkContext, - error: LookupFailure, - ) { - // Only downscore lookup verify errors. RPC errors are downscored in the network handler. 
- if let LookupFailure::LookupVerifyError(e) = &error { - // Downscore peer even if lookup is not known - self.downscore_on_rpc_error(peer_id, e, cx); - } - - let Some(mut parent_lookup) = self.get_parent_lookup::(id) else { - debug!(self.log, - "RPC failure for a block parent lookup request that was not found"; - "peer_id" => %peer_id, - "error" => %error - ); - return; - }; - R::request_state_mut(&mut parent_lookup.current_parent_request) - .register_failure_downloading(); - debug!(self.log, "Parent lookup block request failed"; - "chain_hash" => %parent_lookup.chain_hash(), "id" => ?id, "error" => %error - ); - - self.request_parent(parent_lookup, cx); - - metrics::set_gauge( - &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_lookups.len() as i64, - ); - } - - /// An RPC error has occurred during a single lookup. This function handles this case.\ - pub fn single_block_lookup_failed>( - &mut self, - id: SingleLookupReqId, - peer_id: &PeerId, - cx: &mut SyncNetworkContext, - error: LookupFailure, - ) { - // Only downscore lookup verify errors. RPC errors are downscored in the network handler. 
- if let LookupFailure::LookupVerifyError(e) = &error { - // Downscore peer even if lookup is not known - self.downscore_on_rpc_error(peer_id, e, cx); - } - - let log = self.log.clone(); - let Some(mut lookup) = self.get_single_lookup::(id) else { - debug!(log, "Error response to dropped lookup"; "error" => %error); - return; - }; - let block_root = lookup.block_root(); - let request_state = R::request_state_mut(&mut lookup); - let response_type = R::response_type(); - trace!(log, - "Single lookup failed"; - "block_root" => ?block_root, - "error" => %error, - "peer_id" => %peer_id, - "response_type" => ?response_type - ); - let id = id.id; - request_state.register_failure_downloading(); - if let Err(e) = lookup.request_block_and_blobs(cx) { - debug!(self.log, - "Single lookup retry failed"; - "error" => ?e, - "block_root" => ?block_root, - ); - } else { - self.single_block_lookups.insert(id, lookup); - } - - metrics::set_gauge( - &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, - self.single_block_lookups.len() as i64, - ); } /* Processing responses */ - pub fn single_block_component_processed>( + pub fn on_processing_result( &mut self, - target_id: Id, + process_type: BlockProcessType, result: BlockProcessingResult, cx: &mut SyncNetworkContext, ) { - let Some(mut lookup) = self.single_block_lookups.remove(&target_id) else { - debug!(self.log, "Unknown single block lookup"; "target_id" => target_id); - return; + let lookup_result = match process_type { + BlockProcessType::SingleBlock { id } => { + self.on_processing_result_inner::>(id, result, cx) + } + BlockProcessType::SingleBlob { id } => { + self.on_processing_result_inner::>(id, result, cx) + } + }; + let id = match process_type { + BlockProcessType::SingleBlock { id } | BlockProcessType::SingleBlob { id } => id, + }; + self.on_lookup_result(id, lookup_result, "processing_result", cx); + } + + pub fn on_processing_result_inner>( + &mut self, + lookup_id: SingleLookupId, + result: BlockProcessingResult, + cx: &mut 
SyncNetworkContext, + ) -> Result { + let Some(lookup) = self.single_block_lookups.get_mut(&lookup_id) else { + debug!(self.log, "Unknown single block lookup"; "id" => lookup_id); + return Err(LookupRequestError::UnknownLookup); }; let block_root = lookup.block_root(); - let request_state = R::request_state_mut(&mut lookup); + let request_state = R::request_state_mut(lookup).get_state_mut(); - let peer_id = match request_state.get_state().processing_peer() { - Ok(peer_id) => peer_id, - Err(e) => { - debug!(self.log, "Attempting to process single block lookup in bad state"; "id" => target_id, "response_type" => ?R::response_type(), "error" => e); - return; - } - }; debug!( self.log, - "Block component processed for lookup"; - "response_type" => ?R::response_type(), + "Received lookup processing result"; + "component" => ?R::response_type(), "block_root" => ?block_root, + "id" => lookup_id, "result" => ?result, - "id" => target_id, ); let action = match result { BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_)) - | BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown { .. }) => { + | BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown(_)) => { // Successfully imported - trace!(self.log, "Single block processing succeeded"; "block" => %block_root); - Action::Drop + request_state.on_processing_success()?; + Action::Continue } BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( @@ -759,28 +486,17 @@ impl BlockLookups { )) => { // `on_processing_success` is called here to ensure the request state is updated prior to checking // if both components have been processed. - if R::request_state_mut(&mut lookup) - .get_state_mut() - .on_processing_success() - .is_err() - { - warn!( - self.log, - "Single block processing state incorrect"; - "action" => "dropping single block request" - ); - Action::Drop - // If this was the result of a block request, we can't determined if the block peer did anything - // wrong. 
If we already had both a block and blobs response processed, we should penalize the - // blobs peer because they did not provide all blobs on the initial request. - } else if lookup.both_components_processed() { - lookup.penalize_blob_peer(cx); + request_state.on_processing_success()?; - // Try it again if possible. - lookup.blob_request_state.state.on_processing_failure(); - Action::Retry + if lookup.both_components_processed() { + // We don't request for other block components until being sure that the block has + // data. If we request blobs / columns to a peer we are sure those must exist. + // Therefore if all components are processed and we still receive `MissingComponents` + // it indicates an internal bug. + return Err(LookupRequestError::MissingComponentsAfterAllProcessed); } else { - Action::Continue + // Continue request, potentially request blobs + Action::Retry } } BlockProcessingResult::Ignored => { @@ -788,25 +504,28 @@ impl BlockLookups { // This implies that the cpu is overloaded. Drop the request. 
warn!( self.log, - "Single block processing was ignored, cpu might be overloaded"; - "action" => "dropping single block request" + "Lookup component processing ignored, cpu might be overloaded"; + "component" => ?R::response_type(), ); Action::Drop } BlockProcessingResult::Err(e) => { - let root = lookup.block_root(); - trace!(self.log, "Single block processing failed"; "block" => %root, "error" => %e); match e { BlockError::BeaconChainError(e) => { // Internal error - error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e); + error!(self.log, "Beacon chain error processing lookup component"; "block_root" => %block_root, "error" => ?e); Action::Drop } BlockError::ParentUnknown(block) => { - let slot = block.slot(); - let parent_root = block.parent_root(); - lookup.add_child_components(block.into()); - Action::ParentUnknown { parent_root, slot } + // Reverts the status of this request to `AwaitingProcessing` holding the + // downloaded data. A future call to `continue_requests` will re-submit it + // once there are no pending parent requests. + // Note: `BlockError::ParentUnknown` is only returned when processing + // blocks, not blobs. + request_state.revert_to_awaiting_processing()?; + Action::ParentUnknown { + parent_root: block.parent_root(), + } } ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { // These errors indicate that the execution layer is offline @@ -814,35 +533,36 @@ impl BlockLookups { debug!( self.log, "Single block lookup failed. 
Execution layer is offline / unsynced / misconfigured"; - "root" => %root, + "block_root" => ?block_root, "error" => ?e ); Action::Drop } - BlockError::AvailabilityCheck(e) => match e.category() { - AvailabilityCheckErrorCategory::Internal => { - warn!(self.log, "Internal availability check failure"; "root" => %root, "peer_id" => %peer_id, "error" => ?e); - lookup.block_request_state.state.on_download_failure(); - lookup.blob_request_state.state.on_download_failure(); - Action::Retry - } - AvailabilityCheckErrorCategory::Malicious => { - warn!(self.log, "Availability check failure"; "root" => %root, "peer_id" => %peer_id, "error" => ?e); - lookup.handle_availability_check_failure(cx); - Action::Retry - } - }, - other => { - warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?other, "peer_id" => %peer_id); - if let Ok(block_peer) = lookup.block_request_state.state.processing_peer() { - cx.report_peer( - block_peer, - PeerAction::MidToleranceError, - "single_block_failure", - ); + BlockError::AvailabilityCheck(e) + if e.category() == AvailabilityCheckErrorCategory::Internal => + { + // There errors indicate internal problems and should not downscore the peer + warn!(self.log, "Internal availability check failure"; "block_root" => ?block_root, "error" => ?e); + + // Here we choose *not* to call `on_processing_failure` because this could result in a bad + // lookup state transition. This error invalidates both blob and block requests, and we don't know the + // state of both requests. Blobs may have already successfullly processed for example. + // We opt to drop the lookup instead. 
+ Action::Drop + } + other => { + debug!(self.log, "Invalid lookup component"; "block_root" => ?block_root, "component" => ?R::response_type(), "error" => ?other); + + let peer_id = request_state.on_processing_failure()?; + cx.report_peer( + peer_id, + PeerAction::MidToleranceError, + match R::response_type() { + ResponseType::Block => "lookup_block_processing_failure", + ResponseType::Blob => "lookup_blobs_processing_failure", + }, + ); - lookup.block_request_state.state.on_processing_failure(); - } Action::Retry } } @@ -851,466 +571,132 @@ impl BlockLookups { match action { Action::Retry => { - if let Err(e) = lookup.request_block_and_blobs(cx) { - warn!(self.log, "Single block lookup failed"; "block_root" => %block_root, "error" => ?e); - // Failed with too many retries, drop with noop - self.update_metrics(); - } else { - self.single_block_lookups.insert(target_id, lookup); - } + // Trigger download for all components in case `MissingComponents` failed the blob + // request. Also if blobs are `AwaitingProcessing` and need to be progressed + lookup.continue_requests(cx) } - Action::ParentUnknown { parent_root, slot } => { - // TODO: Consider including all peers from the lookup, claiming to know this block, not - // just the one that sent this specific block - self.search_parent(slot, block_root, parent_root, peer_id, cx); - self.single_block_lookups.insert(target_id, lookup); + Action::ParentUnknown { parent_root } => { + let peers = lookup.all_peers().copied().collect::>(); + lookup.set_awaiting_parent(parent_root); + debug!(self.log, "Marking lookup as awaiting parent"; "id" => lookup.id, "block_root" => ?block_root, "parent_root" => ?parent_root); + self.search_parent_of_child(parent_root, block_root, &peers, cx); + Ok(LookupResult::Pending) } Action::Drop => { - // drop with noop - self.update_metrics(); + // Drop with noop + Err(LookupRequestError::Failed) } Action::Continue => { - self.single_block_lookups.insert(target_id, lookup); + // Drop this 
completed lookup only + Ok(LookupResult::Completed) } } } - pub fn parent_block_processed( + pub fn on_external_processing_result( &mut self, - chain_hash: Hash256, - result: BlockProcessingResult, + block_root: Hash256, + imported: bool, cx: &mut SyncNetworkContext, ) { - let index = self - .parent_lookups - .iter() - .enumerate() - .find(|(_, lookup)| lookup.chain_hash() == chain_hash) - .map(|(index, _)| index); - - let Some(mut parent_lookup) = index.map(|index| self.parent_lookups.remove(index)) else { - return debug!(self.log, "Process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); - }; - - match &result { - BlockProcessingResult::Ok(status) => match status { - AvailabilityProcessingStatus::Imported(block_root) => { - debug!(self.log, "Parent block processing succeeded"; &parent_lookup, "block_root" => ?block_root) - } - AvailabilityProcessingStatus::MissingComponents(_, block_root) => { - debug!(self.log, "Parent missing parts, triggering single block lookup"; &parent_lookup,"block_root" => ?block_root) - } - }, - BlockProcessingResult::Err(e) => { - debug!(self.log, "Parent block processing failed"; &parent_lookup, "error" => %e) - } - BlockProcessingResult::Ignored => { - debug!( - self.log, - "Parent block processing job was ignored"; - "action" => "re-requesting block", - &parent_lookup - ); - } - } - - match result { - BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( - _, - block_root, - )) => { - let expected_block_root = parent_lookup.current_parent_request.block_root(); - if block_root != expected_block_root { - warn!( - self.log, - "Parent block processing result/request root mismatch"; - "request" =>?expected_block_root, - "result" => ?block_root - ); - return; - } - - // We only send parent blocks + blobs for processing together. This means a - // `MissingComponents` response here indicates missing blobs. Therefore we always - // register a blob processing failure here. 
- parent_lookup - .current_parent_request - .blob_request_state - .state - .on_processing_failure(); - match parent_lookup - .current_parent_request - .request_block_and_blobs(cx) - { - Ok(()) => self.parent_lookups.push(parent_lookup), - Err(e) => self.handle_parent_request_error(&mut parent_lookup, cx, e.into()), - } - } - BlockProcessingResult::Err(BlockError::ParentUnknown(block)) => { - parent_lookup.add_unknown_parent_block(block); - self.request_parent(parent_lookup, cx); - } - BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_)) - | BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown(_)) => { - let (chain_hash, blocks, hashes, block_request) = - parent_lookup.parts_for_processing(); - - let blocks = self.add_child_block_to_chain(chain_hash, blocks, cx).into(); - - let process_id = ChainSegmentProcessId::ParentLookup(chain_hash); - - // Check if the beacon processor is available - let Some(beacon_processor) = cx.beacon_processor_if_enabled() else { - return trace!( - self.log, - "Dropping parent chain segment that was ready for processing."; - "chain_hash" => %chain_hash, - ); - }; - - match beacon_processor.send_chain_segment(process_id, blocks) { - Ok(_) => { - self.processing_parent_lookups - .insert(chain_hash, (hashes, block_request)); - } - Err(e) => { - error!( - self.log, - "Failed to send chain segment to processor"; - "error" => ?e - ); - } - } - } - ref e @ BlockProcessingResult::Err(BlockError::ExecutionPayloadError(ref epe)) - if !epe.penalize_peer() => - { - // These errors indicate that the execution layer is offline - // and failed to validate the execution payload. Do not downscore peer. - debug!( - self.log, - "Parent lookup failed. 
Execution layer is offline"; - "chain_hash" => %chain_hash, - "error" => ?e - ); - } - BlockProcessingResult::Err(outcome) => { - self.handle_parent_block_error(outcome, cx, parent_lookup); - } - BlockProcessingResult::Ignored => { - // Beacon processor signalled to ignore the block processing result. - // This implies that the cpu is overloaded. Drop the request. - warn!( - self.log, - "Parent block processing was ignored, cpu might be overloaded"; - "action" => "dropping parent request" - ); - } - } - - metrics::set_gauge( - &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_lookups.len() as i64, - ); - } - - /// Find the child block that spawned the parent lookup request and add it to the chain - /// to send for processing. - fn add_child_block_to_chain( - &mut self, - chain_hash: Hash256, - mut blocks: VecDeque>, - cx: &mut SyncNetworkContext, - ) -> VecDeque> { - // Find the child block that spawned the parent lookup request and add it to the chain - // to send for processing. - if let Some(child_lookup_id) = self + let Some((id, lookup)) = self .single_block_lookups - .iter() - .find_map(|(id, lookup)| (lookup.block_root() == chain_hash).then_some(*id)) - { - let Some(child_lookup) = self.single_block_lookups.get_mut(&child_lookup_id) else { - debug!(self.log, "Missing child for parent lookup request"; "child_root" => ?chain_hash); - return blocks; - }; - match child_lookup.get_cached_child_block() { - CachedChild::Ok(rpc_block) => { - // Insert this block at the front. 
This order is important because we later check - // for linear roots in `filter_chain_segment` - blocks.push_front(rpc_block); - } - CachedChild::DownloadIncomplete => { - trace!(self.log, "Parent lookup chain complete, awaiting child response"; "chain_hash" => ?chain_hash); - } - CachedChild::NotRequired => { - warn!(self.log, "Child not cached for parent lookup"; "chain_hash" => %chain_hash); - } - CachedChild::Err(e) => { - warn!( - self.log, - "Consistency error in child block triggering chain or parent lookups"; - "error" => ?e, - "chain_hash" => ?chain_hash - ); - child_lookup.handle_consistency_failure(cx); - if let Err(e) = child_lookup.request_block_and_blobs(cx) { - debug!(self.log, - "Failed to request block and blobs, dropping lookup"; - "error" => ?e - ); - self.single_block_lookups.remove(&child_lookup_id); - } - } - } + .iter_mut() + .find(|(_, lookup)| lookup.is_for_block(block_root)) + else { + // Ok to ignore gossip process events + return; + }; + + let lookup_result = if imported { + Ok(LookupResult::Completed) } else { - debug!(self.log, "Missing child for parent lookup request"; "child_root" => ?chain_hash); + lookup.continue_requests(cx) }; - blocks + let id = *id; + self.on_lookup_result(id, lookup_result, "external_processing_result", cx); } - /// Handle the peer scoring, retries, and logging related to a `BlockError` returned from - /// processing a block + blobs for a parent lookup. - fn handle_parent_block_error( - &mut self, - outcome: BlockError<::EthSpec>, - cx: &mut SyncNetworkContext, - mut parent_lookup: ParentLookup, - ) { - // We should always have a block peer. 
- let block_peer_id = match parent_lookup.block_processing_peer() { - Ok(peer_id) => peer_id, - Err(e) => { - warn!(self.log, "Parent lookup in bad state"; "chain_hash" => %parent_lookup.chain_hash(), "error" => e); - return; - } - }; + /// Makes progress on the immediate children of `block_root` + pub fn continue_child_lookups(&mut self, block_root: Hash256, cx: &mut SyncNetworkContext) { + let mut lookup_results = vec![]; // < need to buffer lookup results to not re-borrow &mut self - // We may not have a blob peer, if there were no blobs required for this block. - let blob_peer_id = parent_lookup.blob_processing_peer().ok(); - - // all else we consider the chain a failure and downvote the peer that sent - // us the last block - warn!( - self.log, "Invalid parent chain"; - "score_adjustment" => %PeerAction::MidToleranceError, - "outcome" => ?outcome, - "block_peer_id" => %block_peer_id, - ); - // This currently can be a host of errors. We permit this due to the partial - // ambiguity. 
- cx.report_peer( - block_peer_id, - PeerAction::MidToleranceError, - "parent_request_err", - ); - // Don't downscore the same peer twice - if let Some(blob_peer_id) = blob_peer_id { - if block_peer_id != blob_peer_id { - debug!( - self.log, "Additionally down-scoring blob peer"; - "score_adjustment" => %PeerAction::MidToleranceError, - "outcome" => ?outcome, - "blob_peer_id" => %blob_peer_id, - ); - cx.report_peer( - blob_peer_id, - PeerAction::MidToleranceError, - "parent_request_err", - ); + for (id, lookup) in self.single_block_lookups.iter_mut() { + if lookup.awaiting_parent() == Some(block_root) { + lookup.resolve_awaiting_parent(); + debug!(self.log, "Continuing child lookup"; "parent_root" => ?block_root, "id" => id, "block_root" => ?lookup.block_root()); + let result = lookup.continue_requests(cx); + lookup_results.push((*id, result)); } } - // Try again if possible - parent_lookup.processing_failed(); - self.request_parent(parent_lookup, cx); + for (id, result) in lookup_results { + self.on_lookup_result(id, result, "continue_child_lookups", cx); + } } - pub fn parent_chain_processed( - &mut self, - chain_hash: Hash256, - result: BatchProcessResult, - cx: &mut SyncNetworkContext, - ) { - let Some((_hashes, request)) = self.processing_parent_lookups.remove(&chain_hash) else { - return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash, "result" => ?result); - }; + /// Drops `dropped_id` lookup and all its children recursively. Lookups awaiting a parent need + /// the parent to make progress to resolve, therefore we must drop them if the parent is + /// dropped. 
+ pub fn drop_lookup_and_children(&mut self, dropped_id: SingleLookupId) { + if let Some(dropped_lookup) = self.single_block_lookups.remove(&dropped_id) { + debug!(self.log, "Dropping lookup"; + "id" => ?dropped_id, + "block_root" => ?dropped_lookup.block_root(), + "awaiting_parent" => ?dropped_lookup.awaiting_parent(), + ); - debug!(self.log, "Parent chain processed"; "chain_hash" => %chain_hash, "result" => ?result); + let child_lookups = self + .single_block_lookups + .iter() + .filter(|(_, lookup)| lookup.awaiting_parent() == Some(dropped_lookup.block_root())) + .map(|(id, _)| *id) + .collect::>(); + + for id in child_lookups { + self.drop_lookup_and_children(id); + } + } + } + + /// Common handler a lookup request error, drop it and update metrics + /// Returns true if the lookup is created or already exists + fn on_lookup_result( + &mut self, + id: SingleLookupId, + result: Result, + source: &str, + cx: &mut SyncNetworkContext, + ) -> bool { match result { - BatchProcessResult::Success { .. 
} => { - let Some(id) = self - .single_block_lookups - .iter() - .find_map(|(id, req)| (req.block_root() == chain_hash).then_some(*id)) - else { - warn!(self.log, "No id found for single block lookup"; "chain_hash" => %chain_hash); - return; - }; - - let Some(lookup) = self.single_block_lookups.get_mut(&id) else { - warn!(self.log, "No id found for single block lookup"; "chain_hash" => %chain_hash); - return; - }; - - match lookup.get_cached_child_block() { - CachedChild::Ok(rpc_block) => { - // This is the correct block, send it for processing - if self - .send_block_for_processing( - chain_hash, - rpc_block, - timestamp_now(), - BlockProcessType::SingleBlock { id }, - cx, - ) - .is_err() - { - // Remove to avoid inconsistencies - self.single_block_lookups.remove(&id); - } - } - CachedChild::DownloadIncomplete => { - trace!(self.log, "Parent chain complete, awaiting child response"; "chain_hash" => %chain_hash); - } - CachedChild::NotRequired => { - warn!(self.log, "Child not cached for parent lookup"; "chain_hash" => %chain_hash); - } - CachedChild::Err(e) => { - warn!( - self.log, - "Consistency error in child block triggering parent lookup"; - "chain_hash" => %chain_hash, - "error" => ?e - ); - lookup.handle_consistency_failure(cx); - if let Err(e) = lookup.request_block_and_blobs(cx) { - debug!(self.log, - "Failed to request block and blobs, dropping lookup"; - "error" => ?e - ); - self.single_block_lookups.remove(&id); - } - } + Ok(LookupResult::Pending) => true, // no action + Ok(LookupResult::Completed) => { + if let Some(lookup) = self.single_block_lookups.remove(&id) { + debug!(self.log, "Dropping completed lookup"; "block" => ?lookup.block_root(), "id" => id); + metrics::inc_counter(&metrics::SYNC_LOOKUP_COMPLETED); + // Block imported, continue the requests of pending child blocks + self.continue_child_lookups(lookup.block_root(), cx); + self.update_metrics(); + } else { + debug!(self.log, "Attempting to drop non-existent lookup"; "id" => id); } + false 
} - BatchProcessResult::FaultyFailure { - imported_blocks: _, - penalty, - } => { - self.failed_chains.insert(chain_hash); - for peer_source in request.all_used_peers() { - cx.report_peer(*peer_source, penalty, "parent_chain_failure") - } - } - BatchProcessResult::NonFaultyFailure => { - // We might request this chain again if there is need but otherwise, don't try again + // If UnknownLookup do not log the request error. No need to drop child lookups nor + // update metrics because the lookup does not exist. + Err(LookupRequestError::UnknownLookup) => false, + Err(error) => { + debug!(self.log, "Dropping lookup on request error"; "id" => id, "source" => source, "error" => ?error); + metrics::inc_counter_vec(&metrics::SYNC_LOOKUP_DROPPED, &[error.into()]); + self.drop_lookup_and_children(id); + self.update_metrics(); + false } } - - metrics::set_gauge( - &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_lookups.len() as i64, - ); } /* Helper functions */ - fn send_block_for_processing( - &self, - block_root: Hash256, - block: RpcBlock, - duration: Duration, - process_type: BlockProcessType, - cx: &SyncNetworkContext, - ) -> Result<(), LookupRequestError> { - match cx.beacon_processor_if_enabled() { - Some(beacon_processor) => { - debug!(self.log, "Sending block for processing"; "block" => ?block_root, "process" => ?process_type); - if let Err(e) = beacon_processor.send_rpc_beacon_block( - block_root, - block, - duration, - process_type, - ) { - error!( - self.log, - "Failed to send sync block to processor"; - "error" => ?e - ); - Err(LookupRequestError::SendFailed( - "beacon processor send failure", - )) - } else { - Ok(()) - } - } - None => { - trace!(self.log, "Dropping block ready for processing. 
Beacon processor not available"; "block" => %block_root); - Err(LookupRequestError::SendFailed( - "beacon processor unavailable", - )) - } - } - } - - fn send_blobs_for_processing( - &self, - block_root: Hash256, - blobs: FixedBlobSidecarList, - duration: Duration, - process_type: BlockProcessType, - cx: &SyncNetworkContext, - ) -> Result<(), LookupRequestError> { - match cx.beacon_processor_if_enabled() { - Some(beacon_processor) => { - trace!(self.log, "Sending blobs for processing"; "block" => ?block_root, "process_type" => ?process_type); - if let Err(e) = - beacon_processor.send_rpc_blobs(block_root, blobs, duration, process_type) - { - error!( - self.log, - "Failed to send sync blobs to processor"; - "error" => ?e - ); - Err(LookupRequestError::SendFailed( - "beacon processor send failure", - )) - } else { - Ok(()) - } - } - None => { - trace!(self.log, "Dropping blobs ready for processing. Beacon processor not available"; "block_root" => %block_root); - Err(LookupRequestError::SendFailed( - "beacon processor unavailable", - )) - } - } - } - - /// Attempts to request the next unknown parent. This method handles peer scoring and dropping - /// the lookup in the event of failure. - fn request_parent( - &mut self, - mut parent_lookup: ParentLookup, - cx: &mut SyncNetworkContext, - ) { - let response = parent_lookup.request_parent(cx); - - match response { - Err(e) => { - self.handle_parent_request_error(&mut parent_lookup, cx, e); - } - Ok(_) => self.parent_lookups.push(parent_lookup), - } - - // We remove and add back again requests so we want this updated regardless of outcome. - metrics::set_gauge( - &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_lookups.len() as i64, - ); - } - /// Drops all the single block requests and returns how many requests were dropped. 
pub fn drop_single_block_requests(&mut self) -> usize { let requests_to_drop = self.single_block_lookups.len(); @@ -1318,34 +704,169 @@ impl BlockLookups { requests_to_drop } - /// Drops all the parent chain requests and returns how many requests were dropped. - pub fn drop_parent_chain_requests(&mut self) -> usize { - self.parent_lookups.drain(..).len() - } - - pub fn downscore_on_rpc_error( - &self, - peer_id: &PeerId, - error: &LookupVerifyError, - cx: &SyncNetworkContext, - ) { - // Note: logging the report event here with the full error display. The log inside - // `report_peer` only includes a smaller string, like "invalid_data" - let error_str: &'static str = error.into(); - - debug!(self.log, "reporting peer for sync lookup error"; "error" => error_str); - cx.report_peer(*peer_id, PeerAction::LowToleranceError, error_str); - } - pub fn update_metrics(&self) { metrics::set_gauge( &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, self.single_block_lookups.len() as i64, ); + } - metrics::set_gauge( - &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_lookups.len() as i64, - ); + /// Perform some prune operations on lookups on some interval + pub fn prune_lookups(&mut self) { + self.drop_lookups_without_peers(); + self.drop_stuck_lookups(); + } + + /// Lookups without peers are allowed to exist for some time. See this common race condition: + /// + /// 1. Receive unknown block parent event + /// 2. Create child lookup with zero peers + /// 3. Parent is processed, before receiving any attestation for the child block + /// 4. Child lookup is attempted to make progress but has no peers + /// 5. We receive an attestion for child block and add a peer to the child block lookup + /// + /// On step 4 we could drop the lookup because we attempt to issue a request with no peers + /// available. This has two issues: + /// - We may drop the lookup while some other block component is processing, triggering an + /// unknown lookup error. 
This can potentially cause un-related child lookups to also be + /// dropped when calling `drop_lookup_and_children`. + /// - We lose all progress of the lookup, and have to re-download its components that we may + /// already have there cached. + /// + /// Instead there's no negative for keeping lookups with no peers around for some time. If we + /// regularly prune them, it should not be a memory concern (TODO: maybe yes!). + fn drop_lookups_without_peers(&mut self) { + for (lookup_id, block_root) in self + .single_block_lookups + .values() + .filter(|lookup| { + // Do not drop lookup that are awaiting events to prevent inconsinstencies. If a + // lookup gets stuck, it will be eventually pruned by `drop_stuck_lookups` + lookup.has_no_peers() + && lookup.elapsed_since_created() + > Duration::from_secs(LOOKUP_MAX_DURATION_NO_PEERS_SECS) + && !lookup.is_awaiting_event() + }) + .map(|lookup| (lookup.id, lookup.block_root())) + .collect::>() + { + debug!(self.log, "Dropping lookup with no peers"; + "id" => lookup_id, + "block_root" => ?block_root + ); + self.drop_lookup_and_children(lookup_id); + } + } + + /// Safety mechanism to unstuck lookup sync. Lookup sync if purely event driven and depends on + /// external components to feed it events to make progress. If there is a bug in network, in + /// beacon processor, or here internally: lookups can get stuck forever. A stuck lookup can + /// stall a node indefinitely as other lookup will be awaiting on a parent lookup to make + /// progress. + /// + /// If a lookup lasts more than LOOKUP_MAX_DURATION_SECS this function will find its oldest + /// ancestor and then drop it and all its children. This action will allow the node to unstuck + /// itself. Bugs that cause lookups to get stuck may be triggered consistently. 
So this strategy + /// is useful for two reasons: + /// + /// - One single clear warn level log per stuck incident + /// - If the original bug is sporadic, it reduces the time a node is stuck from forever to 15 min + fn drop_stuck_lookups(&mut self) { + // While loop to find and drop all disjoint trees of potentially stuck lookups. + while let Some(stuck_lookup) = self.single_block_lookups.values().find(|lookup| { + lookup.elapsed_since_created() > Duration::from_secs(LOOKUP_MAX_DURATION_STUCK_SECS) + }) { + let ancestor_stuck_lookup = match self.find_oldest_ancestor_lookup(stuck_lookup) { + Ok(lookup) => lookup, + Err(e) => { + warn!(self.log, "Error finding oldest ancestor lookup"; "error" => ?e); + // Default to dropping the lookup that exceeds the max duration so at least + // eventually sync should be unstuck + stuck_lookup + } + }; + + if stuck_lookup.id == ancestor_stuck_lookup.id { + warn!(self.log, "Notify the devs, a sync lookup is stuck"; + "block_root" => ?stuck_lookup.block_root(), + "lookup" => ?stuck_lookup, + ); + } else { + warn!(self.log, "Notify the devs, a sync lookup is stuck"; + "block_root" => ?stuck_lookup.block_root(), + "lookup" => ?stuck_lookup, + "ancestor_block_root" => ?ancestor_stuck_lookup.block_root(), + "ancestor_lookup" => ?ancestor_stuck_lookup, + ); + } + + metrics::inc_counter(&metrics::SYNC_LOOKUPS_STUCK); + self.drop_lookup_and_children(ancestor_stuck_lookup.id); + } + } + + /// Recursively find the oldest ancestor lookup of another lookup + fn find_oldest_ancestor_lookup<'a>( + &'a self, + lookup: &'a SingleBlockLookup, + ) -> Result<&'a SingleBlockLookup, String> { + if let Some(awaiting_parent) = lookup.awaiting_parent() { + if let Some(lookup) = self + .single_block_lookups + .values() + .find(|l| l.block_root() == awaiting_parent) + { + self.find_oldest_ancestor_lookup(lookup) + } else { + Err(format!( + "Lookup references unknown parent {awaiting_parent:?}" + )) + } + } else { + Ok(lookup) + } + } + + /// Adds peers to 
a lookup and its ancestors recursively. + /// Note: Takes a `lookup_id` as argument to allow recursion on mutable lookups, without having + /// to duplicate the code to add peers to a lookup + fn add_peers_to_lookup_and_ancestors( + &mut self, + lookup_id: SingleLookupId, + peers: &[PeerId], + ) -> Result<(), String> { + let lookup = self + .single_block_lookups + .get_mut(&lookup_id) + .ok_or(format!("Unknown lookup for id {lookup_id}"))?; + + for peer in peers { + if lookup.add_peer(*peer) { + debug!(self.log, "Adding peer to existing single block lookup"; + "block_root" => ?lookup.block_root(), + "peer" => ?peer + ); + } + } + + // We may choose to attempt to continue a lookup here. It is possible that a lookup had zero + // peers and after adding this set of peers it can make progress again. Note that this + // recursive function iterates from child to parent, so continuing the child first is weird. + // However, we choose to not attempt to continue the lookup for simplicity. It's not + // strictly required and just and optimization for a rare corner case. 
+ + if let Some(parent_root) = lookup.awaiting_parent() { + if let Some((&child_id, _)) = self + .single_block_lookups + .iter() + .find(|(_, l)| l.block_root() == parent_root) + { + self.add_peers_to_lookup_and_ancestors(child_id, peers) + } else { + Err(format!("Lookup references unknown parent {parent_root:?}")) + } + } else { + Ok(()) + } } } diff --git a/beacon_node/network/src/sync/block_lookups/parent_chain.rs b/beacon_node/network/src/sync/block_lookups/parent_chain.rs new file mode 100644 index 0000000000..7f4fe5119f --- /dev/null +++ b/beacon_node/network/src/sync/block_lookups/parent_chain.rs @@ -0,0 +1,198 @@ +use super::single_block_lookup::SingleBlockLookup; +use beacon_chain::BeaconChainTypes; +use std::collections::{HashMap, HashSet}; +use types::Hash256; + +/// Summary of a lookup of which we may not know it's parent_root yet +pub(crate) struct Node { + block_root: Hash256, + parent_root: Option, +} + +impl From<&SingleBlockLookup> for Node { + fn from(value: &SingleBlockLookup) -> Self { + Self { + block_root: value.block_root(), + parent_root: value.awaiting_parent(), + } + } +} + +/// Wrapper around a chain of block roots that have a least one element (tip) +pub(crate) struct NodeChain { + // Parent chain blocks in descending slot order + pub(crate) chain: Vec, + pub(crate) tip: Hash256, +} + +impl NodeChain { + /// Returns the block_root of the oldest ancestor (min slot) of this chain + pub(crate) fn ancestor(&self) -> Hash256 { + self.chain.last().copied().unwrap_or(self.tip) + } + pub(crate) fn len(&self) -> usize { + self.chain.len() + } +} + +/// Given a set of nodes that reference each other, returns a list of chains with unique tips that +/// contain at least two elements. In descending slot order (tip first). 
+pub(crate) fn compute_parent_chains(nodes: &[Node]) -> Vec { + let mut child_to_parent = HashMap::new(); + let mut parent_to_child = HashMap::>::new(); + for node in nodes { + child_to_parent.insert(node.block_root, node.parent_root); + if let Some(parent_root) = node.parent_root { + parent_to_child + .entry(parent_root) + .or_default() + .push(node.block_root); + } + } + + let mut parent_chains = vec![]; + + // Iterate blocks with no children + for tip in nodes { + let mut block_root = tip.block_root; + if !parent_to_child.contains_key(&block_root) { + let mut chain = vec![]; + + // Resolve chain of blocks + while let Some(parent_root) = child_to_parent.get(&block_root) { + // block_root is a known block that may or may not have a parent root + chain.push(block_root); + if let Some(parent_root) = parent_root { + block_root = *parent_root; + } else { + break; + } + } + + if chain.len() > 1 { + parent_chains.push(NodeChain { + chain, + tip: tip.block_root, + }); + } + } + } + + parent_chains +} + +/// Given a list of node chains, find the oldest node of a specific chain that is not contained in +/// any other chain. +pub(crate) fn find_oldest_fork_ancestor( + parent_chains: Vec, + chain_idx: usize, +) -> Result { + let mut other_blocks = HashSet::new(); + + // Register blocks from other chains + for (i, parent_chain) in parent_chains.iter().enumerate() { + if i != chain_idx { + for block in &parent_chain.chain { + other_blocks.insert(block); + } + } + } + + // Should never happen + let parent_chain = parent_chains + .get(chain_idx) + .ok_or("chain_idx out of bounds")?; + // Find the first block in the target parent chain that is not in other parent chains + // Iterate in ascending slot order + for block in parent_chain.chain.iter().rev() { + if !other_blocks.contains(block) { + return Ok(*block); + } + } + + // No match means that the chain is fully contained within another chain. 
This should never + // happen, but if that was the case just return the tip + Ok(parent_chain.tip) +} + +#[cfg(test)] +mod tests { + use super::{compute_parent_chains, find_oldest_fork_ancestor, Node}; + use types::Hash256; + + fn h(n: u64) -> Hash256 { + Hash256::from_low_u64_be(n) + } + + fn n(block: u64) -> Node { + Node { + block_root: h(block), + parent_root: None, + } + } + + fn np(parent: u64, block: u64) -> Node { + Node { + block_root: h(block), + parent_root: Some(h(parent)), + } + } + + fn compute_parent_chains_test(nodes: &[Node], expected_chain: Vec>) { + assert_eq!( + compute_parent_chains(nodes) + .iter() + .map(|c| c.chain.clone()) + .collect::>(), + expected_chain + ); + } + + fn find_oldest_fork_ancestor_test(nodes: &[Node], expected: Hash256) { + let chains = compute_parent_chains(nodes); + println!( + "chains {:?}", + chains.iter().map(|c| &c.chain).collect::>() + ); + assert_eq!(find_oldest_fork_ancestor(chains, 0).unwrap(), expected); + } + + #[test] + fn compute_parent_chains_empty_case() { + compute_parent_chains_test(&[], vec![]); + } + + #[test] + fn compute_parent_chains_single_branch() { + compute_parent_chains_test(&[n(0), np(0, 1), np(1, 2)], vec![vec![h(2), h(1), h(0)]]); + } + + #[test] + fn compute_parent_chains_single_branch_with_solo() { + compute_parent_chains_test( + &[n(0), np(0, 1), np(1, 2), np(3, 4)], + vec![vec![h(2), h(1), h(0)]], + ); + } + + #[test] + fn compute_parent_chains_two_forking_branches() { + compute_parent_chains_test( + &[n(0), np(0, 1), np(1, 2), np(1, 3)], + vec![vec![h(2), h(1), h(0)], vec![h(3), h(1), h(0)]], + ); + } + + #[test] + fn compute_parent_chains_two_independent_branches() { + compute_parent_chains_test( + &[n(0), np(0, 1), np(1, 2), n(3), np(3, 4)], + vec![vec![h(2), h(1), h(0)], vec![h(4), h(3)]], + ); + } + + #[test] + fn find_oldest_fork_ancestor_simple_case() { + find_oldest_fork_ancestor_test(&[n(0), np(0, 1), np(1, 2), np(0, 3)], h(1)) + } +} diff --git 
a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs deleted file mode 100644 index 11eb908953..0000000000 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ /dev/null @@ -1,227 +0,0 @@ -use super::common::LookupType; -use super::single_block_lookup::{LookupRequestError, SingleBlockLookup}; -use super::{DownloadedBlock, PeerId}; -use crate::sync::{manager::SLOT_IMPORT_TOLERANCE, network_context::SyncNetworkContext}; -use beacon_chain::block_verification_types::AsBlock; -use beacon_chain::block_verification_types::RpcBlock; -use beacon_chain::data_availability_checker::{ChildComponents, DataAvailabilityChecker}; -use beacon_chain::BeaconChainTypes; -use std::collections::VecDeque; -use std::sync::Arc; -use store::Hash256; - -/// How many attempts we try to find a parent of a block before we give up trying. -pub(crate) const PARENT_FAIL_TOLERANCE: u8 = 5; -/// The maximum depth we will search for a parent block. In principle we should have sync'd any -/// canonical chain to its head once the peer connects. A chain should not appear where it's depth -/// is further back than the most recent head slot. -pub(crate) const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; - -/// Maintains a sequential list of parents to lookup and the lookup's current state. -pub(crate) struct ParentLookup { - /// The root of the block triggering this parent request. - chain_hash: Hash256, - /// The blocks that have currently been downloaded. - downloaded_blocks: Vec>, - /// Request of the last parent. - pub current_parent_request: SingleBlockLookup, -} - -#[derive(Debug, PartialEq, Eq)] -pub(crate) enum RequestError { - SendFailed(&'static str), - ChainTooLong, - /// We witnessed too many failures trying to complete this parent lookup. - TooManyAttempts { - /// We received more failures trying to process the blocks than downloading them - /// from peers. 
- cannot_process: bool, - }, - NoPeers, - BadState(String), -} - -impl ParentLookup { - pub fn new( - block_root: Hash256, - parent_root: Hash256, - peer_id: PeerId, - da_checker: Arc>, - cx: &mut SyncNetworkContext, - ) -> Self { - let current_parent_request = SingleBlockLookup::new( - parent_root, - Some(ChildComponents::empty(block_root)), - &[peer_id], - da_checker, - cx.next_id(), - LookupType::Parent, - ); - - Self { - chain_hash: block_root, - downloaded_blocks: vec![], - current_parent_request, - } - } - - pub fn contains_block(&self, block_root: &Hash256) -> bool { - self.downloaded_blocks - .iter() - .any(|(root, _d_block)| root == block_root) - } - - pub fn is_for_block(&self, block_root: Hash256) -> bool { - self.current_parent_request.is_for_block(block_root) - } - - /// Attempts to request the next unknown parent. If the request fails, it should be removed. - pub fn request_parent(&mut self, cx: &mut SyncNetworkContext) -> Result<(), RequestError> { - // check to make sure this request hasn't failed - if self.downloaded_blocks.len() + 1 >= PARENT_DEPTH_TOLERANCE { - return Err(RequestError::ChainTooLong); - } - - self.current_parent_request - .request_block_and_blobs(cx) - .map_err(Into::into) - } - - pub fn check_peer_disconnected(&mut self, peer_id: &PeerId) -> Result<(), ()> { - self.current_parent_request - .block_request_state - .state - .check_peer_disconnected(peer_id) - .and_then(|()| { - self.current_parent_request - .blob_request_state - .state - .check_peer_disconnected(peer_id) - }) - } - - pub fn add_unknown_parent_block(&mut self, block: RpcBlock) { - let next_parent = block.parent_root(); - // Cache the block. - let current_root = self.current_parent_request.block_root(); - self.downloaded_blocks.push((current_root, block)); - - // Update the parent request. 
- self.current_parent_request - .update_requested_parent_block(next_parent) - } - - pub fn block_processing_peer(&self) -> Result { - self.current_parent_request - .block_request_state - .state - .processing_peer() - } - - pub fn blob_processing_peer(&self) -> Result { - self.current_parent_request - .blob_request_state - .state - .processing_peer() - } - - /// Consumes the parent request and destructures it into it's parts. - #[allow(clippy::type_complexity)] - pub fn parts_for_processing( - self, - ) -> ( - Hash256, - VecDeque>, - Vec, - SingleBlockLookup, - ) { - let ParentLookup { - chain_hash, - downloaded_blocks, - current_parent_request, - } = self; - let block_count = downloaded_blocks.len(); - let mut blocks = VecDeque::with_capacity(block_count); - let mut hashes = Vec::with_capacity(block_count); - for (hash, block) in downloaded_blocks.into_iter() { - blocks.push_back(block); - hashes.push(hash); - } - (chain_hash, blocks, hashes, current_parent_request) - } - - /// Get the parent lookup's chain hash. - pub fn chain_hash(&self) -> Hash256 { - self.chain_hash - } - - pub fn processing_failed(&mut self) { - self.current_parent_request - .block_request_state - .state - .on_processing_failure(); - self.current_parent_request - .blob_request_state - .state - .on_processing_failure(); - if let Some(components) = self.current_parent_request.child_components.as_mut() { - components.downloaded_block = None; - components.downloaded_blobs = <_>::default(); - } - } - - pub fn add_peer(&mut self, peer: PeerId) { - self.current_parent_request.add_peer(peer) - } - - /// Adds a list of peers to the parent request. 
- pub fn add_peers(&mut self, peers: &[PeerId]) { - self.current_parent_request.add_peers(peers) - } - - pub fn all_used_peers(&self) -> impl Iterator + '_ { - self.current_parent_request.all_used_peers() - } -} - -impl From for RequestError { - fn from(e: LookupRequestError) -> Self { - use LookupRequestError as E; - match e { - E::TooManyAttempts { cannot_process } => { - RequestError::TooManyAttempts { cannot_process } - } - E::NoPeers => RequestError::NoPeers, - E::SendFailed(msg) => RequestError::SendFailed(msg), - E::BadState(msg) => RequestError::BadState(msg), - } - } -} - -impl slog::KV for ParentLookup { - fn serialize( - &self, - record: &slog::Record, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - serializer.emit_arguments("chain_hash", &format_args!("{}", self.chain_hash))?; - slog::Value::serialize(&self.current_parent_request, record, "parent", serializer)?; - serializer.emit_usize("downloaded_blocks", self.downloaded_blocks.len())?; - slog::Result::Ok(()) - } -} - -impl RequestError { - pub fn as_static(&self) -> &'static str { - match self { - RequestError::SendFailed(e) => e, - RequestError::ChainTooLong => "chain_too_long", - RequestError::TooManyAttempts { cannot_process } if *cannot_process => { - "too_many_processing_attempts" - } - RequestError::TooManyAttempts { cannot_process: _ } => "too_many_downloading_attempts", - RequestError::NoPeers => "no_peers", - RequestError::BadState(..) 
=> "bad_state", - } - } -} diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 077af7c3d1..13efd36ab7 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -1,25 +1,30 @@ -use super::common::LookupType; -use super::PeerId; +use super::common::ResponseType; +use super::{BlockComponent, PeerId, SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS}; use crate::sync::block_lookups::common::RequestState; use crate::sync::block_lookups::Id; -use crate::sync::network_context::SyncNetworkContext; -use beacon_chain::block_verification_types::RpcBlock; -use beacon_chain::data_availability_checker::ChildComponents; -use beacon_chain::data_availability_checker::{ - AvailabilityCheckError, DataAvailabilityChecker, MissingBlobs, +use crate::sync::network_context::{ + LookupRequestResult, ReqId, RpcRequestSendError, SendErrorProcessor, SyncNetworkContext, }; use beacon_chain::BeaconChainTypes; -use itertools::Itertools; -use lighthouse_network::PeerAction; +use derivative::Derivative; use rand::seq::IteratorRandom; -use slog::{debug, Logger}; use std::collections::HashSet; use std::fmt::Debug; use std::sync::Arc; +use std::time::{Duration, Instant}; use store::Hash256; use strum::IntoStaticStr; use types::blob_sidecar::FixedBlobSidecarList; -use types::EthSpec; +use types::{EthSpec, SignedBeaconBlock}; + +// Dedicated enum for LookupResult to force its usage +#[must_use = "LookupResult must be handled with on_lookup_result"] +pub enum LookupResult { + /// Lookup completed successfully + Completed, + /// Lookup is expecting some future event from the network + Pending, +} #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupRequestError { @@ -28,45 +33,103 @@ pub enum LookupRequestError { /// The failed attempts were primarily due to processing failures. 
cannot_process: bool, }, + /// No peers left to serve this lookup NoPeers, - SendFailed(&'static str), + /// Error sending event to network + SendFailedNetwork(RpcRequestSendError), + /// Error sending event to processor + SendFailedProcessor(SendErrorProcessor), + /// Inconsistent lookup request state BadState(String), + /// Lookup failed for some other reason and should be dropped + Failed, + /// Received MissingComponents when all components have been processed. This should never + /// happen, and indicates some internal bug + MissingComponentsAfterAllProcessed, + /// Attempted to retrieve a not known lookup id + UnknownLookup, + /// Received a download result for a different request id than the in-flight request. + /// There should only exist a single request at a time. Having multiple requests is a bug and + /// can result in undefined state, so it's treated as a hard error and the lookup is dropped. + UnexpectedRequestId { + expected_req_id: ReqId, + req_id: ReqId, + }, } +#[derive(Derivative)] +#[derivative(Debug(bound = "T: BeaconChainTypes"))] pub struct SingleBlockLookup { pub id: Id, - pub lookup_type: LookupType, - pub block_request_state: BlockRequestState, + pub block_request_state: BlockRequestState, pub blob_request_state: BlobRequestState, - pub da_checker: Arc>, - /// Only necessary for requests triggered by an `UnknownBlockParent` or `UnknownBlockParent` - /// because any blocks or blobs without parents won't hit the data availability cache. 
- pub child_components: Option>, + /// Peers that claim to have imported this set of block components + #[derivative(Debug(format_with = "fmt_peer_set_as_len"))] + peers: HashSet, + block_root: Hash256, + awaiting_parent: Option, + created: Instant, } impl SingleBlockLookup { pub fn new( requested_block_root: Hash256, - child_components: Option>, peers: &[PeerId], - da_checker: Arc>, id: Id, - lookup_type: LookupType, + awaiting_parent: Option, ) -> Self { - let is_deneb = da_checker.is_deneb(); Self { id, - lookup_type, - block_request_state: BlockRequestState::new(requested_block_root, peers), - blob_request_state: BlobRequestState::new(requested_block_root, peers, is_deneb), - da_checker, - child_components, + block_request_state: BlockRequestState::new(requested_block_root), + blob_request_state: BlobRequestState::new(requested_block_root), + peers: HashSet::from_iter(peers.iter().copied()), + block_root: requested_block_root, + awaiting_parent, + created: Instant::now(), } } /// Get the block root that is being requested. pub fn block_root(&self) -> Hash256 { - self.block_request_state.requested_block_root + self.block_root + } + + pub fn awaiting_parent(&self) -> Option { + self.awaiting_parent + } + + /// Mark this lookup as awaiting a parent lookup from being processed. Meanwhile don't send + /// components for processing. + pub fn set_awaiting_parent(&mut self, parent_root: Hash256) { + self.awaiting_parent = Some(parent_root) + } + + /// Mark this lookup as no longer awaiting a parent lookup. Components can be sent for + /// processing. + pub fn resolve_awaiting_parent(&mut self) { + self.awaiting_parent = None; + } + + /// Returns the time elapsed since this lookup was created + pub fn elapsed_since_created(&self) -> Duration { + self.created.elapsed() + } + + /// Maybe insert a verified response into this lookup. 
Returns true if imported + pub fn add_child_components(&mut self, block_component: BlockComponent) -> bool { + match block_component { + BlockComponent::Block(block) => self + .block_request_state + .state + .insert_verified_response(block), + BlockComponent::Blob(_) => { + // For now ignore single blobs, as the blob request state assumes all blobs are + // attributed to the same peer = the peer serving the remaining blobs. Ignoring this + // block component has a minor effect, causing the node to re-request this blob + // once the parent chain is successfully resolved + false + } + } } /// Check the block root matches the requested block root. @@ -74,401 +137,395 @@ impl SingleBlockLookup { self.block_root() == block_root } - /// Update the requested block, this should only be used in a chain of parent lookups to request - /// the next parent. - pub fn update_requested_parent_block(&mut self, block_root: Hash256) { - self.block_request_state.requested_block_root = block_root; - self.blob_request_state.block_root = block_root; - self.block_request_state.state.state = State::AwaitingDownload; - self.blob_request_state.state.state = State::AwaitingDownload; - self.child_components = Some(ChildComponents::empty(block_root)); - } - - /// Get all unique used peers across block and blob requests. - pub fn all_used_peers(&self) -> impl Iterator + '_ { - self.block_request_state - .state - .get_used_peers() - .chain(self.blob_request_state.state.get_used_peers()) - .unique() - } - - /// Send the necessary requests for blocks and/or blobs. This will check whether we have - /// downloaded the block and/or blobs already and will not send requests if so. It will also - /// inspect the request state or blocks and blobs to ensure we are not already processing or - /// downloading the block and/or blobs. 
- pub fn request_block_and_blobs( - &mut self, - cx: &mut SyncNetworkContext, - ) -> Result<(), LookupRequestError> { - let block_already_downloaded = self.block_already_downloaded(); - let blobs_already_downloaded = self.blobs_already_downloaded(); - - if !block_already_downloaded { - self.block_request_state - .build_request_and_send(self.id, self.lookup_type, cx)?; - } - if !blobs_already_downloaded { - self.blob_request_state - .build_request_and_send(self.id, self.lookup_type, cx)?; - } - Ok(()) - } - - /// Returns a `CachedChild`, which is a wrapper around a `RpcBlock` that is either: - /// - /// 1. `NotRequired`: there is no child caching required for this lookup. - /// 2. `DownloadIncomplete`: Child caching is required, but all components are not yet downloaded. - /// 3. `Ok`: The child is required and we have downloaded it. - /// 4. `Err`: The child is required, but has failed consistency checks. - pub fn get_cached_child_block(&self) -> CachedChild { - if let Some(components) = self.child_components.as_ref() { - let Some(block) = components.downloaded_block.as_ref() else { - return CachedChild::DownloadIncomplete; - }; - - if !self.missing_blob_ids().is_empty() { - return CachedChild::DownloadIncomplete; - } - - match RpcBlock::new_from_fixed( - self.block_request_state.requested_block_root, - block.clone(), - components.downloaded_blobs.clone(), - ) { - Ok(rpc_block) => CachedChild::Ok(rpc_block), - Err(e) => CachedChild::Err(e), - } - } else { - CachedChild::NotRequired - } - } - - /// Accepts a verified response, and adds it to the child components if required. This method - /// returns a `CachedChild` which provides a completed block + blob response if all components have been - /// received, or information about whether the child is required and if it has been downloaded. 
- pub fn add_response>( - &mut self, - verified_response: R::VerifiedResponseType, - ) -> CachedChild { - if let Some(child_components) = self.child_components.as_mut() { - R::add_to_child_components(verified_response, child_components); - self.get_cached_child_block() - } else { - CachedChild::NotRequired - } - } - - /// Add a child component to the lookup request. Merges with any existing child components. - pub fn add_child_components(&mut self, components: ChildComponents) { - if let Some(ref mut existing_components) = self.child_components { - let ChildComponents { - block_root: _, - downloaded_block, - downloaded_blobs, - } = components; - if let Some(block) = downloaded_block { - existing_components.merge_block(block); - } - existing_components.merge_blobs(downloaded_blobs); - } else { - self.child_components = Some(components); - } - } - - /// Add all given peers to both block and blob request states. - pub fn add_peer(&mut self, peer_id: PeerId) { - self.block_request_state.state.add_peer(&peer_id); - self.blob_request_state.state.add_peer(&peer_id); - } - - /// Add all given peers to both block and blob request states. - pub fn add_peers(&mut self, peers: &[PeerId]) { - for peer in peers { - self.add_peer(*peer); - } - } - - /// Returns true if the block has already been downloaded. - pub fn both_components_downloaded(&self) -> bool { - self.block_request_state.state.is_downloaded() - && self.blob_request_state.state.is_downloaded() - } - /// Returns true if the block has already been downloaded. pub fn both_components_processed(&self) -> bool { self.block_request_state.state.is_processed() && self.blob_request_state.state.is_processed() } - /// Checks both the block and blob request states to see if the peer is disconnected. - /// - /// Returns true if the lookup should be dropped. 
- pub fn should_drop_lookup_on_disconnected_peer( + /// Returns true if this request is expecting some event to make progress + pub fn is_awaiting_event(&self) -> bool { + self.awaiting_parent.is_some() + || self.block_request_state.state.is_awaiting_event() + || self.blob_request_state.state.is_awaiting_event() + } + + /// Makes progress on all requests of this lookup. Any error is not recoverable and must result + /// in dropping the lookup. May mark the lookup as completed. + pub fn continue_requests( &mut self, - peer_id: &PeerId, cx: &mut SyncNetworkContext, - log: &Logger, - ) -> bool { - let block_root = self.block_root(); - let block_peer_disconnected = self + ) -> Result { + // TODO: Check what's necessary to download, specially for blobs + self.continue_request::>(cx)?; + self.continue_request::>(cx)?; + + // If all components of this lookup are already processed, there will be no future events + // that can make progress so it must be dropped. Consider the lookup completed. + // This case can happen if we receive the components from gossip during a retry. 
+ if self.block_request_state.state.is_processed() + && self.blob_request_state.state.is_processed() + { + Ok(LookupResult::Completed) + } else { + Ok(LookupResult::Pending) + } + } + + /// Potentially makes progress on this request if it's in a progress-able state + fn continue_request>( + &mut self, + cx: &mut SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + let id = self.id; + let awaiting_parent = self.awaiting_parent.is_some(); + let downloaded_block_expected_blobs = self .block_request_state .state - .check_peer_disconnected(peer_id) - .is_err(); - let blob_peer_disconnected = self - .blob_request_state - .state - .check_peer_disconnected(peer_id) - .is_err(); + .peek_downloaded_data() + .map(|block| block.num_expected_blobs()); + let block_is_processed = self.block_request_state.state.is_processed(); + let request = R::request_state_mut(self); - if block_peer_disconnected || blob_peer_disconnected { - if let Err(e) = self.request_block_and_blobs(cx) { - debug!(log, "Single lookup failed on peer disconnection"; "block_root" => ?block_root, "error" => ?e); - return true; + // Attempt to progress awaiting downloads + if request.get_state().is_awaiting_download() { + // Verify the current request has not exceeded the maximum number of attempts. + let request_state = request.get_state(); + if request_state.failed_attempts() >= SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS { + let cannot_process = request_state.more_failed_processing_attempts(); + return Err(LookupRequestError::TooManyAttempts { cannot_process }); + } + + let Some(peer_id) = self.use_rand_available_peer() else { + // Allow lookup to not have any peers. In that case do nothing. If the lookup does + // not have peers for some time, it will be dropped. + return Ok(()); + }; + + let request = R::request_state_mut(self); + match request.make_request(id, peer_id, downloaded_block_expected_blobs, cx)? { + LookupRequestResult::RequestSent(req_id) => { + request.get_state_mut().on_download_start(req_id)? 
+ } + LookupRequestResult::NoRequestNeeded => { + request.get_state_mut().on_completed_request()? + } + // Sync will receive a future event to make progress on the request, do nothing now + LookupRequestResult::Pending(reason) => { + request + .get_state_mut() + .update_awaiting_download_status(reason); + return Ok(()); + } + } + + // Otherwise, attempt to progress awaiting processing + // If this request is awaiting a parent lookup to be processed, do not send for processing. + // The request will be rejected with unknown parent error. + } else if !awaiting_parent + && (block_is_processed || matches!(R::response_type(), ResponseType::Block)) + { + // maybe_start_processing returns Some if state == AwaitingProcess. This pattern is + // useful to conditionally access the result data. + if let Some(result) = request.get_state_mut().maybe_start_processing() { + return R::send_for_processing(id, result, cx); } } - false + + Ok(()) } - /// Returns `true` if the block has already been downloaded. - pub(crate) fn block_already_downloaded(&self) -> bool { - if let Some(components) = self.child_components.as_ref() { - components.downloaded_block.is_some() - } else { - self.da_checker.has_block(&self.block_root()) - } + /// Get all unique peers that claim to have imported this set of block components + pub fn all_peers(&self) -> impl Iterator + '_ { + self.peers.iter() } - /// Updates the `requested_ids` field of the `BlockRequestState` with the most recent picture - /// of which blobs still need to be requested. Returns `true` if there are no more blobs to - /// request. - pub(crate) fn blobs_already_downloaded(&mut self) -> bool { - if matches!(self.blob_request_state.state.state, State::AwaitingDownload) { - self.update_blobs_request(); - } - self.blob_request_state.requested_ids.is_empty() + /// Add peer to all request states. The peer must be able to serve this request. + /// Returns true if the peer was newly inserted into some request state. 
+ pub fn add_peer(&mut self, peer_id: PeerId) -> bool { + self.peers.insert(peer_id) } - /// Updates this request with the most recent picture of which blobs still need to be requested. - pub fn update_blobs_request(&mut self) { - self.blob_request_state.requested_ids = self.missing_blob_ids(); + /// Remove peer from available peers. Return true if there are no more available peers and all + /// requests are not expecting any future event (AwaitingDownload). + pub fn remove_peer(&mut self, peer_id: &PeerId) -> bool { + self.peers.remove(peer_id) } - /// If `child_components` is `Some`, we know block components won't hit the data - /// availability cache, so we don't check its processing cache unless `child_components` - /// is `None`. - pub(crate) fn missing_blob_ids(&self) -> MissingBlobs { - let block_root = self.block_root(); - if let Some(components) = self.child_components.as_ref() { - self.da_checker.get_missing_blob_ids( - block_root, - components.downloaded_block.as_ref().map(|b| b.as_ref()), - &components.downloaded_blobs, - ) - } else { - self.da_checker.get_missing_blob_ids_with(block_root) - } + /// Returns true if this lookup has zero peers + pub fn has_no_peers(&self) -> bool { + self.peers.is_empty() } - /// Penalizes a blob peer if it should have blobs but didn't return them to us. - pub fn penalize_blob_peer(&mut self, cx: &SyncNetworkContext) { - if let Ok(blob_peer) = self.blob_request_state.state.processing_peer() { - cx.report_peer( - blob_peer, - PeerAction::MidToleranceError, - "single_blob_failure", - ); - } - } - - /// This failure occurs on download, so register a failure downloading, penalize the peer - /// and clear the blob cache. 
- pub fn handle_consistency_failure(&mut self, cx: &SyncNetworkContext) { - self.penalize_blob_peer(cx); - if let Some(cached_child) = self.child_components.as_mut() { - cached_child.clear_blobs(); - } - self.blob_request_state.state.on_download_failure() - } - - /// This failure occurs after processing, so register a failure processing, penalize the peer - /// and clear the blob cache. - pub fn handle_availability_check_failure(&mut self, cx: &SyncNetworkContext) { - self.penalize_blob_peer(cx); - if let Some(cached_child) = self.child_components.as_mut() { - cached_child.clear_blobs(); - } - self.blob_request_state.state.on_processing_failure() + /// Selects a random peer from available peers if any + fn use_rand_available_peer(&mut self) -> Option { + self.peers.iter().choose(&mut rand::thread_rng()).copied() } } /// The state of the blob request component of a `SingleBlockLookup`. +#[derive(Derivative)] +#[derivative(Debug)] pub struct BlobRequestState { - /// The latest picture of which blobs still need to be requested. This includes information - /// from both block/blobs downloaded in the network layer and any blocks/blobs that exist in - /// the data availability checker. - pub requested_ids: MissingBlobs, + #[derivative(Debug = "ignore")] pub block_root: Hash256, - /// Where we store blobs until we receive the stream terminator. - pub blob_download_queue: FixedBlobSidecarList, - pub state: SingleLookupRequestState, + pub state: SingleLookupRequestState>, } impl BlobRequestState { - pub fn new(block_root: Hash256, peer_source: &[PeerId], is_deneb: bool) -> Self { - let default_ids = MissingBlobs::new_without_block(block_root, is_deneb); + pub fn new(block_root: Hash256) -> Self { Self { block_root, - requested_ids: default_ids, - blob_download_queue: <_>::default(), - state: SingleLookupRequestState::new(peer_source), + state: SingleLookupRequestState::new(), } } } /// The state of the block request component of a `SingleBlockLookup`. 
-pub struct BlockRequestState { +#[derive(Derivative)] +#[derivative(Debug)] +pub struct BlockRequestState { + #[derivative(Debug = "ignore")] pub requested_block_root: Hash256, - pub state: SingleLookupRequestState, + pub state: SingleLookupRequestState>>, } -impl BlockRequestState { - pub fn new(block_root: Hash256, peers: &[PeerId]) -> Self { +impl BlockRequestState { + pub fn new(block_root: Hash256) -> Self { Self { requested_block_root: block_root, - state: SingleLookupRequestState::new(peers), + state: SingleLookupRequestState::new(), } } } -/// This is the status of cached components for a lookup if they are required. It provides information -/// about whether we should send a responses immediately for processing, whether we require more -/// responses, or whether all cached components have been received and the reconstructed block -/// should be sent for processing. -pub enum CachedChild { - /// All child components have been received, this is the reconstructed block, including all. - /// It has been checked for consistency between blobs and block, but no consensus checks have - /// been performed and no kzg verification has been performed. - Ok(RpcBlock), - /// All child components have not yet been received. - DownloadIncomplete, - /// Child components should not be cached, send this directly for processing. - NotRequired, - /// There was an error during consistency checks between block and blobs. 
- Err(AvailabilityCheckError), +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct DownloadResult { + pub value: T, + pub block_root: Hash256, + pub seen_timestamp: Duration, + pub peer_id: PeerId, } -#[derive(Debug, PartialEq, Eq)] -pub enum State { - AwaitingDownload, - Downloading { peer_id: PeerId }, - Processing { peer_id: PeerId }, - Processed { peer_id: PeerId }, +#[derive(PartialEq, Eq, IntoStaticStr)] +pub enum State { + AwaitingDownload(&'static str), + Downloading(ReqId), + AwaitingProcess(DownloadResult), + /// Request is processing, sent by lookup sync + Processing(DownloadResult), + /// Request is processed + Processed, } /// Object representing the state of a single block or blob lookup request. -#[derive(PartialEq, Eq, Debug)] -pub struct SingleLookupRequestState { +#[derive(PartialEq, Eq, Derivative)] +#[derivative(Debug)] +pub struct SingleLookupRequestState { /// State of this request. - state: State, - /// Peers that should have this block or blob. - available_peers: HashSet, - /// Peers from which we have requested this block. - used_peers: HashSet, + state: State, /// How many times have we attempted to process this block or blob. failed_processing: u8, /// How many times have we attempted to download this block or blob. failed_downloading: u8, - /// Should be incremented everytime this request is retried. The purpose of this is to - /// differentiate retries of the same block/blob request within a lookup. We currently penalize - /// peers and retry requests prior to receiving the stream terminator. This means responses - /// from a prior request may arrive after a new request has been sent, this counter allows - /// us to differentiate these two responses. 
- req_counter: u32, } -impl SingleLookupRequestState { - pub fn new(peers: &[PeerId]) -> Self { - let mut available_peers = HashSet::default(); - for peer in peers.iter().copied() { - available_peers.insert(peer); - } - +impl SingleLookupRequestState { + pub fn new() -> Self { Self { - state: State::AwaitingDownload, - available_peers, - used_peers: HashSet::default(), + state: State::AwaitingDownload("not started"), failed_processing: 0, failed_downloading: 0, - req_counter: 0, } } - pub fn is_current_req_counter(&self, req_counter: u32) -> bool { - self.req_counter == req_counter - } - pub fn is_awaiting_download(&self) -> bool { - matches!(self.state, State::AwaitingDownload) - } - - pub fn is_downloaded(&self) -> bool { match self.state { - State::AwaitingDownload => false, - State::Downloading { .. } => false, - State::Processing { .. } => true, - State::Processed { .. } => true, + State::AwaitingDownload { .. } => true, + State::Downloading { .. } + | State::AwaitingProcess { .. } + | State::Processing { .. } + | State::Processed { .. } => false, } } pub fn is_processed(&self) -> bool { match self.state { - State::AwaitingDownload => false, - State::Downloading { .. } => false, - State::Processing { .. } => false, + State::AwaitingDownload { .. } + | State::Downloading { .. } + | State::AwaitingProcess { .. } + | State::Processing { .. } => false, State::Processed { .. } => true, } } - pub fn on_download_start(&mut self, peer_id: PeerId) -> u32 { - self.state = State::Downloading { peer_id }; - self.req_counter += 1; - self.req_counter + /// Returns true if we can expect some future event to progress this block component request + /// specifically. + pub fn is_awaiting_event(&self) -> bool { + match self.state { + // No event will progress this request specifically, but the request may be put on hold + // due to some external event + State::AwaitingDownload { .. } => false, + // Network will emit a download success / error event + State::Downloading { .. 
} => true, + // Not awaiting any external event + State::AwaitingProcess { .. } => false, + // Beacon processor will emit a processing result event + State::Processing { .. } => true, + // Request complete, no future event left + State::Processed { .. } => false, + } + } + + pub fn peek_downloaded_data(&self) -> Option<&T> { + match &self.state { + State::AwaitingDownload { .. } => None, + State::Downloading { .. } => None, + State::AwaitingProcess(result) => Some(&result.value), + State::Processing(result) => Some(&result.value), + State::Processed { .. } => None, + } + } + + /// Switch to `AwaitingProcessing` if the request is in `AwaitingDownload` state, otherwise + /// ignore. + pub fn insert_verified_response(&mut self, result: DownloadResult) -> bool { + if let State::AwaitingDownload { .. } = &self.state { + self.state = State::AwaitingProcess(result); + true + } else { + false + } + } + + /// Append metadata on why this request is in AwaitingDownload status. Very helpful to debug + /// stuck lookups. Not fallible as it's purely informational. + pub fn update_awaiting_download_status(&mut self, new_status: &'static str) { + if let State::AwaitingDownload(status) = &mut self.state { + *status = new_status + } + } + + /// Switch to `Downloading` if the request is in `AwaitingDownload` state, otherwise returns None. + pub fn on_download_start(&mut self, req_id: ReqId) -> Result<(), LookupRequestError> { + match &self.state { + State::AwaitingDownload { .. } => { + self.state = State::Downloading(req_id); + Ok(()) + } + other => Err(LookupRequestError::BadState(format!( + "Bad state on_download_start expected AwaitingDownload got {other}" + ))), + } } /// Registers a failure in downloading a block. This might be a peer disconnection or a wrong /// block. 
- pub fn on_download_failure(&mut self) { - self.failed_downloading = self.failed_downloading.saturating_add(1); - self.state = State::AwaitingDownload; - } - - pub fn on_download_success(&mut self) -> Result<(), String> { + pub fn on_download_failure(&mut self, req_id: ReqId) -> Result<(), LookupRequestError> { match &self.state { - State::Downloading { peer_id } => { - self.state = State::Processing { peer_id: *peer_id }; + State::Downloading(expected_req_id) => { + if req_id != *expected_req_id { + return Err(LookupRequestError::UnexpectedRequestId { + expected_req_id: *expected_req_id, + req_id, + }); + } + self.failed_downloading = self.failed_downloading.saturating_add(1); + self.state = State::AwaitingDownload("not started"); Ok(()) } - other => Err(format!( - "request bad state, expected downloading got {other}" - )), + other => Err(LookupRequestError::BadState(format!( + "Bad state on_download_failure expected Downloading got {other}" + ))), + } + } + + pub fn on_download_success( + &mut self, + req_id: ReqId, + result: DownloadResult, + ) -> Result<(), LookupRequestError> { + match &self.state { + State::Downloading(expected_req_id) => { + if req_id != *expected_req_id { + return Err(LookupRequestError::UnexpectedRequestId { + expected_req_id: *expected_req_id, + req_id, + }); + } + self.state = State::AwaitingProcess(result); + Ok(()) + } + other => Err(LookupRequestError::BadState(format!( + "Bad state on_download_success expected Downloading got {other}" + ))), + } + } + + /// Switch to `Processing` if the request is in `AwaitingProcess` state, otherwise returns None. 
+ pub fn maybe_start_processing(&mut self) -> Option> { + // For 2 lines replace state with placeholder to gain ownership of `result` + match &self.state { + State::AwaitingProcess(result) => { + let result = result.clone(); + self.state = State::Processing(result.clone()); + Some(result) + } + _ => None, + } + } + + /// Revert into `AwaitingProcessing`, if the payload if not invalid and can be submitted for + /// processing latter. + pub fn revert_to_awaiting_processing(&mut self) -> Result<(), LookupRequestError> { + match &self.state { + State::Processing(result) => { + self.state = State::AwaitingProcess(result.clone()); + Ok(()) + } + other => Err(LookupRequestError::BadState(format!( + "Bad state on revert_to_awaiting_processing expected Processing got {other}" + ))), } } /// Registers a failure in processing a block. - pub fn on_processing_failure(&mut self) { - self.failed_processing = self.failed_processing.saturating_add(1); - self.state = State::AwaitingDownload; + pub fn on_processing_failure(&mut self) -> Result { + match &self.state { + State::Processing(result) => { + let peer_id = result.peer_id; + self.failed_processing = self.failed_processing.saturating_add(1); + self.state = State::AwaitingDownload("not started"); + Ok(peer_id) + } + other => Err(LookupRequestError::BadState(format!( + "Bad state on_processing_failure expected Processing got {other}" + ))), + } } - pub fn on_processing_success(&mut self) -> Result<(), String> { + pub fn on_processing_success(&mut self) -> Result<(), LookupRequestError> { match &self.state { - State::Processing { peer_id } => { - self.state = State::Processed { peer_id: *peer_id }; + State::Processing(_) => { + self.state = State::Processed; Ok(()) } - other => Err(format!("not in processing state: {}", other).to_string()), + other => Err(LookupRequestError::BadState(format!( + "Bad state on_processing_success expected Processing got {other}" + ))), + } + } + + /// Mark a request as complete without any download 
or processing + pub fn on_completed_request(&mut self) -> Result<(), LookupRequestError> { + match &self.state { + State::AwaitingDownload { .. } => { + self.state = State::Processed; + Ok(()) + } + other => Err(LookupRequestError::BadState(format!( + "Bad state on_completed_request expected AwaitingDownload got {other}" + ))), } } @@ -480,109 +537,32 @@ impl SingleLookupRequestState { pub fn more_failed_processing_attempts(&self) -> bool { self.failed_processing >= self.failed_downloading } +} - /// This method should be used for peers wrapped in `PeerId::BlockAndBlobs`. - pub fn add_peer(&mut self, peer_id: &PeerId) { - self.available_peers.insert(*peer_id); - } - - /// If a peer disconnects, this request could be failed. If so, an error is returned - pub fn check_peer_disconnected(&mut self, dc_peer_id: &PeerId) -> Result<(), ()> { - self.available_peers.remove(dc_peer_id); - if let State::Downloading { peer_id } = &self.state { - if peer_id == dc_peer_id { - // Peer disconnected before providing a block - self.on_download_failure(); - return Err(()); - } - } - Ok(()) - } - - /// Returns the id peer we downloaded from if we have downloaded a verified block, otherwise - /// returns an error. - pub fn processing_peer(&self) -> Result { - match &self.state { - State::Processing { peer_id } | State::Processed { peer_id } => Ok(*peer_id), - other => Err(format!("not in processing state: {}", other).to_string()), - } - } - - pub fn get_used_peers(&self) -> impl Iterator { - self.used_peers.iter() - } - - /// Selects a random peer from available peers if any, inserts it in used peers and returns it. 
- pub fn use_rand_available_peer(&mut self) -> Option { - let peer_id = self - .available_peers - .iter() - .choose(&mut rand::thread_rng()) - .copied()?; - self.used_peers.insert(peer_id); - Some(peer_id) +// Display is used in the BadState assertions above +impl std::fmt::Display for State { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", Into::<&'static str>::into(self)) } } -impl slog::Value for SingleBlockLookup { - fn serialize( - &self, - _record: &slog::Record, - key: slog::Key, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - serializer.emit_str("request", key)?; - serializer.emit_arguments("lookup_type", &format_args!("{:?}", self.lookup_type))?; - serializer.emit_arguments("hash", &format_args!("{}", self.block_root()))?; - serializer.emit_arguments( - "blob_ids", - &format_args!("{:?}", self.blob_request_state.requested_ids.indices()), - )?; - serializer.emit_arguments( - "block_request_state.state", - &format_args!("{:?}", self.block_request_state.state), - )?; - serializer.emit_arguments( - "blob_request_state.state", - &format_args!("{:?}", self.blob_request_state.state), - )?; - slog::Result::Ok(()) - } -} - -impl slog::Value for SingleLookupRequestState { - fn serialize( - &self, - record: &slog::Record, - key: slog::Key, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - serializer.emit_str("request_state", key)?; - match &self.state { - State::AwaitingDownload => { - "awaiting_download".serialize(record, "state", serializer)? - } - State::Downloading { peer_id } => { - serializer.emit_arguments("downloading_peer", &format_args!("{}", peer_id))? - } - State::Processing { peer_id } => { - serializer.emit_arguments("processing_peer", &format_args!("{}", peer_id))? - } - State::Processed { .. 
} => "processed".serialize(record, "state", serializer)?, - } - serializer.emit_u8("failed_downloads", self.failed_downloading)?; - serializer.emit_u8("failed_processing", self.failed_processing)?; - slog::Result::Ok(()) - } -} - -impl std::fmt::Display for State { +// Debug is used in the log_stuck_lookups print to include some more info. Implements custom Debug +// to not dump an entire block or blob to terminal which don't add valuable data. +impl std::fmt::Debug for State { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - State::AwaitingDownload => write!(f, "AwaitingDownload"), - State::Downloading { .. } => write!(f, "Downloading"), - State::Processing { .. } => write!(f, "Processing"), - State::Processed { .. } => write!(f, "Processed"), + Self::AwaitingDownload(status) => write!(f, "AwaitingDownload({:?})", status), + Self::Downloading(req_id) => write!(f, "Downloading({:?})", req_id), + Self::AwaitingProcess(d) => write!(f, "AwaitingProcess({:?})", d.peer_id), + Self::Processing(d) => write!(f, "Processing({:?})", d.peer_id), + Self::Processed { .. 
} => write!(f, "Processed"), } } } + +fn fmt_peer_set_as_len( + peer_set: &HashSet, + f: &mut std::fmt::Formatter, +) -> Result<(), std::fmt::Error> { + write!(f, "{}", peer_set.len()) +} diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 8e3b35ee5d..a607151bde 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1,7 +1,9 @@ use crate::network_beacon_processor::NetworkBeaconProcessor; use crate::service::RequestId; -use crate::sync::manager::{RequestId as SyncRequestId, SingleLookupReqId, SyncManager}; +use crate::sync::manager::{ + BlockProcessType, RequestId as SyncRequestId, SingleLookupReqId, SyncManager, +}; use crate::sync::SyncMessage; use crate::NetworkMessage; use std::sync::Arc; @@ -9,11 +11,17 @@ use std::sync::Arc; use super::*; use crate::sync::block_lookups::common::ResponseType; +use beacon_chain::blob_verification::GossipVerifiedBlob; +use beacon_chain::block_verification_types::{BlockImportData, RpcBlock}; use beacon_chain::builder::Witness; +use beacon_chain::data_availability_checker::Availability; use beacon_chain::eth1_chain::CachingEth1Backend; use beacon_chain::test_utils::{ build_log, generate_rand_block_and_blobs, BeaconChainHarness, EphemeralHarnessType, NumBlobs, }; +use beacon_chain::{ + AvailabilityPendingExecutedBlock, PayloadVerificationOutcome, PayloadVerificationStatus, +}; use beacon_processor::WorkEvent; use lighthouse_network::rpc::{RPCError, RPCResponseErrorCode}; use lighthouse_network::types::SyncState; @@ -22,10 +30,12 @@ use slog::info; use slot_clock::{ManualSlotClock, SlotClock, TestingSlotClock}; use store::MemoryStore; use tokio::sync::mpsc; +use types::test_utils::TestRandom; use types::{ test_utils::{SeedableRng, XorShiftRng}, - BlobSidecar, ForkName, MinimalEthSpec as E, SignedBeaconBlock, + BlobSidecar, ForkName, MinimalEthSpec as E, SignedBeaconBlock, Slot, }; +use 
types::{BeaconState, BeaconStateBase}; type T = Witness, E, MemoryStore, MemoryStore>; @@ -57,6 +67,7 @@ type T = Witness, E, MemoryStore, Memo struct TestRig { /// Receiver for `BeaconProcessor` events (e.g. block processing results). beacon_processor_rx: mpsc::Receiver>, + beacon_processor_rx_queue: Vec>, /// Receiver for `NetworkMessage` (e.g. outgoing RPC requests from sync) network_rx: mpsc::UnboundedReceiver>, /// Stores all `NetworkMessage`s received from `network_recv`. (e.g. outgoing RPC requests) @@ -65,6 +76,8 @@ struct TestRig { sync_manager: SyncManager, /// To manipulate sync state and peer connection status network_globals: Arc>, + /// Beacon chain harness + harness: BeaconChainHarness>, /// `rng` for generating test blocks and blobs. rng: XorShiftRng, fork_name: ForkName, @@ -72,6 +85,7 @@ struct TestRig { } const D: Duration = Duration::new(0, 0); +const PARENT_FAIL_TOLERANCE: u8 = SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS; impl TestRig { fn test_setup() -> Self { @@ -114,6 +128,7 @@ impl TestRig { let rng = XorShiftRng::from_seed([42; 16]); TestRig { beacon_processor_rx, + beacon_processor_rx_queue: vec![], network_rx, network_rx_queue: vec![], rng, @@ -125,6 +140,7 @@ impl TestRig { sync_recv, log.clone(), ), + harness, fork_name, log, } @@ -194,11 +210,15 @@ impl TestRig { self.sync_manager.handle_message(sync_message); } + fn active_single_lookups(&self) -> Vec { + self.sync_manager.active_single_lookups() + } + fn active_single_lookups_count(&self) -> usize { self.sync_manager.active_single_lookups().len() } - fn active_parent_lookups(&self) -> Vec { + fn active_parent_lookups(&self) -> Vec> { self.sync_manager.active_parent_lookups() } @@ -206,22 +226,85 @@ impl TestRig { self.sync_manager.active_parent_lookups().len() } - fn failed_chains_contains(&mut self, chain_hash: &Hash256) -> bool { - self.sync_manager.failed_chains_contains(chain_hash) + fn assert_single_lookups_count(&self, count: usize) { + assert_eq!( + self.active_single_lookups_count(), 
+ count, + "Unexpected count of single lookups. Current lookups: {:?}", + self.active_single_lookups() + ); } - #[track_caller] - fn assert_parent_lookups_consistency(&self) { - let hashes = self.active_parent_lookups(); - let expected = hashes.len(); + fn assert_parent_lookups_count(&self, count: usize) { assert_eq!( - expected, - hashes - .into_iter() - .collect::>() - .len(), - "duplicated chain hashes in parent queue" - ) + self.active_parent_lookups_count(), + count, + "Unexpected count of parent lookups. Parent lookups: {:?}. Current lookups: {:?}", + self.active_parent_lookups(), + self.active_single_lookups() + ); + } + + fn assert_lookup_is_active(&self, block_root: Hash256) { + let lookups = self.sync_manager.active_single_lookups(); + if !lookups.iter().any(|l| l.1 == block_root) { + panic!("Expected lookup {block_root} to be the only active: {lookups:?}"); + } + } + + fn assert_lookup_peers(&self, block_root: Hash256, mut expected_peers: Vec) { + let mut lookup = self + .sync_manager + .active_single_lookups() + .into_iter() + .find(|l| l.1 == block_root) + .unwrap_or_else(|| panic!("no lookup for {block_root}")); + lookup.3.sort(); + expected_peers.sort(); + assert_eq!( + lookup.3, expected_peers, + "unexpected peers on lookup {block_root}" + ); + } + + fn insert_failed_chain(&mut self, block_root: Hash256) { + self.sync_manager.insert_failed_chain(block_root); + } + + fn assert_not_failed_chain(&mut self, chain_hash: Hash256) { + let failed_chains = self.sync_manager.get_failed_chains(); + if failed_chains.contains(&chain_hash) { + panic!("failed chains contain {chain_hash:?}: {failed_chains:?}"); + } + } + + fn failed_chains_contains(&mut self, chain_hash: &Hash256) -> bool { + self.sync_manager.get_failed_chains().contains(chain_hash) + } + + fn find_single_lookup_for(&self, block_root: Hash256) -> Id { + self.active_single_lookups() + .iter() + .find(|l| l.1 == block_root) + .unwrap_or_else(|| panic!("no single block lookup found for 
{block_root}")) + .0 + } + + fn expect_no_active_single_lookups(&self) { + assert!( + self.active_single_lookups().is_empty(), + "expect no single block lookups: {:?}", + self.active_single_lookups() + ); + } + + fn expect_no_active_lookups(&self) { + self.expect_no_active_single_lookups(); + } + + fn expect_no_active_lookups_empty_network(&mut self) { + self.expect_no_active_lookups(); + self.expect_empty_network(); } fn new_connected_peer(&mut self) -> PeerId { @@ -233,27 +316,43 @@ impl TestRig { peer_id } - fn parent_chain_processed(&mut self, chain_hash: Hash256, result: BatchProcessResult) { - self.send_sync_message(SyncMessage::BatchProcessed { - sync_type: ChainSegmentProcessId::ParentLookup(chain_hash), - result, - }) + fn parent_chain_processed_success( + &mut self, + chain_hash: Hash256, + blocks: &[Arc>], + ) { + // Send import events for all pending parent blocks + for _ in blocks { + self.parent_block_processed_imported(chain_hash); + } + // Send final import event for the block that triggered the lookup + self.single_block_component_processed_imported(chain_hash); } - fn parent_chain_processed_success(&mut self, chain_hash: Hash256) { - self.parent_chain_processed( - chain_hash, - BatchProcessResult::Success { - was_non_empty: true, - }, - ) + /// Locate a parent lookup chain with tip hash `chain_hash` + fn find_oldest_parent_lookup(&self, chain_hash: Hash256) -> Hash256 { + let parent_chain = self + .active_parent_lookups() + .into_iter() + .find(|chain| chain.first() == Some(&chain_hash)) + .unwrap_or_else(|| { + panic!( + "No parent chain with chain_hash {chain_hash:?}: Parent lookups {:?} Single lookups {:?}", + self.active_parent_lookups(), + self.active_single_lookups(), + ) + }); + *parent_chain.last().unwrap() } fn parent_block_processed(&mut self, chain_hash: Hash256, result: BlockProcessingResult) { - self.send_sync_message(SyncMessage::BlockComponentProcessed { - process_type: BlockProcessType::ParentLookup { chain_hash }, - result, - }); 
+ let id = self.find_single_lookup_for(self.find_oldest_parent_lookup(chain_hash)); + self.single_block_component_processed(id, result); + } + + fn parent_blob_processed(&mut self, chain_hash: Hash256, result: BlockProcessingResult) { + let id = self.find_single_lookup_for(self.find_oldest_parent_lookup(chain_hash)); + self.single_blob_component_processed(id, result); } fn parent_block_processed_imported(&mut self, chain_hash: Hash256) { @@ -263,35 +362,24 @@ impl TestRig { ); } - fn single_block_component_processed( - &mut self, - id: SingleLookupReqId, - result: BlockProcessingResult, - ) { + fn single_block_component_processed(&mut self, id: Id, result: BlockProcessingResult) { self.send_sync_message(SyncMessage::BlockComponentProcessed { - process_type: BlockProcessType::SingleBlock { id: id.id }, + process_type: BlockProcessType::SingleBlock { id }, result, }) } - fn single_block_component_processed_imported( - &mut self, - id: SingleLookupReqId, - block_root: Hash256, - ) { + fn single_block_component_processed_imported(&mut self, block_root: Hash256) { + let id = self.find_single_lookup_for(block_root); self.single_block_component_processed( id, BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), ) } - fn single_blob_component_processed( - &mut self, - id: SingleLookupReqId, - result: BlockProcessingResult, - ) { + fn single_blob_component_processed(&mut self, id: Id, result: BlockProcessingResult) { self.send_sync_message(SyncMessage::BlockComponentProcessed { - process_type: BlockProcessType::SingleBlob { id: id.id }, + process_type: BlockProcessType::SingleBlob { id }, result, }) } @@ -302,6 +390,7 @@ impl TestRig { peer_id: PeerId, beacon_block: Option>>, ) { + self.log("parent_lookup_block_response"); self.send_sync_message(SyncMessage::RpcBlock { request_id: SyncRequestId::SingleBlock { id }, peer_id, @@ -316,6 +405,7 @@ impl TestRig { peer_id: PeerId, beacon_block: Option>>, ) { + self.log("single_lookup_block_response"); 
self.send_sync_message(SyncMessage::RpcBlock { request_id: SyncRequestId::SingleBlock { id }, peer_id, @@ -330,6 +420,10 @@ impl TestRig { peer_id: PeerId, blob_sidecar: Option>>, ) { + self.log(&format!( + "parent_lookup_blob_response {:?}", + blob_sidecar.as_ref().map(|b| b.index) + )); self.send_sync_message(SyncMessage::RpcBlob { request_id: SyncRequestId::SingleBlob { id }, peer_id, @@ -352,6 +446,72 @@ impl TestRig { }); } + fn complete_single_lookup_blob_download( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + blobs: Vec>, + ) { + for blob in blobs { + self.single_lookup_blob_response(id, peer_id, Some(blob.into())); + } + self.single_lookup_blob_response(id, peer_id, None); + } + + fn complete_single_lookup_blob_lookup_valid( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + blobs: Vec>, + import: bool, + ) { + let block_root = blobs.first().unwrap().block_root(); + let block_slot = blobs.first().unwrap().slot(); + self.complete_single_lookup_blob_download(id, peer_id, blobs); + self.expect_block_process(ResponseType::Blob); + self.single_blob_component_processed( + id.lookup_id, + if import { + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)) + } else { + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + block_slot, block_root, + )) + }, + ); + } + + fn complete_lookup_block_download(&mut self, block: SignedBeaconBlock) { + let block_root = block.canonical_root(); + let id = self.expect_block_lookup_request(block_root); + self.expect_empty_network(); + let peer_id = self.new_connected_peer(); + self.single_lookup_block_response(id, peer_id, Some(block.into())); + self.single_lookup_block_response(id, peer_id, None); + } + + fn complete_lookup_block_import_valid(&mut self, block_root: Hash256, import: bool) { + self.expect_block_process(ResponseType::Block); + let id = self.find_single_lookup_for(block_root); + self.single_block_component_processed( + id, + if import { + 
BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)) + } else { + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + Slot::new(0), + block_root, + )) + }, + ) + } + + fn complete_single_lookup_block_valid(&mut self, block: SignedBeaconBlock, import: bool) { + let block_root = block.canonical_root(); + self.complete_lookup_block_download(block); + self.complete_lookup_block_import_valid(block_root, import) + } + fn parent_lookup_failed(&mut self, id: SingleLookupReqId, peer_id: PeerId, error: RPCError) { self.send_sync_message(SyncMessage::RpcError { peer_id, @@ -379,8 +539,27 @@ impl TestRig { }) } - fn peer_disconnected(&mut self, peer_id: PeerId) { - self.send_sync_message(SyncMessage::Disconnect(peer_id)); + fn peer_disconnected(&mut self, disconnected_peer_id: PeerId) { + self.send_sync_message(SyncMessage::Disconnect(disconnected_peer_id)); + } + + /// Return RPCErrors for all active requests of peer + fn rpc_error_all_active_requests(&mut self, disconnected_peer_id: PeerId) { + self.drain_network_rx(); + while let Ok(request_id) = self.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id, + request_id: RequestId::Sync(id), + .. 
+ } if *peer_id == disconnected_peer_id => Some(*id), + _ => None, + }) { + self.send_sync_message(SyncMessage::RpcError { + peer_id: disconnected_peer_id, + request_id, + error: RPCError::Disconnected, + }); + } } fn drain_network_rx(&mut self) { @@ -389,6 +568,12 @@ impl TestRig { } } + fn drain_processor_rx(&mut self) { + while let Ok(event) = self.beacon_processor_rx.try_recv() { + self.beacon_processor_rx_queue.push(event); + } + } + fn pop_received_network_event) -> Option>( &mut self, predicate_transform: F, @@ -409,42 +594,75 @@ impl TestRig { } } - #[track_caller] - fn expect_block_lookup_request(&mut self, for_block: Hash256) -> SingleLookupReqId { + fn pop_received_processor_event) -> Option>( + &mut self, + predicate_transform: F, + ) -> Result { + self.drain_processor_rx(); + + if let Some(index) = self + .beacon_processor_rx_queue + .iter() + .position(|x| predicate_transform(x).is_some()) + { + // Transform the item, knowing that it won't be None because we checked it in the position predicate. 
+ let transformed = predicate_transform(&self.beacon_processor_rx_queue[index]).unwrap(); + self.beacon_processor_rx_queue.remove(index); + Ok(transformed) + } else { + Err(format!( + "current processor messages {:?}", + self.beacon_processor_rx_queue + ) + .to_string()) + } + } + + fn find_block_lookup_request( + &mut self, + for_block: Hash256, + ) -> Result { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, request: Request::BlocksByRoot(request), request_id: RequestId::Sync(SyncRequestId::SingleBlock { id }), - } if id.lookup_type == LookupType::Current - && request.block_roots().to_vec().contains(&for_block) => - { - Some(*id) - } + } if request.block_roots().to_vec().contains(&for_block) => Some(*id), _ => None, }) - .unwrap_or_else(|e| panic!("Expected block request for {for_block:?}: {e}")) } #[track_caller] - fn expect_blob_lookup_request(&mut self, for_block: Hash256) -> SingleLookupReqId { + fn expect_block_lookup_request(&mut self, for_block: Hash256) -> SingleLookupReqId { + self.find_block_lookup_request(for_block) + .unwrap_or_else(|e| panic!("Expected block request for {for_block:?}: {e}")) + } + + fn find_blob_lookup_request( + &mut self, + for_block: Hash256, + ) -> Result { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, request: Request::BlobsByRoot(request), request_id: RequestId::Sync(SyncRequestId::SingleBlob { id }), - } if id.lookup_type == LookupType::Current - && request - .blob_ids - .to_vec() - .iter() - .any(|r| r.block_root == for_block) => + } if request + .blob_ids + .to_vec() + .iter() + .any(|r| r.block_root == for_block) => { Some(*id) } _ => None, }) - .unwrap_or_else(|e| panic!("Expected blob request for {for_block:?}: {e}")) + } + + #[track_caller] + fn expect_blob_lookup_request(&mut self, for_block: Hash256) -> SingleLookupReqId { + self.find_blob_lookup_request(for_block) + .unwrap_or_else(|e| panic!("Expected blob request for {for_block:?}: 
{e}")) } #[track_caller] @@ -454,16 +672,21 @@ impl TestRig { peer_id: _, request: Request::BlocksByRoot(request), request_id: RequestId::Sync(SyncRequestId::SingleBlock { id }), - } if id.lookup_type == LookupType::Parent - && request.block_roots().to_vec().contains(&for_block) => - { - Some(*id) - } + } if request.block_roots().to_vec().contains(&for_block) => Some(*id), _ => None, }) .unwrap_or_else(|e| panic!("Expected block parent request for {for_block:?}: {e}")) } + fn expect_no_requests_for(&mut self, block_root: Hash256) { + if let Ok(request) = self.find_block_lookup_request(block_root) { + panic!("Expected no block request for {block_root:?} found {request:?}"); + } + if let Ok(request) = self.find_blob_lookup_request(block_root) { + panic!("Expected no blob request for {block_root:?} found {request:?}"); + } + } + #[track_caller] fn expect_blob_parent_request(&mut self, for_block: Hash256) -> SingleLookupReqId { self.pop_received_network_event(|ev| match ev { @@ -471,12 +694,11 @@ impl TestRig { peer_id: _, request: Request::BlobsByRoot(request), request_id: RequestId::Sync(SyncRequestId::SingleBlob { id }), - } if id.lookup_type == LookupType::Parent - && request - .blob_ids - .to_vec() - .iter() - .all(|r| r.block_root == for_block) => + } if request + .blob_ids + .to_vec() + .iter() + .all(|r| r.block_root == for_block) => { Some(*id) } @@ -485,41 +707,19 @@ impl TestRig { .unwrap_or_else(|e| panic!("Expected blob parent request for {for_block:?}: {e}")) } - fn expect_lookup_request_block_and_blobs(&mut self, block_root: Hash256) -> SingleLookupReqId { - let id = self.expect_block_lookup_request(block_root); - // If we're in deneb, a blob request should have been triggered as well, - // we don't require a response because we're generateing 0-blob blocks in this test. 
- if self.after_deneb() { - let _ = self.expect_blob_lookup_request(block_root); - } - id - } - - fn expect_parent_request_block_and_blobs(&mut self, block_root: Hash256) -> SingleLookupReqId { - let id = self.expect_block_parent_request(block_root); - // If we're in deneb, a blob request should have been triggered as well, - // we don't require a response because we're generateing 0-blob blocks in this test. - if self.after_deneb() { - let _ = self.expect_blob_parent_request(block_root); - } - id - } - #[track_caller] fn expect_block_process(&mut self, response_type: ResponseType) { match response_type { - ResponseType::Block => match self.beacon_processor_rx.try_recv() { - Ok(work) => { - assert_eq!(work.work_type(), beacon_processor::RPC_BLOCK); - } - other => panic!("Expected block process, found {:?}", other), - }, - ResponseType::Blob => match self.beacon_processor_rx.try_recv() { - Ok(work) => { - assert_eq!(work.work_type(), beacon_processor::RPC_BLOBS); - } - other => panic!("Expected blob process, found {:?}", other), - }, + ResponseType::Block => self + .pop_received_processor_event(|ev| { + (ev.work_type() == beacon_processor::RPC_BLOCK).then_some(()) + }) + .unwrap_or_else(|e| panic!("Expected block work event: {e}")), + ResponseType::Blob => self + .pop_received_processor_event(|ev| { + (ev.work_type() == beacon_processor::RPC_BLOBS).then_some(()) + }) + .unwrap_or_else(|e| panic!("Expected blobs work event: {e}")), } } @@ -544,9 +744,13 @@ impl TestRig { fn expect_parent_chain_process(&mut self) { match self.beacon_processor_rx.try_recv() { Ok(work) => { - assert_eq!(work.work_type(), beacon_processor::CHAIN_SEGMENT); + // Parent chain sends blocks one by one + assert_eq!(work.work_type(), beacon_processor::RPC_BLOCK); } - other => panic!("Expected chain segment process, found {:?}", other), + other => panic!( + "Expected rpc_block from chain segment process, found {:?}", + other + ), } } @@ -560,24 +764,37 @@ impl TestRig { #[track_caller] fn 
expect_empty_beacon_processor(&mut self) { - assert_eq!( - self.beacon_processor_rx.try_recv().expect_err("must err"), - mpsc::error::TryRecvError::Empty - ); + match self.beacon_processor_rx.try_recv() { + Err(mpsc::error::TryRecvError::Empty) => {} // ok + Ok(event) => panic!("expected empty beacon processor: {:?}", event), + other => panic!("unexpected err {:?}", other), + } } #[track_caller] - pub fn expect_penalty(&mut self, peer_id: PeerId) { - self.pop_received_network_event(|ev| match ev { - NetworkMessage::ReportPeer { peer_id: p_id, .. } if p_id == &peer_id => Some(()), - _ => None, - }) - .unwrap_or_else(|_| { - panic!( - "Expected peer penalty for {peer_id}: {:#?}", - self.network_rx_queue - ) - }) + pub fn expect_penalty(&mut self, peer_id: PeerId, expect_penalty_msg: &'static str) { + let penalty_msg = self + .pop_received_network_event(|ev| match ev { + NetworkMessage::ReportPeer { + peer_id: p_id, msg, .. + } if p_id == &peer_id => Some(msg.to_owned()), + _ => None, + }) + .unwrap_or_else(|_| { + panic!( + "Expected '{expect_penalty_msg}' penalty for peer {peer_id}: {:#?}", + self.network_rx_queue + ) + }); + assert_eq!( + penalty_msg, expect_penalty_msg, + "Unexpected penalty msg for {peer_id}" + ); + } + + pub fn expect_single_penalty(&mut self, peer_id: PeerId, expect_penalty_msg: &'static str) { + self.expect_penalty(peer_id, expect_penalty_msg); + self.expect_no_penalty_for(peer_id); } pub fn block_with_parent_and_blobs( @@ -595,17 +812,127 @@ impl TestRig { pub fn rand_blockchain(&mut self, depth: usize) -> Vec>> { let mut blocks = Vec::>>::with_capacity(depth); - while blocks.len() < depth { + for slot in 0..depth { let parent = blocks .last() .map(|b| b.canonical_root()) .unwrap_or_else(Hash256::random); let mut block = self.rand_block(); *block.message_mut().parent_root_mut() = parent; + *block.message_mut().slot_mut() = slot.into(); blocks.push(block.into()); } + self.log(&format!( + "Blockchain dump {:#?}", + blocks + .iter() + .map(|b| 
format!( + "block {} {} parent {}", + b.slot(), + b.canonical_root(), + b.parent_root() + )) + .collect::>() + )); blocks } + + fn insert_block_to_da_checker(&mut self, block: Arc>) { + let state = BeaconState::Base(BeaconStateBase::random_for_test(&mut self.rng)); + let parent_block = self.rand_block(); + let import_data = BlockImportData::::__new_for_test( + block.canonical_root(), + state, + parent_block.into(), + ); + let payload_verification_outcome = PayloadVerificationOutcome { + payload_verification_status: PayloadVerificationStatus::Verified, + is_valid_merge_transition_block: false, + }; + let executed_block = + AvailabilityPendingExecutedBlock::new(block, import_data, payload_verification_outcome); + match self + .harness + .chain + .data_availability_checker + .put_pending_executed_block(executed_block) + .unwrap() + { + Availability::Available(_) => panic!("block removed from da_checker, available"), + Availability::MissingComponents(block_root) => { + self.log(&format!("inserted block to da_checker {block_root:?}")) + } + }; + } + + fn insert_blob_to_da_checker(&mut self, blob: BlobSidecar) { + match self + .harness + .chain + .data_availability_checker + .put_gossip_blob(GossipVerifiedBlob::__assumed_valid(blob.into())) + .unwrap() + { + Availability::Available(_) => panic!("blob removed from da_checker, available"), + Availability::MissingComponents(block_root) => { + self.log(&format!("inserted blob to da_checker {block_root:?}")) + } + }; + } + + fn insert_block_to_processing_cache(&mut self, block: Arc>) { + self.harness + .chain + .reqresp_pre_import_cache + .write() + .insert(block.canonical_root(), block); + } + + fn simulate_block_gossip_processing_becomes_invalid(&mut self, block_root: Hash256) { + self.harness + .chain + .reqresp_pre_import_cache + .write() + .remove(&block_root); + + self.send_sync_message(SyncMessage::GossipBlockProcessResult { + block_root, + imported: false, + }); + } + + fn 
simulate_block_gossip_processing_becomes_valid_missing_components( + &mut self, + block: Arc>, + ) { + let block_root = block.canonical_root(); + self.harness + .chain + .reqresp_pre_import_cache + .write() + .remove(&block_root); + + self.insert_block_to_da_checker(block); + + self.send_sync_message(SyncMessage::GossipBlockProcessResult { + block_root, + imported: false, + }); + } +} + +#[test] +fn stable_rng() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let (block, _) = generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng); + assert_eq!( + block.canonical_root(), + Hash256::from_slice( + &hex::decode("adfd2e9e7a7976e8ccaed6eaf0257ed36a5b476732fee63ff44966602fd099ec") + .unwrap() + ), + "rng produces a consistent value" + ); } #[test] @@ -616,7 +943,7 @@ fn test_single_block_lookup_happy_path() { let block_root = block.canonical_root(); // Trigger the request rig.trigger_unknown_block_from_attestation(block_root, peer_id); - let id = rig.expect_lookup_request_block_and_blobs(block_root); + let id = rig.expect_block_lookup_request(block_root); // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. @@ -630,27 +957,34 @@ fn test_single_block_lookup_happy_path() { // Send the stream termination. Peer should have not been penalized, and the request removed // after processing. 
rig.single_lookup_block_response(id, peer_id, None); - rig.single_block_component_processed_imported(id, block_root); + rig.single_block_component_processed_imported(block_root); rig.expect_empty_network(); - assert_eq!(rig.active_single_lookups_count(), 0); + rig.expect_no_active_lookups(); } +// Tests that if a peer does not respond with a block, we downscore and retry the block only #[test] fn test_single_block_lookup_empty_response() { - let mut rig = TestRig::test_setup(); + let mut r = TestRig::test_setup(); - let block_hash = Hash256::random(); - let peer_id = rig.new_connected_peer(); + let block = r.rand_block(); + let block_root = block.canonical_root(); + let peer_id = r.new_connected_peer(); // Trigger the request - rig.trigger_unknown_block_from_attestation(block_hash, peer_id); - let id = rig.expect_lookup_request_block_and_blobs(block_hash); + r.trigger_unknown_block_from_attestation(block_root, peer_id); + let id = r.expect_block_lookup_request(block_root); // The peer does not have the block. It should be penalized. - rig.single_lookup_block_response(id, peer_id, None); - rig.expect_penalty(peer_id); - - rig.expect_block_lookup_request(block_hash); // it should be retried + r.single_lookup_block_response(id, peer_id, None); + r.expect_penalty(peer_id, "NoResponseReturned"); + // it should be retried + let id = r.expect_block_lookup_request(block_root); + // Send the right block this time. + r.single_lookup_block_response(id, peer_id, Some(block.into())); + r.expect_block_process(ResponseType::Block); + r.single_block_component_processed_imported(block_root); + r.expect_no_active_lookups(); } #[test] @@ -662,12 +996,12 @@ fn test_single_block_lookup_wrong_response() { // Trigger the request rig.trigger_unknown_block_from_attestation(block_hash, peer_id); - let id = rig.expect_lookup_request_block_and_blobs(block_hash); + let id = rig.expect_block_lookup_request(block_hash); // Peer sends something else. It should be penalized. 
let bad_block = rig.rand_block(); rig.single_lookup_block_response(id, peer_id, Some(bad_block.into())); - rig.expect_penalty(peer_id); + rig.expect_penalty(peer_id, "UnrequestedBlockRoot"); rig.expect_block_lookup_request(block_hash); // should be retried // Send the stream termination. This should not produce an additional penalty. @@ -684,7 +1018,7 @@ fn test_single_block_lookup_failure() { // Trigger the request rig.trigger_unknown_block_from_attestation(block_hash, peer_id); - let id = rig.expect_lookup_request_block_and_blobs(block_hash); + let id = rig.expect_block_lookup_request(block_hash); // The request fails. RPC failures are handled elsewhere so we should not penalize the peer. rig.single_lookup_failed(id, peer_id, RPCError::UnsupportedProtocol); @@ -703,7 +1037,7 @@ fn test_single_block_lookup_becomes_parent_request() { // Trigger the request rig.trigger_unknown_block_from_attestation(block.canonical_root(), peer_id); - let id = rig.expect_lookup_request_block_and_blobs(block_root); + let id = rig.expect_block_parent_request(block_root); // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. @@ -717,11 +1051,11 @@ fn test_single_block_lookup_becomes_parent_request() { // Send the stream termination. Peer should have not been penalized, and the request moved to a // parent request after processing. 
rig.single_block_component_processed( - id, + id.lookup_id, BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(), ); - assert_eq!(rig.active_single_lookups_count(), 1); - rig.expect_parent_request_block_and_blobs(parent_root); + assert_eq!(rig.active_single_lookups_count(), 2); // 2 = current + parent + rig.expect_block_parent_request(parent_root); rig.expect_empty_network(); assert_eq!(rig.active_parent_lookups_count(), 1); } @@ -735,21 +1069,25 @@ fn test_parent_lookup_happy_path() { // Trigger the request rig.trigger_unknown_parent_block(peer_id, block.into()); - let id = rig.expect_parent_request_block_and_blobs(parent_root); + let id = rig.expect_block_parent_request(parent_root); // Peer sends the right block, it should be sent for processing. Peer should not be penalized. rig.parent_lookup_block_response(id, peer_id, Some(parent.into())); + // No request of blobs because the block has not data + rig.expect_empty_network(); rig.expect_block_process(ResponseType::Block); rig.expect_empty_network(); + // Add peer to child lookup to prevent it being dropped + rig.trigger_unknown_block_from_attestation(block_root, peer_id); // Processing succeeds, now the rest of the chain should be sent for processing. rig.parent_block_processed( block_root, BlockError::BlockIsAlreadyKnown(block_root).into(), ); rig.expect_parent_chain_process(); - rig.parent_chain_processed_success(block_root); - assert_eq!(rig.active_parent_lookups_count(), 0); + rig.parent_chain_processed_success(block_root, &[]); + rig.expect_no_active_lookups_empty_network(); } #[test] @@ -761,12 +1099,12 @@ fn test_parent_lookup_wrong_response() { // Trigger the request rig.trigger_unknown_parent_block(peer_id, block.into()); - let id1 = rig.expect_parent_request_block_and_blobs(parent_root); + let id1 = rig.expect_block_parent_request(parent_root); // Peer sends the wrong block, peer should be penalized and the block re-requested. 
let bad_block = rig.rand_block(); rig.parent_lookup_block_response(id1, peer_id, Some(bad_block.into())); - rig.expect_penalty(peer_id); + rig.expect_penalty(peer_id, "UnrequestedBlockRoot"); let id2 = rig.expect_block_parent_request(parent_root); // Send the stream termination for the first request. This should not produce extra penalties. @@ -777,38 +1115,13 @@ fn test_parent_lookup_wrong_response() { rig.parent_lookup_block_response(id2, peer_id, Some(parent.into())); rig.expect_block_process(ResponseType::Block); + // Add peer to child lookup to prevent it being dropped + rig.trigger_unknown_block_from_attestation(block_root, peer_id); // Processing succeeds, now the rest of the chain should be sent for processing. rig.parent_block_processed_imported(block_root); rig.expect_parent_chain_process(); - rig.parent_chain_processed_success(block_root); - assert_eq!(rig.active_parent_lookups_count(), 0); -} - -#[test] -fn test_parent_lookup_empty_response() { - let mut rig = TestRig::test_setup(); - - let (parent, block, parent_root, block_root) = rig.rand_block_and_parent(); - let peer_id = rig.new_connected_peer(); - - // Trigger the request - rig.trigger_unknown_parent_block(peer_id, block.into()); - let id1 = rig.expect_parent_request_block_and_blobs(parent_root); - - // Peer sends an empty response, peer should be penalized and the block re-requested. - rig.parent_lookup_block_response(id1, peer_id, None); - rig.expect_penalty(peer_id); - let id2 = rig.expect_block_parent_request(parent_root); - - // Send the right block this time. - rig.parent_lookup_block_response(id2, peer_id, Some(parent.into())); - rig.expect_block_process(ResponseType::Block); - - // Processing succeeds, now the rest of the chain should be sent for processing. 
- rig.parent_block_processed_imported(block_root); - rig.expect_parent_chain_process(); - rig.parent_chain_processed_success(block_root); - assert_eq!(rig.active_parent_lookups_count(), 0); + rig.parent_chain_processed_success(block_root, &[]); + rig.expect_no_active_lookups_empty_network(); } #[test] @@ -820,21 +1133,23 @@ fn test_parent_lookup_rpc_failure() { // Trigger the request rig.trigger_unknown_parent_block(peer_id, block.into()); - let id1 = rig.expect_parent_request_block_and_blobs(parent_root); + let id = rig.expect_block_parent_request(parent_root); // The request fails. It should be tried again. - rig.parent_lookup_failed_unavailable(id1, peer_id); - let id2 = rig.expect_block_parent_request(parent_root); + rig.parent_lookup_failed_unavailable(id, peer_id); + let id = rig.expect_block_parent_request(parent_root); // Send the right block this time. - rig.parent_lookup_block_response(id2, peer_id, Some(parent.into())); + rig.parent_lookup_block_response(id, peer_id, Some(parent.into())); rig.expect_block_process(ResponseType::Block); + // Add peer to child lookup to prevent it being dropped + rig.trigger_unknown_block_from_attestation(block_root, peer_id); // Processing succeeds, now the rest of the chain should be sent for processing. 
rig.parent_block_processed_imported(block_root); rig.expect_parent_chain_process(); - rig.parent_chain_processed_success(block_root); - assert_eq!(rig.active_parent_lookups_count(), 0); + rig.parent_chain_processed_success(block_root, &[]); + rig.expect_no_active_lookups_empty_network(); } #[test] @@ -847,12 +1162,9 @@ fn test_parent_lookup_too_many_attempts() { // Trigger the request rig.trigger_unknown_parent_block(peer_id, block.into()); - for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { + for i in 1..=PARENT_FAIL_TOLERANCE { let id = rig.expect_block_parent_request(parent_root); // Blobs are only requested in the first iteration as this test only retries blocks - if rig.after_deneb() && i == 1 { - let _ = rig.expect_blob_parent_request(parent_root); - } if i % 2 == 0 { // make sure every error is accounted for @@ -872,11 +1184,11 @@ fn test_parent_lookup_too_many_attempts() { // I'm unsure if this is how it should behave? // rig.parent_lookup_block_response(id, peer_id, None); - rig.expect_penalty(peer_id); + rig.expect_penalty(peer_id, "UnrequestedBlockRoot"); } } - assert_eq!(rig.active_parent_lookups_count(), 0); + rig.expect_no_active_lookups_empty_network(); } #[test] @@ -888,13 +1200,9 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() { // Trigger the request rig.trigger_unknown_parent_block(peer_id, block.into()); - for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { + for i in 1..=PARENT_FAIL_TOLERANCE { assert!(!rig.failed_chains_contains(&block_root)); let id = rig.expect_block_parent_request(parent_root); - // Blobs are only requested in the first iteration as this test only retries blocks - if rig.after_deneb() && i == 1 { - let _ = rig.expect_blob_parent_request(parent_root); - } if i % 2 != 0 { // The request fails. It should be tried again. rig.parent_lookup_failed_unavailable(id, peer_id); @@ -902,18 +1210,18 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() { // Send a bad block this time. 
It should be tried again. let bad_block = rig.rand_block(); rig.parent_lookup_block_response(id, peer_id, Some(bad_block.into())); - rig.expect_penalty(peer_id); + rig.expect_penalty(peer_id, "UnrequestedBlockRoot"); } } - assert_eq!(rig.active_parent_lookups_count(), 0); assert!(!rig.failed_chains_contains(&block_root)); assert!(!rig.failed_chains_contains(&parent.canonical_root())); + rig.expect_no_active_lookups_empty_network(); } #[test] fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { - const PROCESSING_FAILURES: u8 = parent_lookup::PARENT_FAIL_TOLERANCE / 2 + 1; + const PROCESSING_FAILURES: u8 = PARENT_FAIL_TOLERANCE / 2 + 1; let mut rig = TestRig::test_setup(); let (parent, block, parent_root, block_root) = rig.rand_block_and_parent(); let peer_id = rig.new_connected_peer(); @@ -922,39 +1230,32 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { rig.trigger_unknown_parent_block(peer_id, block.into()); rig.log("Fail downloading the block"); - for i in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { + for _ in 0..(PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { let id = rig.expect_block_parent_request(parent_root); - // Blobs are only requested in the first iteration as this test only retries blocks - if rig.after_deneb() && i == 0 { - let _ = rig.expect_blob_parent_request(parent_root); - } // The request fails. It should be tried again. 
rig.parent_lookup_failed_unavailable(id, peer_id); } rig.log("Now fail processing a block in the parent request"); - for i in 0..PROCESSING_FAILURES { + for _ in 0..PROCESSING_FAILURES { let id = rig.expect_block_parent_request(parent_root); - // Blobs are only requested in the first iteration as this test only retries blocks - if rig.after_deneb() && i != 0 { - let _ = rig.expect_blob_parent_request(parent_root); - } - assert!(!rig.failed_chains_contains(&block_root)); + // Blobs are only requested in the previous first iteration as this test only retries blocks + rig.assert_not_failed_chain(block_root); // send the right parent but fail processing rig.parent_lookup_block_response(id, peer_id, Some(parent.clone().into())); rig.parent_block_processed(block_root, BlockError::InvalidSignature.into()); rig.parent_lookup_block_response(id, peer_id, None); - rig.expect_penalty(peer_id); + rig.expect_penalty(peer_id, "lookup_block_processing_failure"); } - assert!(rig.failed_chains_contains(&block_root)); - assert_eq!(rig.active_parent_lookups_count(), 0); + rig.assert_not_failed_chain(block_root); + rig.expect_no_active_lookups_empty_network(); } #[test] fn test_parent_lookup_too_deep() { let mut rig = TestRig::test_setup(); - let mut blocks = rig.rand_blockchain(parent_lookup::PARENT_DEPTH_TOLERANCE); + let mut blocks = rig.rand_blockchain(PARENT_DEPTH_TOLERANCE); let peer_id = rig.new_connected_peer(); let trigger_block = blocks.pop().unwrap(); @@ -962,7 +1263,7 @@ fn test_parent_lookup_too_deep() { rig.trigger_unknown_parent_block(peer_id, trigger_block); for block in blocks.into_iter().rev() { - let id = rig.expect_parent_request_block_and_blobs(block.canonical_root()); + let id = rig.expect_block_parent_request(block.canonical_root()); // the block rig.parent_lookup_block_response(id, peer_id, Some(block.clone())); // the stream termination @@ -976,19 +1277,80 @@ fn test_parent_lookup_too_deep() { ) } - rig.expect_penalty(peer_id); + rig.expect_penalty(peer_id, 
"chain_too_long"); assert!(rig.failed_chains_contains(&chain_hash)); } #[test] -fn test_parent_lookup_disconnection() { +fn test_lookup_peer_disconnected_no_peers_left_while_request() { let mut rig = TestRig::test_setup(); let peer_id = rig.new_connected_peer(); let trigger_block = rig.rand_block(); rig.trigger_unknown_parent_block(peer_id, trigger_block.into()); - rig.peer_disconnected(peer_id); - assert_eq!(rig.active_parent_lookups_count(), 0); + rig.rpc_error_all_active_requests(peer_id); + rig.expect_no_active_lookups(); +} + +#[test] +fn test_lookup_peer_disconnected_no_peers_left_not_while_request() { + let mut rig = TestRig::test_setup(); + let peer_id = rig.new_connected_peer(); + let trigger_block = rig.rand_block(); + rig.trigger_unknown_parent_block(peer_id, trigger_block.into()); + rig.peer_disconnected(peer_id); + // Note: this test case may be removed in the future. It's not strictly necessary to drop a + // lookup if there are no peers left. Lookup should only be dropped if it can not make progress + rig.expect_no_active_lookups(); +} + +#[test] +fn test_lookup_disconnection_peer_left() { + let mut rig = TestRig::test_setup(); + let peer_ids = (0..2).map(|_| rig.new_connected_peer()).collect::>(); + let disconnecting_peer = *peer_ids.first().unwrap(); + let block_root = Hash256::random(); + // lookup should have two peers associated with the same block + for peer_id in peer_ids.iter() { + rig.trigger_unknown_block_from_attestation(block_root, *peer_id); + } + // Disconnect the first peer only, which is the one handling the request + rig.peer_disconnected(disconnecting_peer); + rig.rpc_error_all_active_requests(disconnecting_peer); + rig.assert_single_lookups_count(1); +} + +#[test] +fn test_lookup_add_peers_to_parent() { + let mut r = TestRig::test_setup(); + let peer_id_1 = r.new_connected_peer(); + let peer_id_2 = r.new_connected_peer(); + let blocks = r.rand_blockchain(5); + let last_block_root = blocks.last().unwrap().canonical_root(); + // 
Create a chain of lookups + for block in &blocks { + r.trigger_unknown_parent_block(peer_id_1, block.clone()); + } + r.trigger_unknown_block_from_attestation(last_block_root, peer_id_2); + for block in blocks.iter().take(blocks.len() - 1) { + // Parent has the original unknown parent event peer + new peer + r.assert_lookup_peers(block.canonical_root(), vec![peer_id_1, peer_id_2]); + } + // Child lookup only has the unknown attestation peer + r.assert_lookup_peers(last_block_root, vec![peer_id_2]); +} + +#[test] +fn test_skip_creating_failed_parent_lookup() { + let mut rig = TestRig::test_setup(); + let (_, block, parent_root, _) = rig.rand_block_and_parent(); + let peer_id = rig.new_connected_peer(); + rig.insert_failed_chain(parent_root); + rig.trigger_unknown_parent_block(peer_id, block.into()); + // Expect single penalty for peer, despite dropping two lookups + rig.expect_single_penalty(peer_id, "failed_chain"); + // Both current and parent lookup should be rejected + rig.expect_no_active_lookups(); } #[test] @@ -1000,7 +1362,7 @@ fn test_single_block_lookup_ignored_response() { // Trigger the request rig.trigger_unknown_block_from_attestation(block.canonical_root(), peer_id); - let id = rig.expect_lookup_request_block_and_blobs(block.canonical_root()); + let id = rig.expect_block_lookup_request(block.canonical_root()); // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. @@ -1015,9 +1377,8 @@ fn test_single_block_lookup_ignored_response() { // after processing. 
rig.single_lookup_block_response(id, peer_id, None); // Send an Ignored response, the request should be dropped - rig.single_block_component_processed(id, BlockProcessingResult::Ignored); - rig.expect_empty_network(); - assert_eq!(rig.active_single_lookups_count(), 0); + rig.single_block_component_processed(id.lookup_id, BlockProcessingResult::Ignored); + rig.expect_no_active_lookups_empty_network(); } #[test] @@ -1028,8 +1389,10 @@ fn test_parent_lookup_ignored_response() { let peer_id = rig.new_connected_peer(); // Trigger the request - rig.trigger_unknown_parent_block(peer_id, block.into()); - let id = rig.expect_parent_request_block_and_blobs(parent_root); + rig.trigger_unknown_parent_block(peer_id, block.clone().into()); + let id = rig.expect_block_parent_request(parent_root); + // Note: single block lookup for current `block` does not trigger any request because it does + // not have blobs, and the block is already cached // Peer sends the right block, it should be sent for processing. Peer should not be penalized. rig.parent_lookup_block_response(id, peer_id, Some(parent.into())); @@ -1039,7 +1402,7 @@ fn test_parent_lookup_ignored_response() { // Return an Ignored result. The request should be dropped rig.parent_block_processed(block_root, BlockProcessingResult::Ignored); rig.expect_empty_network(); - assert_eq!(rig.active_parent_lookups_count(), 0); + rig.expect_no_active_lookups(); } /// This is a regression test. 
@@ -1056,8 +1419,8 @@ fn test_same_chain_race_condition() { let chain_hash = trigger_block.canonical_root(); rig.trigger_unknown_parent_block(peer_id, trigger_block.clone()); - for (i, block) in blocks.into_iter().rev().enumerate() { - let id = rig.expect_parent_request_block_and_blobs(block.canonical_root()); + for (i, block) in blocks.clone().into_iter().rev().enumerate() { + let id = rig.expect_block_parent_request(block.canonical_root()); // the block rig.parent_lookup_block_response(id, peer_id, Some(block.clone())); // the stream termination @@ -1066,41 +1429,137 @@ fn test_same_chain_race_condition() { rig.expect_block_process(ResponseType::Block); // the processing result if i + 2 == depth { - // one block was removed + rig.log(&format!("Block {i} was removed and is already known")); rig.parent_block_processed( chain_hash, BlockError::BlockIsAlreadyKnown(block.canonical_root()).into(), ) } else { + rig.log(&format!("Block {i} ParentUnknown")); rig.parent_block_processed( chain_hash, BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(), ) } - rig.assert_parent_lookups_consistency(); } - // Processing succeeds, now the rest of the chain should be sent for processing. - rig.expect_parent_chain_process(); - // Try to get this block again while the chain is being processed. We should not request it again. 
let peer_id = rig.new_connected_peer(); - rig.trigger_unknown_parent_block(peer_id, trigger_block); - rig.assert_parent_lookups_consistency(); + rig.trigger_unknown_parent_block(peer_id, trigger_block.clone()); + rig.expect_empty_network(); - rig.parent_chain_processed_success(chain_hash); - assert_eq!(rig.active_parent_lookups_count(), 0); + // Add a peer to the tip child lookup which has zero peers + rig.trigger_unknown_block_from_attestation(trigger_block.canonical_root(), peer_id); + + rig.log("Processing succeeds, now the rest of the chain should be sent for processing."); + for block in blocks.iter().skip(1).chain(&[trigger_block]) { + rig.expect_parent_chain_process(); + rig.single_block_component_processed_imported(block.canonical_root()); + } + rig.expect_no_active_lookups_empty_network(); +} + +#[test] +fn block_in_da_checker_skips_download() { + let Some(mut r) = TestRig::test_setup_after_deneb() else { + return; + }; + let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); + let block_root = block.canonical_root(); + let peer_id = r.new_connected_peer(); + r.insert_block_to_da_checker(block.into()); + r.trigger_unknown_block_from_attestation(block_root, peer_id); + // Should not trigger block request + let id = r.expect_blob_lookup_request(block_root); + r.expect_empty_network(); + // Resolve blob and expect lookup completed + r.complete_single_lookup_blob_lookup_valid(id, peer_id, blobs, true); + r.expect_no_active_lookups(); +} + +#[test] +fn block_in_processing_cache_becomes_invalid() { + let Some(mut r) = TestRig::test_setup_after_deneb() else { + return; + }; + let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); + let block_root = block.canonical_root(); + let peer_id = r.new_connected_peer(); + r.insert_block_to_processing_cache(block.clone().into()); + r.trigger_unknown_block_from_attestation(block_root, peer_id); + // Should trigger blob request + let id = r.expect_blob_lookup_request(block_root); + // Should not trigger 
block request + r.expect_empty_network(); + // Simulate invalid block, removing it from processing cache + r.simulate_block_gossip_processing_becomes_invalid(block_root); + // Should download block, then issue blobs request + r.complete_lookup_block_download(block); + // Should not trigger block or blob request + r.expect_empty_network(); + r.complete_lookup_block_import_valid(block_root, false); + // Resolve blob and expect lookup completed + r.complete_single_lookup_blob_lookup_valid(id, peer_id, blobs, true); + r.expect_no_active_lookups(); +} + +#[test] +fn block_in_processing_cache_becomes_valid_imported() { + let Some(mut r) = TestRig::test_setup_after_deneb() else { + return; + }; + let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); + let block_root = block.canonical_root(); + let peer_id = r.new_connected_peer(); + r.insert_block_to_processing_cache(block.clone().into()); + r.trigger_unknown_block_from_attestation(block_root, peer_id); + // Should trigger blob request + let id = r.expect_blob_lookup_request(block_root); + // Should not trigger block request + r.expect_empty_network(); + // Resolve the block from processing step + r.simulate_block_gossip_processing_becomes_valid_missing_components(block.into()); + // Should not trigger block or blob request + r.expect_empty_network(); + // Resolve blob and expect lookup completed + r.complete_single_lookup_blob_lookup_valid(id, peer_id, blobs, true); + r.expect_no_active_lookups(); +} + +// IGNORE: wait for change that delays blob fetching to knowing the block +#[ignore] +#[test] +fn blobs_in_da_checker_skip_download() { + let Some(mut r) = TestRig::test_setup_after_deneb() else { + return; + }; + let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); + let block_root = block.canonical_root(); + let peer_id = r.new_connected_peer(); + for blob in blobs { + r.insert_blob_to_da_checker(blob); + } + r.trigger_unknown_block_from_attestation(block_root, peer_id); + // Should download and 
process the block + r.complete_single_lookup_block_valid(block, true); + // Should not trigger blob request + r.expect_empty_network(); + r.expect_no_active_lookups(); } mod deneb_only { use super::*; - use beacon_chain::data_availability_checker::AvailabilityCheckError; + use beacon_chain::{ + block_verification_types::RpcBlock, data_availability_checker::AvailabilityCheckError, + }; use ssz_types::VariableList; + use std::collections::VecDeque; struct DenebTester { rig: TestRig, block: Arc>, blobs: Vec>>, + parent_block_roots: Vec, parent_block: VecDeque>>, parent_blobs: VecDeque>>>, unknown_parent_block: Option>>, @@ -1116,16 +1575,16 @@ mod deneb_only { enum RequestTrigger { AttestationUnknownBlock, - GossipUnknownParentBlock { num_parents: usize }, - GossipUnknownParentBlob { num_parents: usize }, + GossipUnknownParentBlock(usize), + GossipUnknownParentBlob(usize), } impl RequestTrigger { fn num_parents(&self) -> usize { match self { RequestTrigger::AttestationUnknownBlock => 0, - RequestTrigger::GossipUnknownParentBlock { num_parents } => *num_parents, - RequestTrigger::GossipUnknownParentBlob { num_parents } => *num_parents, + RequestTrigger::GossipUnknownParentBlock(num_parents) => *num_parents, + RequestTrigger::GossipUnknownParentBlob(num_parents) => *num_parents, } } } @@ -1143,6 +1602,7 @@ mod deneb_only { let num_parents = request_trigger.num_parents(); let mut parent_block_chain = VecDeque::with_capacity(num_parents); let mut parent_blobs_chain = VecDeque::with_capacity(num_parents); + let mut parent_block_roots = vec![]; for _ in 0..num_parents { // Set the current block as the parent. let parent_root = block.canonical_root(); @@ -1150,6 +1610,7 @@ mod deneb_only { let parent_blobs = blobs.clone(); parent_block_chain.push_front(parent_block); parent_blobs_chain.push_front(parent_blobs); + parent_block_roots.push(parent_root); // Create the next block. 
let (child_block, child_blobs) = @@ -1173,8 +1634,7 @@ mod deneb_only { peer_id, block_root, )); let block_req_id = rig.expect_block_lookup_request(block_root); - let blob_req_id = rig.expect_blob_lookup_request(block_root); - (Some(block_req_id), Some(blob_req_id), None, None) + (Some(block_req_id), None, None, None) } RequestTrigger::GossipUnknownParentBlock { .. } => { rig.send_sync_message(SyncMessage::UnknownParentBlock( @@ -1184,33 +1644,18 @@ mod deneb_only { )); let parent_root = block.parent_root(); - let blob_req_id = rig.expect_blob_lookup_request(block_root); let parent_block_req_id = rig.expect_block_parent_request(parent_root); - let parent_blob_req_id = rig.expect_blob_parent_request(parent_root); rig.expect_empty_network(); // expect no more requests - ( - None, - Some(blob_req_id), - Some(parent_block_req_id), - Some(parent_blob_req_id), - ) + (None, None, Some(parent_block_req_id), None) } RequestTrigger::GossipUnknownParentBlob { .. } => { let single_blob = blobs.first().cloned().unwrap(); let parent_root = single_blob.block_parent_root(); rig.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, single_blob)); - let block_req_id = rig.expect_block_lookup_request(block_root); - let blobs_req_id = rig.expect_blob_lookup_request(block_root); let parent_block_req_id = rig.expect_block_parent_request(parent_root); - let parent_blob_req_id = rig.expect_blob_parent_request(parent_root); rig.expect_empty_network(); // expect no more requests - ( - Some(block_req_id), - Some(blobs_req_id), - Some(parent_block_req_id), - Some(parent_blob_req_id), - ) + (None, None, Some(parent_block_req_id), None) } }; @@ -1220,6 +1665,7 @@ mod deneb_only { blobs, parent_block: parent_block_chain, parent_blobs: parent_blobs_chain, + parent_block_roots, unknown_parent_block: None, unknown_parent_blobs: None, peer_id, @@ -1232,6 +1678,18 @@ mod deneb_only { }) } + fn log(self, msg: &str) -> Self { + self.rig.log(msg); + self + } + + fn 
trigger_unknown_block_from_attestation(mut self) -> Self { + let block_root = self.block.canonical_root(); + self.rig + .trigger_unknown_block_from_attestation(block_root, self.peer_id); + self + } + fn parent_block_response(mut self) -> Self { self.rig.expect_empty_network(); let block = self.parent_block.pop_front().unwrap().clone(); @@ -1242,10 +1700,27 @@ mod deneb_only { Some(block), ); - assert_eq!(self.rig.active_parent_lookups_count(), 1); + self.rig.assert_parent_lookups_count(1); self } + fn parent_block_response_expect_blobs(mut self) -> Self { + self.rig.expect_empty_network(); + let block = self.parent_block.pop_front().unwrap().clone(); + let _ = self.unknown_parent_block.insert(block.clone()); + self.rig.parent_lookup_block_response( + self.parent_block_req_id.expect("parent request id"), + self.peer_id, + Some(block), + ); + + // Expect blobs request after sending block + let s = self.expect_parent_blobs_request(); + + s.rig.assert_parent_lookups_count(1); + s + } + fn parent_blob_response(mut self) -> Self { let blobs = self.parent_blobs.pop_front().unwrap(); let _ = self.unknown_parent_blobs.insert(blobs.clone()); @@ -1258,7 +1733,7 @@ mod deneb_only { assert_eq!(self.rig.active_parent_lookups_count(), 1); } self.rig.parent_lookup_blob_response( - self.parent_blob_req_id.expect("blob request id"), + self.parent_blob_req_id.expect("parent blob request id"), self.peer_id, None, ); @@ -1267,7 +1742,7 @@ mod deneb_only { } fn block_response_triggering_process(self) -> Self { - let mut me = self.block_response(); + let mut me = self.block_response_and_expect_blob_request(); me.rig.expect_block_process(ResponseType::Block); // The request should still be active. @@ -1275,7 +1750,7 @@ mod deneb_only { me } - fn block_response(mut self) -> Self { + fn block_response_and_expect_blob_request(mut self) -> Self { // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. 
self.rig.single_lookup_block_response( @@ -1283,21 +1758,27 @@ mod deneb_only { self.peer_id, Some(self.block.clone()), ); - self.rig.expect_empty_network(); + // After responding with block the node will issue a blob request + let mut s = self.expect_blobs_request(); + + s.rig.expect_empty_network(); // The request should still be active. - assert_eq!(self.rig.active_single_lookups_count(), 1); - self + s.rig.assert_lookup_is_active(s.block.canonical_root()); + s } fn blobs_response(mut self) -> Self { + self.rig + .log(&format!("blobs response {}", self.blobs.len())); for blob in &self.blobs { self.rig.single_lookup_blob_response( self.blob_req_id.expect("blob request id"), self.peer_id, Some(blob.clone()), ); - assert_eq!(self.rig.active_single_lookups_count(), 1); + self.rig + .assert_lookup_is_active(self.block.canonical_root()); } self.rig.single_lookup_blob_response( self.blob_req_id.expect("blob request id"), @@ -1338,15 +1819,6 @@ mod deneb_only { self } - fn empty_parent_block_response(mut self) -> Self { - self.rig.parent_lookup_block_response( - self.parent_block_req_id.expect("block request id"), - self.peer_id, - None, - ); - self - } - fn empty_parent_blobs_response(mut self) -> Self { self.rig.parent_lookup_blob_response( self.parent_blob_req_id.expect("blob request id"), @@ -1356,29 +1828,88 @@ mod deneb_only { self } + fn block_missing_components(mut self) -> Self { + self.rig.single_block_component_processed( + self.block_req_id.expect("block request id").lookup_id, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + self.block.slot(), + self.block_root, + )), + ); + self.rig.expect_empty_network(); + self.rig.assert_single_lookups_count(1); + self + } + + fn blob_imported(mut self) -> Self { + self.rig.single_blob_component_processed( + self.blob_req_id.expect("blob request id").lookup_id, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(self.block_root)), + ); + self.rig.expect_empty_network(); + 
self.rig.assert_single_lookups_count(0); + self + } + fn block_imported(mut self) -> Self { // Missing blobs should be the request is not removed, the outstanding blobs request should // mean we do not send a new request. self.rig.single_block_component_processed( - self.block_req_id.expect("block request id"), + self.block_req_id + .or(self.blob_req_id) + .expect("block request id") + .lookup_id, BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(self.block_root)), ); self.rig.expect_empty_network(); - assert_eq!(self.rig.active_single_lookups_count(), 0); + self.rig.assert_single_lookups_count(0); self } fn parent_block_imported(mut self) -> Self { + let parent_root = *self.parent_block_roots.first().unwrap(); + self.rig + .log(&format!("parent_block_imported {parent_root:?}")); self.rig.parent_block_processed( self.block_root, - BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(self.block_root)), + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(parent_root)), ); - self.rig.expect_empty_network(); - assert_eq!(self.rig.active_parent_lookups_count(), 0); + self.rig.expect_no_requests_for(parent_root); + self.rig.assert_parent_lookups_count(0); + self + } + + fn parent_block_missing_components(mut self) -> Self { + let parent_root = *self.parent_block_roots.first().unwrap(); + self.rig + .log(&format!("parent_block_missing_components {parent_root:?}")); + self.rig.parent_block_processed( + self.block_root, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + Slot::new(0), + parent_root, + )), + ); + self.rig.expect_no_requests_for(parent_root); + self + } + + fn parent_blob_imported(mut self) -> Self { + let parent_root = *self.parent_block_roots.first().unwrap(); + self.rig + .log(&format!("parent_blob_imported {parent_root:?}")); + self.rig.parent_blob_processed( + self.block_root, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(parent_root)), + ); + + 
self.rig.expect_no_requests_for(parent_root); + self.rig.assert_parent_lookups_count(0); self } fn parent_block_unknown_parent(mut self) -> Self { + self.rig.log("parent_block_unknown_parent"); let block = self.unknown_parent_block.take().unwrap(); // Now this block is the one we expect requests from self.block = block.clone(); @@ -1407,50 +1938,69 @@ mod deneb_only { fn invalid_block_processed(mut self) -> Self { self.rig.single_block_component_processed( - self.block_req_id.expect("block request id"), + self.block_req_id.expect("block request id").lookup_id, BlockProcessingResult::Err(BlockError::ProposalSignatureInvalid), ); - assert_eq!(self.rig.active_single_lookups_count(), 1); + self.rig.assert_single_lookups_count(1); self } fn invalid_blob_processed(mut self) -> Self { - self.rig.single_block_component_processed( - self.blob_req_id.expect("blob request id"), + self.rig.log("invalid_blob_processed"); + self.rig.single_blob_component_processed( + self.blob_req_id.expect("blob request id").lookup_id, BlockProcessingResult::Err(BlockError::AvailabilityCheck( AvailabilityCheckError::KzgVerificationFailed, )), ); - assert_eq!(self.rig.active_single_lookups_count(), 1); + self.rig.assert_single_lookups_count(1); self } fn missing_components_from_block_request(mut self) -> Self { self.rig.single_block_component_processed( - self.block_req_id.expect("block request id"), + self.block_req_id.expect("block request id").lookup_id, BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( self.slot, self.block_root, )), ); - assert_eq!(self.rig.active_single_lookups_count(), 1); + // Add block to da_checker so blobs request can continue + self.rig.insert_block_to_da_checker(self.block.clone()); + + self.rig.assert_single_lookups_count(1); self } - fn missing_components_from_blob_request(mut self) -> Self { - self.rig.single_blob_component_processed( - self.blob_req_id.expect("blob request id"), - 
BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( - self.slot, - self.block_root, - )), - ); - assert_eq!(self.rig.active_single_lookups_count(), 1); - self + fn complete_current_block_and_blobs_lookup(self) -> Self { + self.expect_block_request() + .block_response_and_expect_blob_request() + .blobs_response() + // TODO: Should send blobs for processing + .expect_block_process() + .block_imported() } - fn expect_penalty(mut self) -> Self { - self.rig.expect_penalty(self.peer_id); + fn parent_block_then_empty_parent_blobs(self) -> Self { + self.log( + " Return empty blobs for parent, block errors with missing components, downscore", + ) + .parent_block_response() + .expect_parent_blobs_request() + .empty_parent_blobs_response() + .expect_penalty("NotEnoughResponsesReturned") + .log("Re-request parent blobs, succeed and import parent") + .expect_parent_blobs_request() + .parent_blob_response() + .expect_block_process() + .parent_block_missing_components() + // Insert new peer into child request before completing parent + .trigger_unknown_block_from_attestation() + .parent_blob_imported() + } + + fn expect_penalty(mut self, expect_penalty_msg: &'static str) -> Self { + self.rig.expect_penalty(self.peer_id, expect_penalty_msg); self } fn expect_no_penalty(mut self) -> Self { @@ -1506,14 +2056,14 @@ mod deneb_only { self.blobs.push(first_blob); self } - fn expect_parent_chain_process(mut self) -> Self { - self.rig.expect_parent_chain_process(); - self - } fn expect_block_process(mut self) -> Self { self.rig.expect_block_process(ResponseType::Block); self } + fn expect_no_active_lookups(self) -> Self { + self.rig.expect_no_active_lookups(); + self + } fn search_parent_dup(mut self) -> Self { self.rig .trigger_unknown_parent_block(self.peer_id, self.block.clone()); @@ -1526,45 +2076,12 @@ mod deneb_only { let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { return; }; - tester - .block_response_triggering_process() + 
.block_response_and_expect_blob_request() .blobs_response() + .block_missing_components() // blobs not yet imported .blobs_response_was_valid() - .block_imported(); - } - - #[test] - fn single_block_and_blob_lookup_blobs_returned_first_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - - tester - .blobs_response() - .blobs_response_was_valid() - .block_response_triggering_process() - .block_imported(); - } - - #[test] - fn single_block_and_blob_lookup_empty_response_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - - tester - .empty_block_response() - .expect_penalty() - .expect_block_request() - .expect_no_blobs_request() - .empty_blobs_response() - .expect_empty_beacon_processor() - .expect_no_penalty() - .expect_no_block_request() - .expect_no_blobs_request() - .block_response_triggering_process() - .missing_components_from_block_request(); + .blob_imported(); // now blobs resolve as imported } #[test] @@ -1572,48 +2089,28 @@ mod deneb_only { let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { return; }; - tester - .block_response_triggering_process() + .block_response_and_expect_blob_request() .missing_components_from_block_request() .empty_blobs_response() - .missing_components_from_blob_request() - .expect_penalty() + .expect_penalty("NotEnoughResponsesReturned") .expect_blobs_request() .expect_no_block_request(); } - #[test] - fn single_blob_response_then_empty_block_response_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - - tester - .blobs_response() - .blobs_response_was_valid() - .expect_no_penalty_and_no_requests() - .missing_components_from_blob_request() - .empty_block_response() - .expect_penalty() - .expect_block_request() - .expect_no_blobs_request(); - } - #[test] fn 
single_invalid_block_response_then_blob_response_attestation() { let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { return; }; - tester .block_response_triggering_process() .invalid_block_processed() - .expect_penalty() + .expect_penalty("lookup_block_processing_failure") .expect_block_request() .expect_no_blobs_request() .blobs_response() - .missing_components_from_blob_request() + // blobs not sent for processing until the block is processed .expect_no_penalty_and_no_requests(); } @@ -1622,13 +2119,12 @@ mod deneb_only { let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { return; }; - tester .block_response_triggering_process() .missing_components_from_block_request() .blobs_response() .invalid_blob_processed() - .expect_penalty() + .expect_penalty("lookup_blobs_processing_failure") .expect_blobs_request() .expect_no_block_request(); } @@ -1638,14 +2134,12 @@ mod deneb_only { let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { return; }; - tester .block_response_triggering_process() .missing_components_from_block_request() .invalidate_blobs_too_few() .blobs_response() - .missing_components_from_blob_request() - .expect_penalty() + .expect_penalty("NotEnoughResponsesReturned") .expect_blobs_request() .expect_no_block_request(); } @@ -1655,303 +2149,156 @@ mod deneb_only { let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { return; }; - tester .block_response_triggering_process() .invalidate_blobs_too_many() .blobs_response() - .expect_penalty() - .expect_blobs_request() + .expect_penalty("TooManyResponses") + // Network context returns "download success" because the request has enough blobs + it + // downscores the peer for returning too many. 
.expect_no_block_request(); } - #[test] - fn too_few_blobs_response_then_block_response_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - - tester - .invalidate_blobs_too_few() - .blobs_response() - .blobs_response_was_valid() - .expect_no_penalty_and_no_requests() - .block_response_triggering_process(); - } - - #[test] - fn too_many_blobs_response_then_block_response_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - - tester - .invalidate_blobs_too_many() - .blobs_response() - .expect_penalty() - .expect_blobs_request() - .expect_no_block_request() - .block_response_triggering_process(); - } - + // Test peer returning block that has unknown parent, and a new lookup is created #[test] fn parent_block_unknown_parent() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) - else { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock(1)) else { return; }; - tester - .blobs_response() .expect_empty_beacon_processor() - .parent_block_response() + .parent_block_response_expect_blobs() .parent_blob_response() .expect_block_process() .parent_block_unknown_parent() .expect_parent_block_request() - .expect_parent_blobs_request() .expect_empty_beacon_processor(); } + // Test peer returning invalid (processing) block, expect retry #[test] fn parent_block_invalid_parent() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) - else { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock(1)) else { return; }; - tester - .blobs_response() - .expect_empty_beacon_processor() - .parent_block_response() + .parent_block_response_expect_blobs() .parent_blob_response() .expect_block_process() .invalid_parent_processed() - .expect_penalty() + .expect_penalty("lookup_block_processing_failure") 
.expect_parent_block_request() - .expect_parent_blobs_request() .expect_empty_beacon_processor(); } + // Tests that if a peer does not respond with a block, we downscore and retry the block only #[test] - fn parent_block_and_blob_lookup_parent_returned_first() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) - else { + fn empty_block_is_retried() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { return; }; - tester - .parent_block_response() - .parent_blob_response() - .expect_block_process() - .parent_block_imported() - .blobs_response() - .expect_parent_chain_process(); - } - - #[test] - fn parent_block_and_blob_lookup_child_returned_first() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) - else { - return; - }; - - tester - .blobs_response() - .expect_no_penalty_and_no_requests() - .parent_block_response() - .parent_blob_response() - .expect_block_process() - .parent_block_imported() - .expect_parent_chain_process(); - } - - #[test] - fn empty_parent_block_then_parent_blob() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) - else { - return; - }; - - tester - .empty_parent_block_response() - .expect_penalty() - .expect_parent_block_request() + .empty_block_response() + .expect_penalty("NoResponseReturned") + .expect_block_request() .expect_no_blobs_request() - .parent_blob_response() - .expect_empty_beacon_processor() - .parent_block_response() - .expect_block_process() - .parent_block_imported() + .block_response_and_expect_blob_request() .blobs_response() - .expect_parent_chain_process(); + .block_imported() + .expect_no_active_lookups(); } #[test] - fn empty_parent_blobs_then_parent_block() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) - else { + fn parent_block_then_empty_parent_blobs() { + let 
Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock(1)) else { return; }; - tester - .blobs_response() - .empty_parent_blobs_response() - .expect_no_penalty_and_no_requests() - .parent_block_response() - .expect_penalty() - .expect_parent_blobs_request() - .parent_blob_response() - .expect_block_process() - .parent_block_imported() - .expect_parent_chain_process(); + .parent_block_then_empty_parent_blobs() + .log("resolve original block trigger blobs request and import") + // Should not have block request, it is cached + .expect_blobs_request() + // TODO: Should send blobs for processing + .block_imported() + .expect_no_active_lookups(); } #[test] fn parent_blob_unknown_parent() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) - else { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(1)) else { return; }; - tester - .block_response() .expect_empty_beacon_processor() - .parent_block_response() + .parent_block_response_expect_blobs() .parent_blob_response() .expect_block_process() .parent_block_unknown_parent() .expect_parent_block_request() - .expect_parent_blobs_request() .expect_empty_beacon_processor(); } #[test] fn parent_blob_invalid_parent() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) - else { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(1)) else { return; }; - tester - .block_response() .expect_empty_beacon_processor() - .parent_block_response() + .parent_block_response_expect_blobs() .parent_blob_response() .expect_block_process() .invalid_parent_processed() - .expect_penalty() + .expect_penalty("lookup_block_processing_failure") .expect_parent_block_request() - .expect_parent_blobs_request() + // blobs are not sent until block is processed .expect_empty_beacon_processor(); } #[test] fn parent_block_and_blob_lookup_parent_returned_first_blob_trigger() { - let 
Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) - else { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(1)) else { return; }; - tester .parent_block_response() - .parent_blob_response() - .expect_block_process() - .parent_block_imported() - .block_response() - .expect_parent_chain_process(); - } - - #[test] - fn parent_block_and_blob_lookup_child_returned_first_blob_trigger() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) - else { - return; - }; - - tester - .block_response() - .expect_no_penalty_and_no_requests() - .parent_block_response() - .parent_blob_response() - .expect_block_process() - .parent_block_imported() - .expect_parent_chain_process(); - } - - #[test] - fn empty_parent_block_then_parent_blob_blob_trigger() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) - else { - return; - }; - - tester - .empty_parent_block_response() - .expect_penalty() - .expect_parent_block_request() - .expect_no_blobs_request() - .parent_blob_response() - .expect_empty_beacon_processor() - .parent_block_response() - .expect_block_process() - .parent_block_imported() - .block_response() - .expect_parent_chain_process(); - } - - #[test] - fn empty_parent_blobs_then_parent_block_blob_trigger() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) - else { - return; - }; - - tester - .block_response() - .empty_parent_blobs_response() - .expect_no_penalty_and_no_requests() - .parent_block_response() - .expect_penalty() .expect_parent_blobs_request() .parent_blob_response() .expect_block_process() + .trigger_unknown_block_from_attestation() .parent_block_imported() - .expect_parent_chain_process(); + .complete_current_block_and_blobs_lookup() + .expect_no_active_lookups(); + } + + #[test] + fn parent_block_then_empty_parent_blobs_blob_trigger() { + let 
Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(1)) else { + return; + }; + tester + .parent_block_then_empty_parent_blobs() + .log("resolve original block trigger blobs request and import") + .complete_current_block_and_blobs_lookup() + .expect_no_active_lookups(); } #[test] fn parent_blob_unknown_parent_chain() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 2 }) - else { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(2)) else { return; }; - tester - .block_response() .expect_empty_beacon_processor() - .parent_block_response() + .parent_block_response_expect_blobs() .parent_blob_response() .expect_no_penalty() .expect_block_process() .parent_block_unknown_parent() .expect_parent_block_request() - .expect_parent_blobs_request() .expect_empty_beacon_processor() .parent_block_response() + .expect_parent_blobs_request() .parent_blob_response() .expect_no_penalty() .expect_block_process(); @@ -1959,12 +2306,9 @@ mod deneb_only { #[test] fn unknown_parent_block_dup() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) - else { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock(1)) else { return; }; - tester .search_parent_dup() .expect_no_blobs_request() @@ -1973,18 +2317,18 @@ mod deneb_only { #[test] fn unknown_parent_blob_dup() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) - else { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(1)) else { return; }; - tester .search_parent_dup() .expect_no_blobs_request() .expect_no_block_request(); } + // This test no longer applies, we don't issue requests for child lookups + // Keep for after updating rules on fetching blocks only first + #[ignore] #[test] fn no_peer_penalty_when_rpc_response_already_known_from_gossip() { let Some(mut r) = 
TestRig::test_setup_after_deneb() else { diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 6a3b568c1c..d159733cbc 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -130,4 +130,30 @@ mod tests { assert!(info.is_finished()); info.into_responses().unwrap(); } + + #[test] + fn empty_blobs_into_responses() { + let mut info = BlocksAndBlobsRequestInfo::::new(ByRangeRequestType::BlocksAndBlobs); + let mut rng = XorShiftRng::from_seed([42; 16]); + let blocks = (0..4) + .map(|_| { + // Always generate some blobs. + generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Number(3), &mut rng).0 + }) + .collect::>(); + + // Send blocks and complete terminate response + for block in blocks { + info.add_block_response(Some(block.into())); + } + info.add_block_response(None); + // Expect no blobs returned + info.add_sidecar_response(None); + + // Assert response is finished and RpcBlocks can be constructed, even if blobs weren't returned. + // This makes sure we don't expect blobs here when they have expired. Checking this logic should + // be hendled elsewhere. + assert!(info.is_finished()); + info.into_responses().unwrap(); + } } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 9c17c6a151..4c1a1e6b67 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -34,7 +34,6 @@ //! search for the block and subsequently search for parents if needed. 
use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart}; -use super::block_lookups::common::LookupType; use super::block_lookups::BlockLookups; use super::network_context::{BlockOrBlob, RangeRequestId, RpcEvent, SyncNetworkContext}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; @@ -42,11 +41,13 @@ use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; -use crate::sync::block_lookups::{BlobRequestState, BlockRequestState}; +use crate::sync::block_lookups::{ + BlobRequestState, BlockComponent, BlockRequestState, DownloadResult, +}; use crate::sync::block_sidecar_coupling::BlocksAndBlobsRequestInfo; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::block_verification_types::RpcBlock; -use beacon_chain::data_availability_checker::ChildComponents; +use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, EngineState, }; @@ -55,13 +56,12 @@ use lighthouse_network::rpc::RPCError; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; use lighthouse_network::{PeerAction, PeerId}; -use slog::{crit, debug, error, info, trace, warn, Logger}; -use std::ops::IndexMut; +use lru_cache::LRUTimeCache; +use slog::{crit, debug, error, info, o, trace, warn, Logger}; use std::ops::Sub; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; -use types::blob_sidecar::FixedBlobSidecarList; use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync @@ -73,13 +73,17 @@ use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// blocks for. 
pub const SLOT_IMPORT_TOLERANCE: usize = 32; +/// Suppress duplicated `UnknownBlockHashFromAttestation` events for some duration of time. In +/// practice peers are likely to send the same root during a single slot. 30 seconds is a rather +/// arbitrary number that covers a full slot, but allows recovery if sync get stuck for a few slots. +const NOTIFIED_UNKNOWN_ROOT_EXPIRY_SECONDS: u64 = 30; + pub type Id = u32; #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub struct SingleLookupReqId { - pub id: Id, - pub req_counter: Id, - pub lookup_type: LookupType, + pub lookup_id: Id, + pub req_id: Id, } /// Id of rpc requests sent by sync to the network. @@ -146,6 +150,9 @@ pub enum SyncMessage { process_type: BlockProcessType, result: BlockProcessingResult, }, + + /// A block from gossip has completed processing, + GossipBlockProcessResult { block_root: Hash256, imported: bool }, } /// The type of processing specified for a received block. @@ -153,7 +160,6 @@ pub enum SyncMessage { pub enum BlockProcessType { SingleBlock { id: Id }, SingleBlob { id: Id }, - ParentLookup { chain_hash: Hash256 }, } #[derive(Debug)] @@ -199,6 +205,10 @@ pub struct SyncManager { backfill_sync: BackFillSync, block_lookups: BlockLookups, + /// debounce duplicated `UnknownBlockHashFromAttestation` for the same root peer tuple. A peer + /// may forward us thousands of a attestations, each one triggering an individual event. Only + /// one event is useful, the rest generating log noise and wasted cycles + notified_unknown_roots: LRUTimeCache<(PeerId, Hash256)>, /// The logger for the import manager. 
log: Logger, @@ -252,29 +262,45 @@ impl SyncManager { beacon_chain.clone(), log.clone(), ), - range_sync: RangeSync::new(beacon_chain.clone(), log.clone()), - backfill_sync: BackFillSync::new(beacon_chain.clone(), network_globals, log.clone()), - block_lookups: BlockLookups::new( - beacon_chain.data_availability_checker.clone(), - log.clone(), + range_sync: RangeSync::new( + beacon_chain.clone(), + log.new(o!("service" => "range_sync")), ), + backfill_sync: BackFillSync::new( + beacon_chain.clone(), + network_globals, + log.new(o!("service" => "backfill_sync")), + ), + block_lookups: BlockLookups::new(log.new(o!("service"=> "lookup_sync"))), + notified_unknown_roots: LRUTimeCache::new(Duration::from_secs( + NOTIFIED_UNKNOWN_ROOT_EXPIRY_SECONDS, + )), log: log.clone(), } } #[cfg(test)] - pub(crate) fn active_single_lookups(&self) -> Vec { + pub(crate) fn active_single_lookups(&self) -> Vec { self.block_lookups.active_single_lookups() } #[cfg(test)] - pub(crate) fn active_parent_lookups(&self) -> Vec { - self.block_lookups.active_parent_lookups() + pub(crate) fn active_parent_lookups(&self) -> Vec> { + self.block_lookups + .active_parent_lookups() + .iter() + .map(|c| c.chain.clone()) + .collect() } #[cfg(test)] - pub(crate) fn failed_chains_contains(&mut self, chain_hash: &Hash256) -> bool { - self.block_lookups.failed_chains_contains(chain_hash) + pub(crate) fn get_failed_chains(&mut self) -> Vec { + self.block_lookups.get_failed_chains() + } + + #[cfg(test)] + pub(crate) fn insert_failed_chain(&mut self, block_root: Hash256) { + self.block_lookups.insert_failed_chain(block_root); } fn network_globals(&self) -> &NetworkGlobals { @@ -353,12 +379,9 @@ impl SyncManager { fn peer_disconnect(&mut self, peer_id: &PeerId) { self.range_sync.peer_disconnect(&mut self.network, peer_id); - self.block_lookups - .peer_disconnected(peer_id, &mut self.network); + self.block_lookups.peer_disconnected(peer_id); // Regardless of the outcome, we update the sync status. 
- let _ = self - .backfill_sync - .peer_disconnected(peer_id, &mut self.network); + let _ = self.backfill_sync.peer_disconnected(peer_id); self.update_sync_state(); } @@ -537,6 +560,11 @@ impl SyncManager { futures::stream::iter(ee_responsiveness_watch.await).flatten() }; + // min(LOOKUP_MAX_DURATION_*) is 15 seconds. The cost of calling prune_lookups more often is + // one iteration over the single lookups HashMap. This map is supposed to be very small < 10 + // unless there is a bug. + let mut prune_lookups_interval = tokio::time::interval(Duration::from_secs(15)); + // process any inbound messages loop { tokio::select! { @@ -546,6 +574,9 @@ impl SyncManager { Some(engine_state) = check_ee_stream.next(), if check_ee => { self.handle_new_execution_engine_state(engine_state); } + _ = prune_lookups_interval.tick() => { + self.block_lookups.prune_lookups(); + } } } } @@ -578,32 +609,38 @@ impl SyncManager { block_root, parent_root, block_slot, - block.into(), + BlockComponent::Block(DownloadResult { + value: block.block_cloned(), + block_root, + seen_timestamp: timestamp_now(), + peer_id, + }), ); } SyncMessage::UnknownParentBlob(peer_id, blob) => { let blob_slot = blob.slot(); let block_root = blob.block_root(); let parent_root = blob.block_parent_root(); - let blob_index = blob.index; - if blob_index >= T::EthSpec::max_blobs_per_block() as u64 { - warn!(self.log, "Peer sent blob with invalid index"; "index" => blob_index, "peer_id" => %peer_id); - return; - } - let mut blobs = FixedBlobSidecarList::default(); - *blobs.index_mut(blob_index as usize) = Some(blob); debug!(self.log, "Received unknown parent blob message"; "block_root" => %block_root, "parent_root" => %parent_root); self.handle_unknown_parent( peer_id, block_root, parent_root, blob_slot, - ChildComponents::new(block_root, None, Some(blobs)), + BlockComponent::Blob(DownloadResult { + value: blob, + block_root, + seen_timestamp: timestamp_now(), + peer_id, + }), ); } 
SyncMessage::UnknownBlockHashFromAttestation(peer_id, block_root) => { - debug!(self.log, "Received unknown block hash message"; "block_root" => %block_root); - self.handle_unknown_block_root(peer_id, block_root); + if !self.notified_unknown_roots.contains(&(peer_id, block_root)) { + self.notified_unknown_roots.insert((peer_id, block_root)); + debug!(self.log, "Received unknown block hash message"; "block_root" => ?block_root, "peer" => ?peer_id); + self.handle_unknown_block_root(peer_id, block_root); + } } SyncMessage::Disconnect(peer_id) => { debug!(self.log, "Received disconnected message"; "peer_id" => %peer_id); @@ -617,25 +654,17 @@ impl SyncManager { SyncMessage::BlockComponentProcessed { process_type, result, - } => match process_type { - BlockProcessType::SingleBlock { id } => self - .block_lookups - .single_block_component_processed::( - id, - result, - &mut self.network, - ), - BlockProcessType::SingleBlob { id } => self - .block_lookups - .single_block_component_processed::>( - id, - result, - &mut self.network, - ), - BlockProcessType::ParentLookup { chain_hash } => self - .block_lookups - .parent_block_processed(chain_hash, result, &mut self.network), - }, + } => self + .block_lookups + .on_processing_result(process_type, result, &mut self.network), + SyncMessage::GossipBlockProcessResult { + block_root, + imported, + } => self.block_lookups.on_external_processing_result( + block_root, + imported, + &mut self.network, + ), SyncMessage::BatchProcessed { sync_type, result } => match sync_type { ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => { self.range_sync.handle_block_process_result( @@ -661,9 +690,6 @@ impl SyncManager { } } } - ChainSegmentProcessId::ParentLookup(chain_hash) => self - .block_lookups - .parent_chain_processed(chain_hash, result, &mut self.network), }, } } @@ -674,23 +700,16 @@ impl SyncManager { block_root: Hash256, parent_root: Hash256, slot: Slot, - child_components: ChildComponents, + block_component: BlockComponent, ) { 
match self.should_search_for_block(Some(slot), &peer_id) { Ok(_) => { - self.block_lookups.search_parent( - slot, + self.block_lookups.search_child_and_parent( block_root, - parent_root, + block_component, peer_id, &mut self.network, ); - self.block_lookups.search_child_block( - block_root, - child_components, - &[peer_id], - &mut self.network, - ); } Err(reason) => { debug!(self.log, "Ignoring unknown parent request"; "block_root" => %block_root, "parent_root" => %parent_root, "reason" => reason); @@ -702,7 +721,7 @@ impl SyncManager { match self.should_search_for_block(None, &peer_id) { Ok(_) => { self.block_lookups - .search_block(block_root, &[peer_id], &mut self.network); + .search_unknown_block(block_root, &[peer_id], &mut self.network); } Err(reason) => { debug!(self.log, "Ignoring unknown block request"; "block_root" => %block_root, "reason" => reason); @@ -774,11 +793,6 @@ impl SyncManager { let dropped_single_blocks_requests = self.block_lookups.drop_single_block_requests(); - // - Parent lookups: - // Disabled while in this state. We drop current requests and don't search for new - // blocks. - let dropped_parent_chain_requests = self.block_lookups.drop_parent_chain_requests(); - // - Range: // We still send found peers to range so that it can keep track of potential chains // with respect to our current peers. Range will stop processing batches in the @@ -787,10 +801,9 @@ impl SyncManager { // - Backfill: Not affected by ee states, nothing to do. // Some logs. - if dropped_single_blocks_requests > 0 || dropped_parent_chain_requests > 0 { + if dropped_single_blocks_requests > 0 { debug!(self.log, "Execution engine not online. 
Dropping active requests."; "dropped_single_blocks_requests" => dropped_single_blocks_requests, - "dropped_parent_chain_requests" => dropped_parent_chain_requests, ); } } @@ -828,47 +841,14 @@ impl SyncManager { peer_id: PeerId, block: RpcEvent>>, ) { - if let Some(resp) = self.network.on_single_block_response(id, block) { - match resp { - Ok((block, seen_timestamp)) => match id.lookup_type { - LookupType::Current => self - .block_lookups - .single_lookup_response::( - id, - peer_id, - block, - seen_timestamp, - &mut self.network, - ), - LookupType::Parent => self - .block_lookups - .parent_lookup_response::( - id, - peer_id, - block, - seen_timestamp, - &mut self.network, - ), - }, - Err(error) => match id.lookup_type { - LookupType::Current => self - .block_lookups - .single_block_lookup_failed::( - id, - &peer_id, - &mut self.network, - error, - ), - LookupType::Parent => self - .block_lookups - .parent_lookup_failed::( - id, - &peer_id, - &mut self.network, - error, - ), - }, - } + if let Some(resp) = self.network.on_single_block_response(id, peer_id, block) { + self.block_lookups + .on_download_response::>( + id, + peer_id, + resp, + &mut self.network, + ) } } @@ -903,48 +883,14 @@ impl SyncManager { peer_id: PeerId, blob: RpcEvent>>, ) { - if let Some(resp) = self.network.on_single_blob_response(id, blob) { - match resp { - Ok((blobs, seen_timestamp)) => match id.lookup_type { - LookupType::Current => self - .block_lookups - .single_lookup_response::>( - id, - peer_id, - blobs, - seen_timestamp, - &mut self.network, - ), - LookupType::Parent => self - .block_lookups - .parent_lookup_response::>( - id, - peer_id, - blobs, - seen_timestamp, - &mut self.network, - ), - }, - - Err(error) => match id.lookup_type { - LookupType::Current => self - .block_lookups - .single_block_lookup_failed::>( - id, - &peer_id, - &mut self.network, - error, - ), - LookupType::Parent => self - .block_lookups - .parent_lookup_failed::>( - id, - &peer_id, - &mut self.network, - 
error, - ), - }, - } + if let Some(resp) = self.network.on_single_blob_response(id, peer_id, blob) { + self.block_lookups + .on_download_response::>( + id, + peer_id, + resp, + &mut self.network, + ) } } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index fc91270c1d..f3f82ee011 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -9,22 +9,22 @@ use super::range_sync::{BatchId, ByRangeRequestType, ChainId}; use crate::network_beacon_processor::NetworkBeaconProcessor; use crate::service::{NetworkMessage, RequestId}; use crate::status::ToStatusMessage; -use crate::sync::manager::SingleLookupReqId; +use crate::sync::block_lookups::SingleLookupId; +use crate::sync::manager::{BlockProcessType, SingleLookupReqId}; use beacon_chain::block_verification_types::RpcBlock; -use beacon_chain::validator_monitor::timestamp_now; -use beacon_chain::{BeaconChain, BeaconChainTypes, EngineState}; +use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; use fnv::FnvHashMap; use lighthouse_network::rpc::methods::BlobsByRangeRequest; use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError}; use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request}; pub use requests::LookupVerifyError; -use slog::{debug, trace, warn}; +use slog::{debug, error, trace, warn}; use std::collections::hash_map::Entry; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; use types::blob_sidecar::FixedBlobSidecarList; -use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; +use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock}; mod requests; @@ -52,34 +52,62 @@ pub enum RpcEvent { RPCError(RPCError), } -pub type RpcProcessingResult = Option>; +pub type RpcResponseResult = Result<(T, Duration), RpcResponseError>; -pub enum LookupFailure { +pub enum RpcResponseError { 
RpcError(RPCError), - LookupVerifyError(LookupVerifyError), + VerifyError(LookupVerifyError), } -impl std::fmt::Display for LookupFailure { +#[derive(Debug, PartialEq, Eq)] +pub enum RpcRequestSendError { + /// Network channel send failed + NetworkSendError, +} + +#[derive(Debug, PartialEq, Eq)] +pub enum SendErrorProcessor { + SendError, + ProcessorNotAvailable, +} + +impl std::fmt::Display for RpcResponseError { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { - LookupFailure::RpcError(e) => write!(f, "RPC Error: {:?}", e), - LookupFailure::LookupVerifyError(e) => write!(f, "Lookup Verify Error: {:?}", e), + RpcResponseError::RpcError(e) => write!(f, "RPC Error: {:?}", e), + RpcResponseError::VerifyError(e) => write!(f, "Lookup Verify Error: {:?}", e), } } } -impl From for LookupFailure { +impl From for RpcResponseError { fn from(e: RPCError) -> Self { - LookupFailure::RpcError(e) + RpcResponseError::RpcError(e) } } -impl From for LookupFailure { +impl From for RpcResponseError { fn from(e: LookupVerifyError) -> Self { - LookupFailure::LookupVerifyError(e) + RpcResponseError::VerifyError(e) } } +/// Sequential ID that uniquely identifies ReqResp outgoing requests +pub type ReqId = u32; + +pub enum LookupRequestResult { + /// A request is sent. Sync MUST receive an event from the network in the future for either: + /// completed response or failed request + RequestSent(ReqId), + /// No request is sent, and no further action is necessary to consider this request completed + NoRequestNeeded, + /// No request is sent, but the request is not completed. Sync MUST receive some future event + /// that makes progress on the request. For example: request is processing from a different + /// source (i.e. block received from gossip) and sync MUST receive an event with that processing + /// result. + Pending(&'static str), +} + /// Wraps a Network channel to employ various RPC related network functionality for the Sync manager. 
This includes management of a global RPC request Id. pub struct SyncNetworkContext { /// The network channel to relay messages to the Network service. @@ -193,7 +221,7 @@ impl SyncNetworkContext { peer_id: PeerId, batch_type: ByRangeRequestType, request: BlocksByRangeRequest, - ) -> Result { + ) -> Result { let id = self.next_id(); trace!( self.log, @@ -202,11 +230,13 @@ impl SyncNetworkContext { "count" => request.count(), "peer" => %peer_id, ); - self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request: Request::BlocksByRange(request.clone()), - request_id: RequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), - })?; + self.network_send + .send(NetworkMessage::SendRequest { + peer_id, + request: Request::BlocksByRange(request.clone()), + request_id: RequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + }) + .map_err(|_| RpcRequestSendError::NetworkSendError)?; if matches!(batch_type, ByRangeRequestType::BlocksAndBlobs) { debug!( @@ -218,14 +248,16 @@ impl SyncNetworkContext { ); // Create the blob request based on the blocks request. 
- self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request: Request::BlobsByRange(BlobsByRangeRequest { - start_slot: *request.start_slot(), - count: *request.count(), - }), - request_id: RequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), - })?; + self.network_send + .send(NetworkMessage::SendRequest { + peer_id, + request: Request::BlobsByRange(BlobsByRangeRequest { + start_slot: *request.start_slot(), + count: *request.count(), + }), + request_id: RequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + }) + .map_err(|_| RpcRequestSendError::NetworkSendError)?; } Ok(id) @@ -238,7 +270,7 @@ impl SyncNetworkContext { batch_type: ByRangeRequestType, request: BlocksByRangeRequest, sender_id: RangeRequestId, - ) -> Result { + ) -> Result { let id = self.blocks_by_range_request(peer_id, batch_type, request)?; self.range_blocks_and_blobs_requests .insert(id, (sender_id, BlocksAndBlobsRequestInfo::new(batch_type))); @@ -295,59 +327,135 @@ impl SyncNetworkContext { } } + /// Request block of `block_root` if necessary by checking: + /// - If the da_checker has a pending block from gossip or a previous request + /// + /// Returns false if no request was made, because the block is already imported pub fn block_lookup_request( &mut self, - id: SingleLookupReqId, + lookup_id: SingleLookupId, peer_id: PeerId, - request: BlocksByRootSingleRequest, - ) -> Result<(), &'static str> { + block_root: Hash256, + ) -> Result { + match self.chain.get_block_process_status(&block_root) { + // Unknown block, continue request to download + BlockProcessStatus::Unknown => {} + // Block is known are currently processing, expect a future event with the result of + // processing. + BlockProcessStatus::NotValidated { .. } => { + return Ok(LookupRequestResult::Pending("block in processing cache")) + } + // Block is fully validated. If it's not yet imported it's waiting for missing block + // components. Consider this request completed and do nothing. 
+ BlockProcessStatus::ExecutionValidated { .. } => { + return Ok(LookupRequestResult::NoRequestNeeded) + } + } + + let req_id = self.next_id(); + let id = SingleLookupReqId { lookup_id, req_id }; + debug!( self.log, "Sending BlocksByRoot Request"; "method" => "BlocksByRoot", - "block_root" => ?request.0, + "block_root" => ?block_root, "peer" => %peer_id, "id" => ?id ); - self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request: Request::BlocksByRoot(request.into_request(&self.chain.spec)), - request_id: RequestId::Sync(SyncRequestId::SingleBlock { id }), - })?; + let request = BlocksByRootSingleRequest(block_root); + + self.network_send + .send(NetworkMessage::SendRequest { + peer_id, + request: Request::BlocksByRoot(request.into_request(&self.chain.spec)), + request_id: RequestId::Sync(SyncRequestId::SingleBlock { id }), + }) + .map_err(|_| RpcRequestSendError::NetworkSendError)?; self.blocks_by_root_requests .insert(id, ActiveBlocksByRootRequest::new(request)); - Ok(()) + Ok(LookupRequestResult::RequestSent(req_id)) } + /// Request necessary blobs for `block_root`. Requests only the necessary blobs by checking: + /// - If we have a downloaded but not yet processed block + /// - If the da_checker has a pending block + /// - If the da_checker has pending blobs from gossip + /// + /// Returns false if no request was made, because we don't need to import (more) blobs. pub fn blob_lookup_request( &mut self, - id: SingleLookupReqId, + lookup_id: SingleLookupId, peer_id: PeerId, - request: BlobsByRootSingleBlockRequest, - ) -> Result<(), &'static str> { + block_root: Hash256, + downloaded_block_expected_blobs: Option, + ) -> Result { + let Some(expected_blobs) = downloaded_block_expected_blobs.or_else(|| { + // If the block is already being processed or fully validated, retrieve how many blobs + // it expects. Consider any stage of the block. 
If the block root has been validated, we + // can assert that this is the correct value of `blob_kzg_commitments_count`. + match self.chain.get_block_process_status(&block_root) { + BlockProcessStatus::Unknown => None, + BlockProcessStatus::NotValidated(block) + | BlockProcessStatus::ExecutionValidated(block) => Some(block.num_expected_blobs()), + } + }) else { + // Wait to download the block before downloading blobs. Then we can be sure that the + // block has data, so there's no need to do "blind" requests for all possible blobs and + // latter handle the case where if the peer sent no blobs, penalize. + // - if `downloaded_block_expected_blobs` is Some = block is downloading or processing. + // - if `num_expected_blobs` returns Some = block is processed. + return Ok(LookupRequestResult::Pending("waiting for block download")); + }; + + let imported_blob_indexes = self + .chain + .data_availability_checker + .imported_blob_indexes(&block_root) + .unwrap_or_default(); + // Include only the blob indexes not yet imported (received through gossip) + let indices = (0..expected_blobs as u64) + .filter(|index| !imported_blob_indexes.contains(index)) + .collect::>(); + + if indices.is_empty() { + // No blobs required, do not issue any request + return Ok(LookupRequestResult::NoRequestNeeded); + } + + let req_id = self.next_id(); + let id = SingleLookupReqId { lookup_id, req_id }; + debug!( self.log, "Sending BlobsByRoot Request"; "method" => "BlobsByRoot", - "block_root" => ?request.block_root, - "blob_indices" => ?request.indices, + "block_root" => ?block_root, + "blob_indices" => ?indices, "peer" => %peer_id, "id" => ?id ); - self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request: Request::BlobsByRoot(request.clone().into_request(&self.chain.spec)), - request_id: RequestId::Sync(SyncRequestId::SingleBlob { id }), - })?; + let request = BlobsByRootSingleBlockRequest { + block_root, + indices, + }; + + self.network_send + 
.send(NetworkMessage::SendRequest { + peer_id, + request: Request::BlobsByRoot(request.clone().into_request(&self.chain.spec)), + request_id: RequestId::Sync(SyncRequestId::SingleBlob { id }), + }) + .map_err(|_| RpcRequestSendError::NetworkSendError)?; self.blobs_by_root_requests .insert(id, ActiveBlobsByRootRequest::new(request)); - Ok(()) + Ok(LookupRequestResult::RequestSent(req_id)) } pub fn is_execution_engine_online(&self) -> bool { @@ -457,13 +565,14 @@ impl SyncNetworkContext { pub fn on_single_block_response( &mut self, request_id: SingleLookupReqId, + peer_id: PeerId, block: RpcEvent>>, - ) -> RpcProcessingResult>> { + ) -> Option>>> { let Entry::Occupied(mut request) = self.blocks_by_root_requests.entry(request_id) else { return None; }; - Some(match block { + let resp = match block { RpcEvent::Response(block, seen_timestamp) => { match request.get_mut().add_response(block) { Ok(block) => Ok((block, seen_timestamp)), @@ -482,43 +591,119 @@ impl SyncNetworkContext { request.remove(); Err(e.into()) } - }) + }; + + if let Err(RpcResponseError::VerifyError(e)) = &resp { + self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); + } + Some(resp) } pub fn on_single_blob_response( &mut self, request_id: SingleLookupReqId, + peer_id: PeerId, blob: RpcEvent>>, - ) -> RpcProcessingResult> { + ) -> Option>> { let Entry::Occupied(mut request) = self.blobs_by_root_requests.entry(request_id) else { return None; }; - Some(match blob { - RpcEvent::Response(blob, _) => match request.get_mut().add_response(blob) { - Ok(Some(blobs)) => to_fixed_blob_sidecar_list(blobs) - .map(|blobs| (blobs, timestamp_now())) - .map_err(Into::into), - Ok(None) => return None, - Err(e) => { - request.remove(); - Err(e.into()) + let resp = match blob { + RpcEvent::Response(blob, seen_timestamp) => { + let request = request.get_mut(); + match request.add_response(blob) { + Ok(Some(blobs)) => to_fixed_blob_sidecar_list(blobs) + .map(|blobs| (blobs, seen_timestamp)) + .map_err(|e| 
(e.into(), request.resolve())), + Ok(None) => return None, + Err(e) => Err((e.into(), request.resolve())), } + } + RpcEvent::StreamTermination => match request.remove().terminate() { + Ok(_) => return None, + // (err, false = not resolved) because terminate returns Ok() if resolved + Err(e) => Err((e.into(), false)), }, - RpcEvent::StreamTermination => { - // Stream terminator - match request.remove().terminate() { - Some(blobs) => to_fixed_blob_sidecar_list(blobs) - .map(|blobs| (blobs, timestamp_now())) - .map_err(Into::into), - None => return None, + RpcEvent::RPCError(e) => Err((e.into(), request.remove().resolve())), + }; + + match resp { + Ok(resp) => Some(Ok(resp)), + // Track if this request has already returned some value downstream. Ensure that + // downstream code only receives a single Result per request. If the serving peer does + // multiple penalizable actions per request, downscore and return None. This allows to + // catch if a peer is returning more blobs than requested or if the excess blobs are + // invalid. 
+ Err((e, resolved)) => { + if let RpcResponseError::VerifyError(e) = &e { + self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); + } + if resolved { + None + } else { + Some(Err(e)) } } - RpcEvent::RPCError(e) => { - request.remove(); - Err(e.into()) - } - }) + } + } + + pub fn send_block_for_processing( + &self, + id: Id, + block_root: Hash256, + block: RpcBlock, + duration: Duration, + ) -> Result<(), SendErrorProcessor> { + let beacon_processor = self + .beacon_processor_if_enabled() + .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; + + debug!(self.log, "Sending block for processing"; "block" => ?block_root, "id" => id); + beacon_processor + .send_rpc_beacon_block( + block_root, + block, + duration, + BlockProcessType::SingleBlock { id }, + ) + .map_err(|e| { + error!( + self.log, + "Failed to send sync block to processor"; + "error" => ?e + ); + SendErrorProcessor::SendError + }) + } + + pub fn send_blobs_for_processing( + &self, + id: Id, + block_root: Hash256, + blobs: FixedBlobSidecarList, + duration: Duration, + ) -> Result<(), SendErrorProcessor> { + let beacon_processor = self + .beacon_processor_if_enabled() + .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; + + debug!(self.log, "Sending blobs for processing"; "block" => ?block_root, "id" => id); + beacon_processor + .send_rpc_blobs( + block_root, + blobs, + duration, + BlockProcessType::SingleBlob { id }, + ) + .map_err(|e| { + error!( + self.log, + "Failed to send sync blobs to processor"; + "error" => ?e + ); + SendErrorProcessor::SendError + }) } } diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 0522b7fa38..cd73b4beba 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -9,6 +9,7 @@ use types::{ #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupVerifyError { NoResponseReturned, + 
NotEnoughResponsesReturned { expected: usize, actual: usize }, TooManyResponses, UnrequestedBlockRoot(Hash256), UnrequestedBlobIndex(u64), @@ -139,11 +140,20 @@ impl ActiveBlobsByRootRequest { } } - pub fn terminate(self) -> Option>>> { + pub fn terminate(self) -> Result<(), LookupVerifyError> { if self.resolved { - None + Ok(()) } else { - Some(self.blobs) + Err(LookupVerifyError::NotEnoughResponsesReturned { + expected: self.request.indices.len(), + actual: self.blobs.len(), + }) } } + + /// Mark request as resolved (= has returned something downstream) while marking this status as + /// true for future calls. + pub fn resolve(&mut self) -> bool { + std::mem::replace(&mut self.resolved, true) + } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index c60cdb2cc9..63cafa9aca 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -174,30 +174,8 @@ impl SyncingChain { /// Removes a peer from the chain. /// If the peer has active batches, those are considered failed and re-requested. - pub fn remove_peer( - &mut self, - peer_id: &PeerId, - network: &mut SyncNetworkContext, - ) -> ProcessingResult { - if let Some(batch_ids) = self.peers.remove(peer_id) { - // fail the batches - for id in batch_ids { - if let Some(batch) = self.batches.get_mut(&id) { - if let BatchOperationOutcome::Failed { blacklist } = - batch.download_failed(true)? 
- { - return Err(RemoveChain::ChainFailed { - blacklist, - failing_batch: id, - }); - } - self.retry_batch_download(network, id)?; - } else { - debug!(self.log, "Batch not found while removing peer"; - "peer" => %peer_id, "batch" => id) - } - } - } + pub fn remove_peer(&mut self, peer_id: &PeerId) -> ProcessingResult { + self.peers.remove(peer_id); if self.peers.is_empty() { Err(RemoveChain::EmptyPeerPool) @@ -945,7 +923,7 @@ impl SyncingChain { Err(e) => { // NOTE: under normal conditions this shouldn't happen but we handle it anyway warn!(self.log, "Could not send batch request"; - "batch_id" => batch_id, "error" => e, &batch); + "batch_id" => batch_id, "error" => ?e, &batch); // register the failed download and check if the batch can be retried batch.start_downloading_from_peer(peer, 1)?; // fake request_id is not relevant self.peers diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index c8e8266684..fe48db35b4 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -278,9 +278,8 @@ where /// for this peer. If so we mark the batch as failed. The batch may then hit it's maximum /// retries. In this case, we need to remove the chain. 
fn remove_peer(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) { - for (removed_chain, sync_type, remove_reason) in self - .chains - .call_all(|chain| chain.remove_peer(peer_id, network)) + for (removed_chain, sync_type, remove_reason) in + self.chains.call_all(|chain| chain.remove_peer(peer_id)) { self.on_chain_removed( removed_chain, diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 818cdbd460..dbfda2d530 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1,12 +1,16 @@ -use clap::{App, Arg, ArgGroup}; +use std::time::Duration; + +use clap::{builder::ArgPredicate, crate_version, Arg, ArgAction, ArgGroup, Command}; +use clap_utils::{get_color_style, FLAG_HEADER}; use strum::VariantNames; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("beacon_node") - .visible_aliases(&["b", "bn", "beacon"]) +pub fn cli_app() -> Command { + Command::new("beacon_node") + .display_order(0) + .visible_aliases(["b", "bn", "beacon"]) .version(crate_version!()) .author("Sigma Prime ") - .setting(clap::AppSettings::ColoredHelp) + .styles(get_color_style()) .about("The primary component which connects to the Ethereum 2.0 P2P network and \ downloads, verifies and stores blocks. Provides a HTTP API for querying \ the beacon chain and publishing messages to the network.") @@ -14,68 +18,91 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { * Configuration directory locations. */ .arg( - Arg::with_name("network-dir") + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER) + ) + .arg( + Arg::new("network-dir") .long("network-dir") .value_name("DIR") .help("Data directory for network keys. 
Defaults to network/ inside the beacon node \ dir.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("freezer-dir") + Arg::new("freezer-dir") .long("freezer-dir") .value_name("DIR") .help("Data directory for the freezer database.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("blobs-dir") + Arg::new("blobs-dir") .long("blobs-dir") .value_name("DIR") .help("Data directory for the blobs database.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) /* * Network parameters. */ .arg( - Arg::with_name("subscribe-all-subnets") + Arg::new("subscribe-all-subnets") .long("subscribe-all-subnets") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Subscribe to all subnets regardless of validator count. \ This will also advertise the beacon node as being long-lived subscribed to all subnets.") - .takes_value(false), + .display_order(0) ) .arg( - Arg::with_name("import-all-attestations") + Arg::new("import-all-attestations") .long("import-all-attestations") .help("Import and aggregate all attestations, regardless of validator subscriptions. \ This will only import attestations from already-subscribed subnets, use with \ --subscribe-all-subnets to ensure all attestations are received for import.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("disable-packet-filter") + Arg::new("disable-packet-filter") .long("disable-packet-filter") .help("Disables the discovery packet filter. Useful for testing in smaller networks") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("shutdown-after-sync") + Arg::new("shutdown-after-sync") .long("shutdown-after-sync") .help("Shutdown beacon node as soon as sync is completed. 
Backfill sync will \ not be performed before shutdown.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("zero-ports") + Arg::new("zero-ports") .long("zero-ports") - .short("z") + .short('z') .help("Sets all listening TCP/UDP ports to 0, allowing the OS to choose some \ arbitrary free ports.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("listen-address") + Arg::new("listen-address") .long("listen-address") .value_name("ADDRESS") .help("The address lighthouse will listen for UDP and TCP connections. To listen \ @@ -86,13 +113,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - --listen-address '0.0.0.0' --listen-address '::' will listen over both \ IPv4 and IPv6. The order of the given addresses is not relevant. However, \ multiple IPv4, or multiple IPv6 addresses will not be accepted.") - .multiple(true) - .max_values(2) + .action(ArgAction::Append) + .num_args(0..=2) .default_value("0.0.0.0") - .takes_value(true) + .display_order(0) ) .arg( - Arg::with_name("port") + Arg::new("port") .long("port") .value_name("PORT") .help("The TCP/UDP ports to listen on. There are two UDP ports. \ @@ -100,134 +127,153 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { --discovery-port flag and the quic port can be modified by the --quic-port flag. If listening over both IPv4 and IPv6 the --port flag \ will apply to the IPv4 address and --port6 to the IPv6 address.") .default_value("9000") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("port6") + Arg::new("port6") .long("port6") .value_name("PORT") .help("The TCP/UDP ports to listen on over IPv6 when listening over both IPv4 and \ IPv6. Defaults to 9090 when required. 
The Quic UDP port will be set to this value + 1.") .default_value("9090") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("discovery-port") + Arg::new("discovery-port") .long("discovery-port") .value_name("PORT") .help("The UDP port that discovery will listen on. Defaults to `port`") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("quic-port") + Arg::new("quic-port") .long("quic-port") .value_name("PORT") .help("The UDP port that quic will listen on. Defaults to `port` + 1") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("discovery-port6") + Arg::new("discovery-port6") .long("discovery-port6") .value_name("PORT") .help("The UDP port that discovery will listen on over IPv6 if listening over \ both IPv4 and IPv6. Defaults to `port6`") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("quic-port6") + Arg::new("quic-port6") .long("quic-port6") .value_name("PORT") .help("The UDP port that quic will listen on over IPv6 if listening over \ both IPv4 and IPv6. Defaults to `port6` + 1") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("target-peers") + Arg::new("target-peers") .long("target-peers") .help("The target number of peers.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("boot-nodes") + Arg::new("boot-nodes") .long("boot-nodes") .allow_hyphen_values(true) .value_name("ENR/MULTIADDR LIST") .help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network. Multiaddr is also supported.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("network-load") + Arg::new("network-load") .long("network-load") .value_name("INTEGER") .help("Lighthouse's network can be tuned for bandwidth/performance. 
Setting this to a high value, will increase the bandwidth lighthouse uses, increasing the likelihood of redundant information in exchange for faster communication. This can increase profit of validators marginally by receiving messages faster on the network. Lower values decrease bandwidth usage, but makes communication slower which can lead to validator performance reduction. Values are in the range [1,5].") .default_value("3") - .set(clap::ArgSettings::Hidden) - .takes_value(true), + .hide(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("disable-upnp") + Arg::new("disable-upnp") .long("disable-upnp") .help("Disables UPnP support. Setting this will prevent Lighthouse from attempting to automatically establish external port mappings.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("private") + Arg::new("private") .long("private") .help("Prevents sending various client identification information.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("enr-udp-port") + Arg::new("enr-udp-port") .long("enr-udp-port") .value_name("PORT") .help("The UDP4 port of the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IPv4.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-quic-port") + Arg::new("enr-quic-port") .long("enr-quic-port") .value_name("PORT") .help("The quic UDP4 port that will be set on the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IPv4.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-udp6-port") + Arg::new("enr-udp6-port") .long("enr-udp6-port") .value_name("PORT") .help("The UDP6 port of the local ENR. 
Set this only if you are sure other nodes \ can connect to your local node on this port over IPv6.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-quic6-port") + Arg::new("enr-quic6-port") .long("enr-quic6-port") .value_name("PORT") .help("The quic UDP6 port that will be set on the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IPv6.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-tcp-port") + Arg::new("enr-tcp-port") .long("enr-tcp-port") .value_name("PORT") .help("The TCP4 port of the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IPv4. The --port flag is \ used if this is not set.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-tcp6-port") + Arg::new("enr-tcp6-port") .long("enr-tcp6-port") .value_name("PORT") .help("The TCP6 port of the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IPv6. The --port6 flag is \ used if this is not set.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-address") + Arg::new("enr-address") .long("enr-address") .value_name("ADDRESS") .help("The IP address/ DNS address to broadcast to other peers on how to reach \ @@ -236,76 +282,110 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { discovery. Set this only if you are sure other nodes can connect to your \ local node on this address. This will update the `ip4` or `ip6` ENR fields \ accordingly. 
To update both, set this flag twice with the different values.") - .multiple(true) - .max_values(2) - .takes_value(true), + .action(ArgAction::Append) + .num_args(1..=2) + .display_order(0) ) .arg( - Arg::with_name("enr-match") - .short("e") + Arg::new("enr-match") + .short('e') .long("enr-match") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Sets the local ENR IP address and port to match those set for lighthouse. \ Specifically, the IP address will be the value of --listen-address and the \ UDP port will be --discovery-port.") + .display_order(0) ) .arg( - Arg::with_name("disable-enr-auto-update") - .short("x") + Arg::new("disable-enr-auto-update") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long("disable-enr-auto-update") .help("Discovery automatically updates the nodes local ENR with an external IP address and port as seen by other peers on the network. \ - This disables this feature, fixing the ENR's IP/PORT to those specified on boot."), + This disables this feature, fixing the ENR's IP/PORT to those specified on boot.") + .display_order(0) ) .arg( - Arg::with_name("libp2p-addresses") + Arg::new("libp2p-addresses") .long("libp2p-addresses") .value_name("MULTIADDR") .help("One or more comma-delimited multiaddrs to manually connect to a libp2p peer \ without an ENR.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) - // NOTE: This is hidden because it is primarily a developer feature for testnets and + // NOTE: This is hide because it is primarily a developer feature for testnets and // debugging. We remove it from the list to avoid clutter. .arg( - Arg::with_name("disable-discovery") + Arg::new("disable-discovery") .long("disable-discovery") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Disables the discv5 discovery protocol. 
The node will not search for new peers or participate in the discovery protocol.") - .hidden(true) + .hide(true) + .display_order(0) ) .arg( - Arg::with_name("disable-quic") + Arg::new("disable-quic") .long("disable-quic") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Disables the quic transport. The node will rely solely on the TCP transport for libp2p connections.") + .display_order(0) ) .arg( - Arg::with_name("disable-peer-scoring") + Arg::new("disable-peer-scoring") .long("disable-peer-scoring") .help("Disables peer scoring in lighthouse. WARNING: This is a dev only flag is only meant to be used in local testing scenarios \ Using this flag on a real network may cause your node to become eclipsed and see a different view of the network") - .takes_value(false) - .hidden(true), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .hide(true) + .display_order(0) ) .arg( - Arg::with_name("trusted-peers") + Arg::new("trusted-peers") .long("trusted-peers") .value_name("TRUSTED_PEERS") .help("One or more comma-delimited trusted peer ids which always have the highest score according to the peer scoring system.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) + .display_order(0) ) .arg( - Arg::with_name("genesis-backfill") + Arg::new("genesis-backfill") .long("genesis-backfill") .help("Attempts to download blocks all the way back to genesis when checkpoint syncing.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("enable-private-discovery") + Arg::new("enable-private-discovery") .long("enable-private-discovery") .help("Lighthouse by default does not discover private IP addresses. 
Set this flag to enable connection attempts to local addresses.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("self-limiter") + Arg::new("self-limiter") .long("self-limiter") + .help( + "Enables the outbound rate limiter (requests made by this node). \ + Use the self-limiter-protocol flag to set per protocol configurations. \ + If the self rate limiter is enabled and a protocol is not \ + present in the configuration, the quotas used for the inbound rate limiter will be \ + used." + ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) + ) + .arg( + Arg::new("self-limiter-protocols") + .long("self-limiter-protocols") .help( "Enables the outbound rate limiter (requests made by this node).\ \ @@ -315,69 +395,89 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { present in the configuration, the quotas used for the inbound rate limiter will be \ used." ) - .min_values(0) - .hidden(true) + .action(ArgAction::Append) + .value_delimiter(';') + .requires("self-limiter") + .display_order(0) ) .arg( - Arg::with_name("proposer-only") + Arg::new("proposer-only") .long("proposer-only") .help("Sets this beacon node at be a block proposer only node. \ This will run the beacon node in a minimal configuration that is sufficient for block publishing only. This flag should be used \ for a beacon node being referenced by validator client using the --proposer-node flag. This configuration is for enabling more secure setups.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("inbound-rate-limiter") - .long("inbound-rate-limiter") + Arg::new("disable-inbound-rate-limiter") + .long("disable-inbound-rate-limiter") + .help( + "Disables the inbound rate limiter (requests received by this node)." 
+ ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) + ) + .arg( + Arg::new("inbound-rate-limiter-protocols") + .long("inbound-rate-limiter-protocols") .help( "Configures the inbound rate limiter (requests received by this node).\ \ Rate limit quotas per protocol can be set in the form of \ <protocol_name>:<tokens>/<time_in_seconds>. To set quotas for multiple protocols, \ - separate them by ';'. If the inbound rate limiter is enabled and a protocol is not \ - present in the configuration, the default quotas will be used. \ + separate them by ';'. \ \ - This is enabled by default, using default quotas. To disable rate limiting pass \ - `disabled` to this option instead." + This is enabled by default, using default quotas. To disable rate limiting use \ + the disable-inbound-rate-limiter flag instead." ) - .takes_value(true) - .hidden(true) + .action(ArgAction::Set) + .conflicts_with("disable-inbound-rate-limiter") + .display_order(0) ) .arg( - Arg::with_name("disable-backfill-rate-limiting") + Arg::new("disable-backfill-rate-limiting") .long("disable-backfill-rate-limiting") .help("Disable the backfill sync rate-limiting. This allow users to just sync the entire chain as fast \ as possible, however it can result in resource contention which degrades staking performance. Stakers \ should generally choose to avoid this flag since backfill sync is not required for staking.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* REST API related arguments */ .arg( - Arg::with_name("http") + Arg::new("http") .long("http") .help("Enable the RESTful HTTP API server. 
Disabled by default.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("http-address") + Arg::new("http-address") .long("http-address") .requires("enable_http") .value_name("ADDRESS") .help("Set the listen address for the RESTful HTTP API server.") - .default_value_if("enable_http", None, "127.0.0.1") - .takes_value(true), + .default_value_if("enable_http", ArgPredicate::IsPresent, "127.0.0.1") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-port") + Arg::new("http-port") .long("http-port") .requires("enable_http") .value_name("PORT") .help("Set the listen TCP port for the RESTful HTTP API server.") - .default_value_if("enable_http", None, "5052") - .takes_value(true), + .default_value_if("enable_http", ArgPredicate::IsPresent, "5052") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-allow-origin") + Arg::new("http-allow-origin") .long("http-allow-origin") .requires("enable_http") .value_name("ORIGIN") @@ -385,71 +485,82 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Use * to allow any origin (not recommended in production). \ If no value is supplied, the CORS allowed origin is set to the listen \ address of this server (e.g., http://localhost:5052).") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-spec-fork") + Arg::new("http-spec-fork") .long("http-spec-fork") .requires("enable_http") .value_name("FORK") .help("This flag is deprecated and has no effect.") - .takes_value(true) - .hidden(true) + .hide(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-enable-tls") + Arg::new("http-enable-tls") .long("http-enable-tls") .help("Serves the RESTful HTTP API server over TLS. 
This feature is currently \ experimental.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .requires("http-tls-cert") .requires("http-tls-key") + .display_order(0) ) .arg( - Arg::with_name("http-tls-cert") + Arg::new("http-tls-cert") .long("http-tls-cert") .requires("enable_http") .help("The path of the certificate to be used when serving the HTTP API server \ over TLS.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-tls-key") + Arg::new("http-tls-key") .long("http-tls-key") .requires("enable_http") .help("The path of the private key to be used when serving the HTTP API server \ over TLS. Must not be password-protected.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-allow-sync-stalled") + Arg::new("http-allow-sync-stalled") .long("http-allow-sync-stalled") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .requires("enable_http") .help("This flag is deprecated and has no effect.") - .hidden(true) + .hide(true) + .display_order(0) ) .arg( - Arg::with_name("http-sse-capacity-multiplier") + Arg::new("http-sse-capacity-multiplier") .long("http-sse-capacity-multiplier") .requires("enable_http") - .takes_value(true) - .default_value_if("enable_http", None, "1") + .action(ArgAction::Set) + .default_value_if("enable_http", ArgPredicate::IsPresent, "1") .value_name("N") .help("Multiplier to apply to the length of HTTP server-sent-event (SSE) channels. 
\ Increasing this value can prevent messages from being dropped.") + .display_order(0) ) .arg( - Arg::with_name("http-duplicate-block-status") + Arg::new("http-duplicate-block-status") .long("http-duplicate-block-status") .requires("enable_http") - .takes_value(true) - .default_value_if("enable_http", None, "202") + .action(ArgAction::Set) + .default_value_if("enable_http", ArgPredicate::IsPresent, "202") .value_name("STATUS_CODE") .help("Status code to send when a block that is already known is POSTed to the \ HTTP API.") + .display_order(0) ) .arg( - Arg::with_name("http-enable-beacon-processor") + Arg::new("http-enable-beacon-processor") .long("http-enable-beacon-processor") .requires("enable_http") .value_name("BOOLEAN") @@ -457,36 +568,41 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { DoS protection. When set to \"true\", HTTP API requests will be queued and scheduled \ alongside other tasks. When set to \"false\", HTTP API responses will be executed \ immediately.") - .takes_value(true) - .default_value_if("enable_http", None, "true") + .action(ArgAction::Set) + .display_order(0) + .default_value_if("enable_http", ArgPredicate::IsPresent, "true") ) /* Prometheus metrics HTTP server related arguments */ .arg( - Arg::with_name("metrics") + Arg::new("metrics") .long("metrics") .help("Enable the Prometheus metrics HTTP server. 
Disabled by default.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("metrics-address") + Arg::new("metrics-address") .long("metrics-address") .value_name("ADDRESS") .requires("metrics") .help("Set the listen address for the Prometheus metrics HTTP server.") - .default_value_if("metrics", None, "127.0.0.1") - .takes_value(true), + .default_value_if("metrics", ArgPredicate::IsPresent, "127.0.0.1") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("metrics-port") + Arg::new("metrics-port") .long("metrics-port") .requires("metrics") .value_name("PORT") .help("Set the listen TCP port for the Prometheus metrics HTTP server.") - .default_value_if("metrics", None, "5054") - .takes_value(true), + .default_value_if("metrics", ArgPredicate::IsPresent, "5054") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("metrics-allow-origin") + Arg::new("metrics-allow-origin") .long("metrics-allow-origin") .value_name("ORIGIN") .requires("metrics") @@ -494,15 +610,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Use * to allow any origin (not recommended in production). \ If no value is supplied, the CORS allowed origin is set to the listen \ address of this server (e.g., http://localhost:5054).") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("shuffling-cache-size") + Arg::new("shuffling-cache-size") .long("shuffling-cache-size") .help("Some HTTP API requests can be optimised by caching the shufflings at each epoch. \ This flag allows the user to set the shuffling cache size in epochs. 
\ Shufflings are dependent on validator count and setting this value to a large number can consume a large amount of memory.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) /* @@ -510,7 +628,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { */ .arg( - Arg::with_name("monitoring-endpoint") + Arg::new("monitoring-endpoint") .long("monitoring-endpoint") .value_name("ADDRESS") .help("Enables the monitoring service for sending system metrics to a remote endpoint. \ @@ -519,16 +637,18 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Note: This will send information to a remote sever which may identify and associate your \ validators, IP address and other personal information. Always use a HTTPS connection \ and never provide an untrusted URL.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("monitoring-endpoint-period") + Arg::new("monitoring-endpoint-period") .long("monitoring-endpoint-period") .value_name("SECONDS") .help("Defines how many seconds to wait between each message sent to \ the monitoring-endpoint. Default: 60s") .requires("monitoring-endpoint") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) /* @@ -536,122 +656,143 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { */ .arg( - Arg::with_name("staking") + Arg::new("staking") .long("staking") .help("Standard option for a staking beacon node. This will enable the HTTP server \ on localhost:5052 and import deposit logs from the execution node. This is \ equivalent to `--http` on merge-ready networks, or `--http --eth1` pre-merge") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* * Eth1 Integration */ .arg( - Arg::with_name("eth1") + Arg::new("eth1") .long("eth1") .help("If present the node will connect to an eth1 node. 
This is required for \ block production, you must use this flag if you wish to serve a validator.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("dummy-eth1") + Arg::new("dummy-eth1") .long("dummy-eth1") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .conflicts_with("eth1") .help("If present, uses an eth1 backend that generates static dummy data.\ Identical to the method used at the 2019 Canada interop.") + .display_order(0) ) .arg( - Arg::with_name("eth1-purge-cache") + Arg::new("eth1-purge-cache") .long("eth1-purge-cache") .value_name("PURGE-CACHE") .help("Purges the eth1 block and deposit caches") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("eth1-blocks-per-log-query") + Arg::new("eth1-blocks-per-log-query") .long("eth1-blocks-per-log-query") .value_name("BLOCKS") .help("Specifies the number of blocks that a deposit log query should span. \ This will reduce the size of responses from the Eth1 endpoint.") .default_value("1000") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("eth1-cache-follow-distance") + Arg::new("eth1-cache-follow-distance") .long("eth1-cache-follow-distance") .value_name("BLOCKS") .help("Specifies the distance between the Eth1 chain head and the last block which \ should be imported into the cache. Setting this value lower can help \ compensate for irregular Proof-of-Work block times, but setting it too low \ can make the node vulnerable to re-orgs.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slots-per-restore-point") + Arg::new("slots-per-restore-point") .long("slots-per-restore-point") .value_name("SLOT_COUNT") .help("Specifies how often a freezer DB restore point should be stored. \ Cannot be changed after initialization. 
\ [default: 8192 (mainnet) or 64 (minimal)]") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("epochs-per-migration") + Arg::new("epochs-per-migration") .long("epochs-per-migration") .value_name("N") .help("The number of epochs to wait between running the migration of data from the \ hot DB to the cold DB. Less frequent runs can be useful for minimizing disk \ writes") .default_value("1") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("block-cache-size") + Arg::new("block-cache-size") .long("block-cache-size") .value_name("SIZE") - .help("Specifies how many blocks the database should cache in memory [default: 5]") - .takes_value(true) + .help("Specifies how many blocks the database should cache in memory") + .default_value("5") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("historic-state-cache-size") + Arg::new("historic-state-cache-size") .long("historic-state-cache-size") .value_name("SIZE") - .help("Specifies how many states from the freezer database should cache in memory [default: 1]") - .takes_value(true) + .help("Specifies how many states from the freezer database should cache in memory") + .default_value("1") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("state-cache-size") + Arg::new("state-cache-size") .long("state-cache-size") .value_name("STATE_CACHE_SIZE") - .help("Specifies the size of the snapshot cache [default: 3]") - .takes_value(true) + .help("Specifies the size of the state cache") + .default_value("128") + .action(ArgAction::Set) + .display_order(0) ) /* * Execution Layer Integration */ .arg( - Arg::with_name("execution-endpoint") + Arg::new("execution-endpoint") .long("execution-endpoint") .value_name("EXECUTION-ENDPOINT") .alias("execution-endpoints") .help("Server endpoint for an execution layer JWT-authenticated HTTP \ JSON-RPC connection. 
Uses the same endpoint to populate the \ deposit cache.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("execution-jwt") + Arg::new("execution-jwt") .long("execution-jwt") .value_name("EXECUTION-JWT") .alias("jwt-secrets") .help("File path which contains the hex-encoded JWT secret for the \ execution endpoint provided in the --execution-endpoint flag.") .requires("execution-endpoint") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("execution-jwt-secret-key") + Arg::new("execution-jwt-secret-key") .long("execution-jwt-secret-key") .value_name("EXECUTION-JWT-SECRET-KEY") .alias("jwt-secret-key") @@ -659,10 +800,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { execution endpoint provided in the --execution-endpoint flag.") .requires("execution-endpoint") .conflicts_with("execution-jwt") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("execution-jwt-id") + Arg::new("execution-jwt-id") .long("execution-jwt-id") .value_name("EXECUTION-JWT-ID") .alias("jwt-id") @@ -670,10 +812,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { during JWT authentication. It corresponds to the 'id' field in the JWT claims object.\ Set to empty by default") .requires("execution-jwt") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("execution-jwt-version") + Arg::new("execution-jwt-version") .long("execution-jwt-version") .value_name("EXECUTION-JWT-VERSION") .alias("jwt-version") @@ -681,119 +824,162 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { during JWT authentication. 
It corresponds to the 'clv' field in the JWT claims object.\ Set to empty by default") .requires("execution-jwt") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("suggested-fee-recipient") + Arg::new("suggested-fee-recipient") .long("suggested-fee-recipient") .value_name("SUGGESTED-FEE-RECIPIENT") .help("Emergency fallback fee recipient for use in case the validator client does \ not have one configured. You should set this flag on the validator \ client instead of (or in addition to) setting it here.") .requires("execution-endpoint") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder") + Arg::new("builder") .long("builder") .alias("payload-builder") .alias("payload-builders") .help("The URL of a service compatible with the MEV-boost API.") .requires("execution-endpoint") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("execution-timeout-multiplier") + Arg::new("execution-timeout-multiplier") .long("execution-timeout-multiplier") .value_name("NUM") .help("Unsigned integer to multiply the default execution timeouts by.") .default_value("1") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) + ) + .arg( + Arg::new("builder-header-timeout") + .long("builder-header-timeout") + .value_name("MILLISECONDS") + .help("Defines a timeout value (in milliseconds) to use when \ + fetching a block header from the builder API.") + .default_value("1000") + .value_parser(|timeout: &str| { + match timeout + .parse::() + .ok() + .map(Duration::from_millis) + { + Some(val) => { + if val > Duration::from_secs(3) { + return Err("builder-header-timeout cannot exceed 3000ms") + } + Ok(timeout.to_string()) + }, + None => Err("builder-header-timeout must be a number"), + } + }) + .requires("builder") + .action(ArgAction::Set) + .display_order(0) ) /* Deneb settings */ .arg( - Arg::with_name("trusted-setup-file-override") + 
Arg::new("trusted-setup-file-override") .long("trusted-setup-file-override") .value_name("FILE") .help("Path to a json file containing the trusted setup params. \ NOTE: This will override the trusted setup that is generated \ from the mainnet kzg ceremony. Use with caution") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) /* * Database purging and compaction. */ .arg( - Arg::with_name("purge-db") + Arg::new("purge-db") .long("purge-db") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, the chain database will be deleted. Use with caution.") + .display_order(0) ) .arg( - Arg::with_name("compact-db") + Arg::new("compact-db") .long("compact-db") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, apply compaction to the database on start-up. Use with caution. \ It is generally not recommended unless auto-compaction is disabled.") + .display_order(0) ) .arg( - Arg::with_name("auto-compact-db") + Arg::new("auto-compact-db") .long("auto-compact-db") .help("Enable or disable automatic compaction of the database on finalization.") - .takes_value(true) + .action(ArgAction::Set) .default_value("true") + .display_order(0) ) .arg( - Arg::with_name("prune-payloads") + Arg::new("prune-payloads") .long("prune-payloads") .help("Prune execution payloads from Lighthouse's database. 
This saves space but \ imposes load on the execution client, as payloads need to be \ reconstructed and sent to syncing peers.") - .takes_value(true) + .action(ArgAction::Set) .default_value("true") + .display_order(0) ) .arg( - Arg::with_name("prune-blobs") + Arg::new("prune-blobs") .long("prune-blobs") .value_name("BOOLEAN") .help("Prune blobs from Lighthouse's database when they are older than the data \ data availability boundary relative to the current epoch.") - .takes_value(true) + .action(ArgAction::Set) .default_value("true") + .display_order(0) ) .arg( - Arg::with_name("epochs-per-blob-prune") + Arg::new("epochs-per-blob-prune") .long("epochs-per-blob-prune") .value_name("EPOCHS") .help("The epoch interval with which to prune blobs from Lighthouse's \ database when they are older than the data availability boundary \ relative to the current epoch.") - .takes_value(true) + .action(ArgAction::Set) .default_value("1") + .display_order(0) ) .arg( - Arg::with_name("blob-prune-margin-epochs") + Arg::new("blob-prune-margin-epochs") .long("blob-prune-margin-epochs") .value_name("EPOCHS") .help("The margin for blob pruning in epochs. The oldest blobs are pruned \ up until data_availability_boundary - blob_prune_margin_epochs.") - .takes_value(true) + .action(ArgAction::Set) .default_value("0") + .display_order(0) ) /* * Misc. */ .arg( - Arg::with_name("graffiti") + Arg::new("graffiti") .long("graffiti") .help( "Specify your custom graffiti to be included in blocks. \ Defaults to the current version and commit, truncated to fit in 32 bytes. " ) .value_name("GRAFFITI") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("max-skip-slots") + Arg::new("max-skip-slots") .long("max-skip-slots") .help( "Refuse to skip more than this many slots when processing an attestation. \ @@ -801,43 +987,48 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { but could also cause unnecessary consensus failures, so is disabled by default." 
) .value_name("NUM_SLOTS") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) /* * Slasher. */ .arg( - Arg::with_name("slasher") + Arg::new("slasher") .long("slasher") .help( "Run a slasher alongside the beacon node. It is currently only recommended for \ expert users because of the immaturity of the slasher UX and the extra \ resources required." ) - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("slasher-dir") + Arg::new("slasher-dir") .long("slasher-dir") .help( "Set the slasher's database directory." ) .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .requires("slasher") + .display_order(0) ) .arg( - Arg::with_name("slasher-update-period") + Arg::new("slasher-update-period") .long("slasher-update-period") .help( "Configure how often the slasher runs batch processing." ) .value_name("SECONDS") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-slot-offset") + Arg::new("slasher-slot-offset") .long("slasher-slot-offset") .help( "Set the delay from the start of the slot at which the slasher should ingest \ @@ -846,10 +1037,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .value_name("SECONDS") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-history-length") + Arg::new("slasher-history-length") .long("slasher-history-length") .help( "Configure how many epochs of history the slasher keeps. Immutable after \ @@ -857,65 +1049,72 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .value_name("EPOCHS") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-max-db-size") + Arg::new("slasher-max-db-size") .long("slasher-max-db-size") .help( "Maximum size of the MDBX database used by the slasher." 
) .value_name("GIGABYTES") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-att-cache-size") + Arg::new("slasher-att-cache-size") .long("slasher-att-cache-size") .help("Set the maximum number of attestation roots for the slasher to cache") .value_name("COUNT") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-chunk-size") + Arg::new("slasher-chunk-size") .long("slasher-chunk-size") .help( "Number of epochs per validator per chunk stored on disk." ) .value_name("EPOCHS") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-validator-chunk-size") + Arg::new("slasher-validator-chunk-size") .long("slasher-validator-chunk-size") .help( "Number of validators per chunk stored on disk." ) .value_name("NUM_VALIDATORS") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-broadcast") + Arg::new("slasher-broadcast") .long("slasher-broadcast") .help("Broadcast slashings found by the slasher to the rest of the network \ [Enabled by default].") - .takes_value(true) + .action(ArgAction::Set) .default_value("true") + .display_order(0) ) .arg( - Arg::with_name("slasher-backend") + Arg::new("slasher-backend") .long("slasher-backend") .value_name("DATABASE") .help("Set the database backend to be used by the slasher.") - .takes_value(true) - .possible_values(slasher::DatabaseBackend::VARIANTS) + .action(ArgAction::Set) + .value_parser(slasher::DatabaseBackend::VARIANTS.to_vec()) .requires("slasher") + .display_order(0) ) .arg( - Arg::with_name("wss-checkpoint") + Arg::new("wss-checkpoint") .long("wss-checkpoint") .help( "Specify a weak subjectivity checkpoint in `block_root:epoch` format to verify \ @@ -924,94 +1123,109 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { state use --checkpoint-sync-url." 
) .value_name("WSS_CHECKPOINT") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("checkpoint-state") + Arg::new("checkpoint-state") .long("checkpoint-state") .help("Set a checkpoint state to start syncing from. Must be aligned and match \ --checkpoint-block. Using --checkpoint-sync-url instead is recommended.") .value_name("STATE_SSZ") - .takes_value(true) + .action(ArgAction::Set) .requires("checkpoint-block") + .display_order(0) ) .arg( - Arg::with_name("checkpoint-block") + Arg::new("checkpoint-block") .long("checkpoint-block") .help("Set a checkpoint block to start syncing from. Must be aligned and match \ --checkpoint-state. Using --checkpoint-sync-url instead is recommended.") .value_name("BLOCK_SSZ") - .takes_value(true) + .action(ArgAction::Set) .requires("checkpoint-state") + .display_order(0) ) .arg( - Arg::with_name("checkpoint-blobs") + Arg::new("checkpoint-blobs") .long("checkpoint-blobs") .help("Set the checkpoint blobs to start syncing from. Must be aligned and match \ --checkpoint-block. 
Using --checkpoint-sync-url instead is recommended.") .value_name("BLOBS_SSZ") - .takes_value(true) + .action(ArgAction::Set) .requires("checkpoint-block") + .display_order(0) ) .arg( - Arg::with_name("checkpoint-sync-url") + Arg::new("checkpoint-sync-url") .long("checkpoint-sync-url") .help("Set the remote beacon node HTTP endpoint to use for checkpoint sync.") .value_name("BEACON_NODE") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("checkpoint-state") + .display_order(0) ) .arg( - Arg::with_name("checkpoint-sync-url-timeout") + Arg::new("checkpoint-sync-url-timeout") .long("checkpoint-sync-url-timeout") .help("Set the timeout for checkpoint sync calls to remote beacon node HTTP endpoint.") .value_name("SECONDS") - .takes_value(true) + .action(ArgAction::Set) .default_value("180") + .display_order(0) ) .arg( - Arg::with_name("allow-insecure-genesis-sync") + Arg::new("allow-insecure-genesis-sync") .long("allow-insecure-genesis-sync") .help("Enable syncing from genesis, which is generally insecure and incompatible with data availability checks. \ Checkpoint syncing is the preferred method for syncing a node. \ Only use this flag when testing. DO NOT use on mainnet!") .conflicts_with("checkpoint-sync-url") .conflicts_with("checkpoint-state") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("reconstruct-historic-states") + Arg::new("reconstruct-historic-states") .long("reconstruct-historic-states") .help("After a checkpoint sync, reconstruct historic states in the database. 
This requires syncing all the way back to genesis.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("validator-monitor-auto") + Arg::new("validator-monitor-auto") .long("validator-monitor-auto") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Enables the automatic detection and monitoring of validators connected to the \ HTTP API and using the subnet subscription endpoint. This generally has the \ effect of providing additional logging and metrics for locally controlled \ validators.") + .display_order(0) ) .arg( - Arg::with_name("validator-monitor-pubkeys") + Arg::new("validator-monitor-pubkeys") .long("validator-monitor-pubkeys") .help("A comma-separated list of 0x-prefixed validator public keys. \ These validators will receive special monitoring and additional \ logging.") .value_name("PUBKEYS") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("validator-monitor-file") + Arg::new("validator-monitor-file") .long("validator-monitor-file") .help("As per --validator-monitor-pubkeys, but the comma-separated list is \ contained within a file at the given path.") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("validator-monitor-individual-tracking-threshold") + Arg::new("validator-monitor-individual-tracking-threshold") .long("validator-monitor-individual-tracking-threshold") .help("Once the validator monitor reaches this number of local validators \ it will stop collecting per-validator Prometheus metrics and issuing \ @@ -1019,59 +1233,73 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { This avoids infeasibly high cardinality in the Prometheus database and \ high log volume when using many validators. 
Defaults to 64.") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("disable-lock-timeouts") + Arg::new("disable-lock-timeouts") .long("disable-lock-timeouts") .help("Disable the timeouts applied to some internal locks by default. This can \ lead to less spurious failures on slow hardware but is considered \ experimental as it may obscure performance issues.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("disable-proposer-reorgs") + Arg::new("disable-proposer-reorgs") .long("disable-proposer-reorgs") .help("Do not attempt to reorg late blocks from other validators when proposing.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("proposer-reorg-threshold") + Arg::new("proposer-reorg-threshold") .long("proposer-reorg-threshold") + .action(ArgAction::Set) .value_name("PERCENT") .help("Percentage of head vote weight below which to attempt a proposer reorg. \ Default: 20%") .conflicts_with("disable-proposer-reorgs") + .display_order(0) ) .arg( - Arg::with_name("proposer-reorg-parent-threshold") + Arg::new("proposer-reorg-parent-threshold") .long("proposer-reorg-parent-threshold") .value_name("PERCENT") .help("Percentage of parent vote weight above which to attempt a proposer reorg. \ Default: 160%") .conflicts_with("disable-proposer-reorgs") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("proposer-reorg-epochs-since-finalization") + Arg::new("proposer-reorg-epochs-since-finalization") .long("proposer-reorg-epochs-since-finalization") + .action(ArgAction::Set) .value_name("EPOCHS") .help("Maximum number of epochs since finalization at which proposer reorgs are \ allowed. 
Default: 2") .conflicts_with("disable-proposer-reorgs") + .display_order(0) ) .arg( - Arg::with_name("proposer-reorg-cutoff") + Arg::new("proposer-reorg-cutoff") .long("proposer-reorg-cutoff") .value_name("MILLISECONDS") + .action(ArgAction::Set) .help("Maximum delay after the start of the slot at which to propose a reorging \ block. Lower values can prevent failed reorgs by ensuring the block has \ ample time to propagate and be processed by the network. The default is \ 1/12th of a slot (1 second on mainnet)") .conflicts_with("disable-proposer-reorgs") + .display_order(0) ) .arg( - Arg::with_name("proposer-reorg-disallowed-offsets") + Arg::new("proposer-reorg-disallowed-offsets") .long("proposer-reorg-disallowed-offsets") + .action(ArgAction::Set) .value_name("N1,N2,...") .help("Comma-separated list of integer offsets which can be used to avoid \ proposing reorging blocks at certain slots. An offset of N means that \ @@ -1080,66 +1308,75 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { avoided. Any offsets supplied with this flag will impose additional \ restrictions.") .conflicts_with("disable-proposer-reorgs") + .display_order(0) ) .arg( - Arg::with_name("prepare-payload-lookahead") + Arg::new("prepare-payload-lookahead") .long("prepare-payload-lookahead") .value_name("MILLISECONDS") .help("The time before the start of a proposal slot at which payload attributes \ should be sent. Low values are useful for execution nodes which don't \ improve their payload after the first call, and high values are useful \ for ensuring the EL is given ample notice. Default: 1/3 of a slot.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("always-prepare-payload") + Arg::new("always-prepare-payload") .long("always-prepare-payload") .help("Send payload attributes with every fork choice update. This is intended for \ use by block builders, relays and developers. 
You should set a fee \ recipient on this BN and also consider adjusting the \ --prepare-payload-lookahead flag.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("fork-choice-before-proposal-timeout") + Arg::new("fork-choice-before-proposal-timeout") .long("fork-choice-before-proposal-timeout") .help("Set the maximum number of milliseconds to wait for fork choice before \ proposing a block. You can prevent waiting at all by setting the timeout \ to 0, however you risk proposing atop the wrong parent block.") .default_value("250") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("paranoid-block-proposal") + Arg::new("paranoid-block-proposal") .long("paranoid-block-proposal") .help("Paranoid enough to be reading the source? Nice. This flag reverts some \ block proposal optimisations and forces the node to check every attestation \ it includes super thoroughly. This may be useful in an emergency, but not \ otherwise.") - .hidden(true) - .takes_value(false) + .hide(true) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("builder-fallback-skips") + Arg::new("builder-fallback-skips") .long("builder-fallback-skips") .help("If this node is proposing a block and has seen this number of skip slots \ on the canonical chain in a row, it will NOT query any connected builders, \ and will use the local execution engine for payload construction.") .default_value("3") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder-fallback-skips-per-epoch") + Arg::new("builder-fallback-skips-per-epoch") .long("builder-fallback-skips-per-epoch") .help("If this node is proposing a block and has seen this number of skip slots \ on the canonical chain in the past `SLOTS_PER_EPOCH`, it will NOT query \ any connected builders, and will use the local execution engine for \ payload 
construction.") .default_value("8") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder-fallback-epochs-since-finalization") + Arg::new("builder-fallback-epochs-since-finalization") .long("builder-fallback-epochs-since-finalization") .help("If this node is proposing a block and the chain has not finalized within \ this number of epochs, it will NOT query any connected builders, \ @@ -1149,152 +1386,180 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { if there are skips slots at the start of an epoch, right before this node \ is set to propose.") .default_value("3") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder-fallback-disable-checks") + Arg::new("builder-fallback-disable-checks") .long("builder-fallback-disable-checks") .help("This flag disables all checks related to chain health. This means the builder \ API will always be used for payload construction, regardless of recent chain \ conditions.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("builder-profit-threshold") + Arg::new("builder-profit-threshold") .long("builder-profit-threshold") .value_name("WEI_VALUE") .help("This flag is deprecated and has no effect.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder-user-agent") + Arg::new("builder-user-agent") .long("builder-user-agent") .value_name("STRING") .help("The HTTP user agent to send alongside requests to the builder URL. The \ default is Lighthouse's version string.") .requires("builder") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("reset-payload-statuses") + Arg::new("reset-payload-statuses") .long("reset-payload-statuses") .help("When present, Lighthouse will forget the payload statuses of any \ already-imported blocks. 
This can assist in the recovery from a consensus \ failure caused by the execution layer.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("disable-deposit-contract-sync") + Arg::new("disable-deposit-contract-sync") .long("disable-deposit-contract-sync") .help("Explicitly disables syncing of deposit logs from the execution node. \ This overrides any previous option that depends on it. \ Useful if you intend to run a non-validating beacon node.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("disable-optimistic-finalized-sync") + Arg::new("disable-optimistic-finalized-sync") .long("disable-optimistic-finalized-sync") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Force Lighthouse to verify every execution block hash with the execution \ client during finalized sync. By default block hashes will be checked in \ Lighthouse and only passed to the EL if initial verification fails.") + .display_order(0) ) .arg( - Arg::with_name("light-client-server") + Arg::new("light-client-server") .long("light-client-server") .help("Act as a full node supporting light clients on the p2p network \ [experimental]") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("gui") + Arg::new("gui") .long("gui") .help("Enable the graphical user interface and all its requirements. 
\ This enables --http and --validator-monitor-auto and enables SSE logging.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("always-prefer-builder-payload") + Arg::new("always-prefer-builder-payload") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long("always-prefer-builder-payload") .help("This flag is deprecated and has no effect.") + .display_order(0) ) .arg( - Arg::with_name("invalid-gossip-verified-blocks-path") + Arg::new("invalid-gossip-verified-blocks-path") + .action(ArgAction::Set) .long("invalid-gossip-verified-blocks-path") .value_name("PATH") .help("If a block succeeds gossip validation whilst failing full validation, store \ the block SSZ as a file at this path. This feature is only recommended for \ developers. This directory is not pruned, users should be careful to avoid \ filling up their disks.") + .display_order(0) ) .arg( - Arg::with_name("progressive-balances") + Arg::new("progressive-balances") .long("progressive-balances") .value_name("MODE") .help("Deprecated. This optimisation is now the default and cannot be disabled.") - .takes_value(true) - .possible_values(&["fast", "disabled", "checked", "strict"]) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("beacon-processor-max-workers") + Arg::new("beacon-processor-max-workers") .long("beacon-processor-max-workers") .value_name("INTEGER") .help("Specifies the maximum concurrent tasks for the task scheduler. Increasing \ this value may increase resource consumption. Reducing the value \ may result in decreased resource usage and diminished performance. 
The \ default value is the number of logical CPU cores on the host.") - .hidden(true) - .takes_value(true) + .hide(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("beacon-processor-work-queue-len") + Arg::new("beacon-processor-work-queue-len") .long("beacon-processor-work-queue-len") .value_name("INTEGER") .help("Specifies the length of the inbound event queue. \ Higher values may prevent messages from being dropped while lower values \ may help protect the node from becoming overwhelmed.") .default_value("16384") - .hidden(true) - .takes_value(true) + .hide(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("beacon-processor-reprocess-queue-len") + Arg::new("beacon-processor-reprocess-queue-len") .long("beacon-processor-reprocess-queue-len") .value_name("INTEGER") .help("Specifies the length of the queue for messages requiring delayed processing. \ Higher values may prevent messages from being dropped while lower values \ may help protect the node from becoming overwhelmed.") - .hidden(true) + .hide(true) .default_value("12288") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("beacon-processor-attestation-batch-size") + Arg::new("beacon-processor-attestation-batch-size") .long("beacon-processor-attestation-batch-size") .value_name("INTEGER") .help("Specifies the number of gossip attestations in a signature verification batch. \ Higher values may reduce CPU usage in a healthy network whilst lower values may \ increase CPU usage in an unhealthy or hostile network.") - .hidden(true) + .hide(true) .default_value("64") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("beacon-processor-aggregate-batch-size") + Arg::new("beacon-processor-aggregate-batch-size") .long("beacon-processor-aggregate-batch-size") .value_name("INTEGER") .help("Specifies the number of gossip aggregate attestations in a signature \ verification batch. 
\ Higher values may reduce CPU usage in a healthy network while lower values may \ increase CPU usage in an unhealthy or hostile network.") - .hidden(true) + .hide(true) .default_value("64") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("disable-duplicate-warn-logs") + Arg::new("disable-duplicate-warn-logs") .long("disable-duplicate-warn-logs") .help("This flag is deprecated and has no effect.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) - .group(ArgGroup::with_name("enable_http").args(&["http", "gui", "staking"]).multiple(true)) + .group(ArgGroup::new("enable_http").args(["http", "gui", "staking"]).multiple(true)) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 9a1d7df124..35fad0718c 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -5,9 +5,9 @@ use beacon_chain::chain_config::{ }; use beacon_chain::graffiti_calculator::GraffitiOrigin; use beacon_chain::TrustedSetup; -use clap::ArgMatches; +use clap::{parser::ValueSource, ArgMatches, Id}; use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; -use clap_utils::parse_required; +use clap_utils::{parse_flag, parse_required}; use client::{ClientConfig, ClientGenesis}; use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; use environment::RuntimeContext; @@ -50,7 +50,7 @@ pub fn get_config( client_config.set_data_dir(get_data_dir(cli_args)); // If necessary, remove any existing database and configuration - if client_config.data_dir().exists() && cli_args.is_present("purge-db") { + if client_config.data_dir().exists() && cli_args.get_flag("purge-db") { // Remove the chain_db. 
let chain_db = client_config.get_db_path(); if chain_db.exists() { @@ -96,7 +96,7 @@ pub fn get_config( * Note: the config values set here can be overwritten by other more specific cli params */ - if cli_args.is_present("staking") { + if cli_args.get_flag("staking") { client_config.http_api.enabled = true; client_config.sync_eth1_chain = true; } @@ -105,22 +105,22 @@ pub fn get_config( * Http API server */ - if cli_args.is_present("enable_http") { + if cli_args.get_one::("enable_http").is_some() { client_config.http_api.enabled = true; - if let Some(address) = cli_args.value_of("http-address") { + if let Some(address) = cli_args.get_one::("http-address") { client_config.http_api.listen_addr = address .parse::() .map_err(|_| "http-address is not a valid IP address.")?; } - if let Some(port) = cli_args.value_of("http-port") { + if let Some(port) = cli_args.get_one::("http-port") { client_config.http_api.listen_port = port .parse::() .map_err(|_| "http-port is not a valid u16.")?; } - if let Some(allow_origin) = cli_args.value_of("http-allow-origin") { + if let Some(allow_origin) = cli_args.get_one::("http-allow-origin") { // Pre-validate the config value to give feedback to the user on node startup, instead of // as late as when the first API response is produced. hyper::header::HeaderValue::from_str(allow_origin) @@ -129,7 +129,7 @@ pub fn get_config( client_config.http_api.allow_origin = Some(allow_origin.to_string()); } - if cli_args.is_present("http-spec-fork") { + if cli_args.get_one::("http-spec-fork").is_some() { warn!( log, "Ignoring --http-spec-fork"; @@ -137,22 +137,22 @@ pub fn get_config( ); } - if cli_args.is_present("http-enable-tls") { + if cli_args.get_flag("http-enable-tls") { client_config.http_api.tls_config = Some(TlsConfig { cert: cli_args - .value_of("http-tls-cert") + .get_one::("http-tls-cert") .ok_or("--http-tls-cert was not provided.")? 
.parse::() .map_err(|_| "http-tls-cert is not a valid path name.")?, key: cli_args - .value_of("http-tls-key") + .get_one::("http-tls-key") .ok_or("--http-tls-key was not provided.")? .parse::() .map_err(|_| "http-tls-key is not a valid path name.")?, }); } - if cli_args.is_present("http-allow-sync-stalled") { + if cli_args.get_flag("http-allow-sync-stalled") { warn!( log, "Ignoring --http-allow-sync-stalled"; @@ -170,10 +170,10 @@ pub fn get_config( parse_required(cli_args, "http-duplicate-block-status")?; client_config.http_api.enable_light_client_server = - cli_args.is_present("light-client-server"); + cli_args.get_flag("light-client-server"); } - if cli_args.is_present("light-client-server") { + if cli_args.get_flag("light-client-server") { client_config.chain.enable_light_client_server = true; } @@ -185,23 +185,23 @@ pub fn get_config( * Prometheus metrics HTTP server */ - if cli_args.is_present("metrics") { + if cli_args.get_flag("metrics") { client_config.http_metrics.enabled = true; } - if let Some(address) = cli_args.value_of("metrics-address") { + if let Some(address) = cli_args.get_one::("metrics-address") { client_config.http_metrics.listen_addr = address .parse::() .map_err(|_| "metrics-address is not a valid IP address.")?; } - if let Some(port) = cli_args.value_of("metrics-port") { + if let Some(port) = cli_args.get_one::("metrics-port") { client_config.http_metrics.listen_port = port .parse::() .map_err(|_| "metrics-port is not a valid u16.")?; } - if let Some(allow_origin) = cli_args.value_of("metrics-allow-origin") { + if let Some(allow_origin) = cli_args.get_one::("metrics-allow-origin") { // Pre-validate the config value to give feedback to the user on node startup, instead of // as late as when the first API response is produced. 
hyper::header::HeaderValue::from_str(allow_origin) @@ -213,7 +213,7 @@ pub fn get_config( /* * Explorer metrics */ - if let Some(monitoring_endpoint) = cli_args.value_of("monitoring-endpoint") { + if let Some(monitoring_endpoint) = cli_args.get_one::("monitoring-endpoint") { let update_period_secs = clap_utils::parse_optional(cli_args, "monitoring-endpoint-period")?; @@ -227,7 +227,7 @@ pub fn get_config( // Log a warning indicating an open HTTP server if it wasn't specified explicitly // (e.g. using the --staking flag). - if cli_args.is_present("staking") { + if cli_args.get_flag("staking") { warn!( log, "Running HTTP server on port {}", client_config.http_api.listen_port @@ -235,7 +235,7 @@ pub fn get_config( } // Do not scrape for malloc metrics if we've disabled tuning malloc as it may cause panics. - if cli_args.is_present(DISABLE_MALLOC_TUNING_FLAG) { + if cli_args.get_flag(DISABLE_MALLOC_TUNING_FLAG) { client_config.http_metrics.allocator_metrics_enabled = false; } @@ -246,24 +246,24 @@ pub fn get_config( // When present, use an eth1 backend that generates deterministic junk. // // Useful for running testnets without the overhead of a deposit contract. - if cli_args.is_present("dummy-eth1") { + if cli_args.get_flag("dummy-eth1") { client_config.dummy_eth1_backend = true; } // When present, attempt to sync to an eth1 node. // // Required for block production. 
- if cli_args.is_present("eth1") { + if cli_args.get_flag("eth1") { client_config.sync_eth1_chain = true; } - if let Some(val) = cli_args.value_of("eth1-blocks-per-log-query") { + if let Some(val) = cli_args.get_one::("eth1-blocks-per-log-query") { client_config.eth1.blocks_per_log_query = val .parse() .map_err(|_| "eth1-blocks-per-log-query is not a valid integer".to_string())?; } - if cli_args.is_present("eth1-purge-cache") { + if cli_args.get_flag("eth1-purge-cache") { client_config.eth1.purge_cache = true; } @@ -273,7 +273,7 @@ pub fn get_config( client_config.eth1.cache_follow_distance = Some(follow_distance); } - if let Some(endpoints) = cli_args.value_of("execution-endpoint") { + if let Some(endpoints) = cli_args.get_one::("execution-endpoint") { let mut el_config = execution_layer::Config::default(); // Always follow the deposit contract when there is an execution endpoint. @@ -296,13 +296,14 @@ pub fn get_config( let secret_file: PathBuf; // Parse a single JWT secret from a given file_path, logging warnings if multiple are supplied. - if let Some(secret_files) = cli_args.value_of("execution-jwt") { + if let Some(secret_files) = cli_args.get_one::("execution-jwt") { secret_file = parse_only_one_value(secret_files, PathBuf::from_str, "--execution-jwt", log)?; // Check if the JWT secret key is passed directly via cli flag and persist it to the default // file location. - } else if let Some(jwt_secret_key) = cli_args.value_of("execution-jwt-secret-key") { + } else if let Some(jwt_secret_key) = cli_args.get_one::("execution-jwt-secret-key") + { use std::fs::File; use std::io::Write; secret_file = client_config.data_dir().join(DEFAULT_JWT_FILE); @@ -321,23 +322,27 @@ pub fn get_config( } // Parse and set the payload builder, if any. 
- if let Some(endpoint) = cli_args.value_of("builder") { + if let Some(endpoint) = cli_args.get_one::("builder") { let payload_builder = parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder", log)?; el_config.builder_url = Some(payload_builder); el_config.builder_user_agent = clap_utils::parse_optional(cli_args, "builder-user-agent")?; + + el_config.builder_header_timeout = + clap_utils::parse_optional(cli_args, "builder-header-timeout")? + .map(Duration::from_millis); } - if cli_args.is_present("builder-profit-threshold") { + if parse_flag(cli_args, "builder-profit-threshold") { warn!( log, "Ignoring --builder-profit-threshold"; "info" => "this flag is deprecated and will be removed" ); } - if cli_args.is_present("always-prefer-builder-payload") { + if cli_args.get_flag("always-prefer-builder-payload") { warn!( log, "Ignoring --always-prefer-builder-payload"; @@ -380,7 +385,8 @@ pub fn get_config( .map_err(|e| format!("Unable to read trusted setup file: {}", e))?; // Override default trusted setup file if required - if let Some(trusted_setup_file_path) = cli_args.value_of("trusted-setup-file-override") { + if let Some(trusted_setup_file_path) = cli_args.get_one::("trusted-setup-file-override") + { let file = std::fs::File::open(trusted_setup_file_path) .map_err(|e| format!("Failed to open trusted setup file: {}", e))?; let trusted_setup: TrustedSetup = serde_json::from_reader(file) @@ -388,11 +394,11 @@ pub fn get_config( client_config.trusted_setup = Some(trusted_setup); } - if let Some(freezer_dir) = cli_args.value_of("freezer-dir") { + if let Some(freezer_dir) = cli_args.get_one::("freezer-dir") { client_config.freezer_db_path = Some(PathBuf::from(freezer_dir)); } - if let Some(blobs_db_dir) = cli_args.value_of("blobs-dir") { + if let Some(blobs_db_dir) = cli_args.get_one::("blobs-dir") { client_config.blobs_db_path = Some(PathBuf::from(blobs_db_dir)); } @@ -400,24 +406,27 @@ pub fn get_config( client_config.store.slots_per_restore_point = sprp; 
client_config.store.slots_per_restore_point_set_explicitly = sprp_explicit; - if let Some(block_cache_size) = cli_args.value_of("block-cache-size") { + if let Some(block_cache_size) = cli_args.get_one::("block-cache-size") { client_config.store.block_cache_size = block_cache_size .parse() .map_err(|_| "block-cache-size is not a valid integer".to_string())?; } - if let Some(cache_size) = clap_utils::parse_optional(cli_args, "state-cache-size")? { - client_config.store.state_cache_size = cache_size; + if let Some(cache_size) = cli_args.get_one::("state-cache-size") { + client_config.store.state_cache_size = cache_size + .parse() + .map_err(|_| "state-cache-size is not a valid integer".to_string())?; } - if let Some(historic_state_cache_size) = cli_args.value_of("historic-state-cache-size") { + if let Some(historic_state_cache_size) = cli_args.get_one::("historic-state-cache-size") + { client_config.store.historic_state_cache_size = historic_state_cache_size .parse() .map_err(|_| "historic-state-cache-size is not a valid integer".to_string())?; } - client_config.store.compact_on_init = cli_args.is_present("compact-db"); - if let Some(compact_on_prune) = cli_args.value_of("auto-compact-db") { + client_config.store.compact_on_init = cli_args.get_flag("compact-db"); + if let Some(compact_on_prune) = cli_args.get_one::("auto-compact-db") { client_config.store.compact_on_prune = compact_on_prune .parse() .map_err(|_| "auto-compact-db takes a boolean".to_string())?; @@ -458,7 +467,7 @@ pub fn get_config( * from lighthouse. * Discovery address is set to localhost by default. 
*/ - if cli_args.is_present("zero-ports") { + if cli_args.get_flag("zero-ports") { client_config.http_api.listen_port = 0; client_config.http_metrics.listen_port = 0; } @@ -524,14 +533,14 @@ pub fn get_config( None }; - client_config.allow_insecure_genesis_sync = cli_args.is_present("allow-insecure-genesis-sync"); + client_config.allow_insecure_genesis_sync = cli_args.get_flag("allow-insecure-genesis-sync"); client_config.genesis = if eth2_network_config.genesis_state_is_known() { // Set up weak subjectivity sync, or start from the hardcoded genesis state. if let (Some(initial_state_path), Some(initial_block_path), opt_initial_blobs_path) = ( - cli_args.value_of("checkpoint-state"), - cli_args.value_of("checkpoint-block"), - cli_args.value_of("checkpoint-blobs"), + cli_args.get_one::("checkpoint-state"), + cli_args.get_one::("checkpoint-block"), + cli_args.get_one::("checkpoint-blobs"), ) { let read = |path: &str| { use std::fs::File; @@ -547,14 +556,14 @@ pub fn get_config( let anchor_state_bytes = read(initial_state_path)?; let anchor_block_bytes = read(initial_block_path)?; - let anchor_blobs_bytes = opt_initial_blobs_path.map(read).transpose()?; + let anchor_blobs_bytes = opt_initial_blobs_path.map(|s| read(s)).transpose()?; ClientGenesis::WeakSubjSszBytes { anchor_state_bytes, anchor_block_bytes, anchor_blobs_bytes, } - } else if let Some(remote_bn_url) = cli_args.value_of("checkpoint-sync-url") { + } else if let Some(remote_bn_url) = cli_args.get_one::("checkpoint-sync-url") { let url = SensitiveUrl::parse(remote_bn_url) .map_err(|e| format!("Invalid checkpoint sync URL: {:?}", e))?; @@ -563,7 +572,7 @@ pub fn get_config( ClientGenesis::GenesisState } } else { - if cli_args.is_present("checkpoint-state") || cli_args.is_present("checkpoint-sync-url") { + if parse_flag(cli_args, "checkpoint-state") || parse_flag(cli_args, "checkpoint-sync-url") { return Err( "Checkpoint sync is not available for this network as no genesis state is known" .to_string(), @@ 
-572,14 +581,14 @@ pub fn get_config( ClientGenesis::DepositContract }; - if cli_args.is_present("reconstruct-historic-states") { + if cli_args.get_flag("reconstruct-historic-states") { client_config.chain.reconstruct_historic_states = true; client_config.chain.genesis_backfill = true; } - let beacon_graffiti = if let Some(graffiti) = cli_args.value_of("graffiti") { + let beacon_graffiti = if let Some(graffiti) = cli_args.get_one::("graffiti") { GraffitiOrigin::UserSpecified(GraffitiString::from_str(graffiti)?.into()) - } else if cli_args.is_present("private") { + } else if cli_args.get_flag("private") { // When 'private' flag is present, use a zero-initialized bytes array. GraffitiOrigin::UserSpecified(GraffitiString::empty().into()) } else { @@ -588,7 +597,7 @@ pub fn get_config( }; client_config.beacon_graffiti = beacon_graffiti; - if let Some(wss_checkpoint) = cli_args.value_of("wss-checkpoint") { + if let Some(wss_checkpoint) = cli_args.get_one::("wss-checkpoint") { let mut split = wss_checkpoint.split(':'); let root_str = split .next() @@ -623,8 +632,8 @@ pub fn get_config( client_config.chain.weak_subjectivity_checkpoint = Some(Checkpoint { epoch, root }) } - if let Some(max_skip_slots) = cli_args.value_of("max-skip-slots") { - client_config.chain.import_max_skip_slots = match max_skip_slots { + if let Some(max_skip_slots) = cli_args.get_one::("max-skip-slots") { + client_config.chain.import_max_skip_slots = match max_skip_slots.as_str() { "none" => None, n => Some( n.parse() @@ -638,8 +647,8 @@ pub fn get_config( spec.gossip_max_size as usize, ); - if cli_args.is_present("slasher") { - let slasher_dir = if let Some(slasher_dir) = cli_args.value_of("slasher-dir") { + if cli_args.get_flag("slasher") { + let slasher_dir = if let Some(slasher_dir) = cli_args.get_one::("slasher-dir") { PathBuf::from(slasher_dir) } else { client_config.data_dir().join("slasher_db") @@ -704,11 +713,11 @@ pub fn get_config( client_config.slasher = Some(slasher_config); } - if 
cli_args.is_present("validator-monitor-auto") { + if cli_args.get_flag("validator-monitor-auto") { client_config.validator_monitor.auto_register = true; } - if let Some(pubkeys) = cli_args.value_of("validator-monitor-pubkeys") { + if let Some(pubkeys) = cli_args.get_one::("validator-monitor-pubkeys") { let pubkeys = pubkeys .split(',') .map(PublicKeyBytes::from_str) @@ -720,7 +729,7 @@ pub fn get_config( .extend_from_slice(&pubkeys); } - if let Some(path) = cli_args.value_of("validator-monitor-file") { + if let Some(path) = cli_args.get_one::("validator-monitor-file") { let string = fs::read(path) .map_err(|e| format!("Unable to read --validator-monitor-file: {}", e)) .and_then(|bytes| { @@ -747,11 +756,11 @@ pub fn get_config( .individual_tracking_threshold = count; } - if cli_args.is_present("disable-lock-timeouts") { + if cli_args.get_flag("disable-lock-timeouts") { client_config.chain.enable_lock_timeouts = false; } - if cli_args.is_present("disable-proposer-reorgs") { + if cli_args.get_flag("disable-proposer-reorgs") { client_config.chain.re_org_head_threshold = None; client_config.chain.re_org_parent_threshold = None; } else { @@ -789,7 +798,7 @@ pub fn get_config( } // Note: This overrides any previous flags that enable this option. - if cli_args.is_present("disable-deposit-contract-sync") { + if cli_args.get_flag("disable-deposit-contract-sync") { client_config.sync_eth1_chain = false; } @@ -801,7 +810,7 @@ pub fn get_config( / DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR }); - client_config.chain.always_prepare_payload = cli_args.is_present("always-prepare-payload"); + client_config.chain.always_prepare_payload = cli_args.get_flag("always-prepare-payload"); if let Some(timeout) = clap_utils::parse_optional(cli_args, "fork-choice-before-proposal-timeout")? 
@@ -809,10 +818,9 @@ pub fn get_config( client_config.chain.fork_choice_before_proposal_timeout_ms = timeout; } - client_config.chain.always_reset_payload_statuses = - cli_args.is_present("reset-payload-statuses"); + client_config.chain.always_reset_payload_statuses = cli_args.get_flag("reset-payload-statuses"); - client_config.chain.paranoid_block_proposal = cli_args.is_present("paranoid-block-proposal"); + client_config.chain.paranoid_block_proposal = cli_args.get_flag("paranoid-block-proposal"); /* * Builder fallback configs. @@ -826,32 +834,32 @@ pub fn get_config( .builder_fallback_epochs_since_finalization = clap_utils::parse_required(cli_args, "builder-fallback-epochs-since-finalization")?; client_config.chain.builder_fallback_disable_checks = - cli_args.is_present("builder-fallback-disable-checks"); + cli_args.get_flag("builder-fallback-disable-checks"); // Graphical user interface config. - if cli_args.is_present("gui") { + if cli_args.get_flag("gui") { client_config.http_api.enabled = true; client_config.validator_monitor.auto_register = true; } // Optimistic finalized sync. client_config.chain.optimistic_finalized_sync = - !cli_args.is_present("disable-optimistic-finalized-sync"); + !cli_args.get_flag("disable-optimistic-finalized-sync"); - if cli_args.is_present("genesis-backfill") { + if cli_args.get_flag("genesis-backfill") { client_config.chain.genesis_backfill = true; } // Backfill sync rate-limiting client_config.beacon_processor.enable_backfill_rate_limiting = - !cli_args.is_present("disable-backfill-rate-limiting"); + !cli_args.get_flag("disable-backfill-rate-limiting"); if let Some(path) = clap_utils::parse_optional(cli_args, "invalid-gossip-verified-blocks-path")? 
{ client_config.network.invalid_block_storage = Some(path); } - if cli_args.is_present("progressive-balances") { + if cli_args.get_one::("progressive-balances").is_some() { warn!( log, "Progressive balances mode is deprecated"; @@ -890,10 +898,9 @@ pub fn parse_listening_addresses( log: &Logger, ) -> Result { let listen_addresses_str = cli_args - .values_of("listen-address") + .get_many::("listen-address") .expect("--listen_addresses has a default value"); - - let use_zero_ports = cli_args.is_present("zero-ports"); + let use_zero_ports = parse_flag(cli_args, "zero-ports"); // parse the possible ips let mut maybe_ipv4 = None; @@ -927,28 +934,28 @@ pub fn parse_listening_addresses( // parse the possible tcp ports let port = cli_args - .value_of("port") + .get_one::("port") .expect("--port has a default value") .parse::() .map_err(|parse_error| format!("Failed to parse --port as an integer: {parse_error}"))?; let port6 = cli_args - .value_of("port6") - .map(str::parse::) + .get_one::("port6") + .map(|s| str::parse::(s)) .transpose() .map_err(|parse_error| format!("Failed to parse --port6 as an integer: {parse_error}"))? .unwrap_or(9090); // parse the possible discovery ports. let maybe_disc_port = cli_args - .value_of("discovery-port") - .map(str::parse::) + .get_one::("discovery-port") + .map(|s| str::parse::(s)) .transpose() .map_err(|parse_error| { format!("Failed to parse --discovery-port as an integer: {parse_error}") })?; let maybe_disc6_port = cli_args - .value_of("discovery-port6") - .map(str::parse::) + .get_one::("discovery-port6") + .map(|s| str::parse::(s)) .transpose() .map_err(|parse_error| { format!("Failed to parse --discovery-port6 as an integer: {parse_error}") @@ -956,8 +963,8 @@ pub fn parse_listening_addresses( // parse the possible quic port. 
let maybe_quic_port = cli_args - .value_of("quic-port") - .map(str::parse::) + .get_one::("quic-port") + .map(|s| str::parse::(s)) .transpose() .map_err(|parse_error| { format!("Failed to parse --quic-port as an integer: {parse_error}") @@ -965,8 +972,8 @@ pub fn parse_listening_addresses( // parse the possible quic port. let maybe_quic6_port = cli_args - .value_of("quic-port6") - .map(str::parse::) + .get_one::("quic-port6") + .map(|s| str::parse::(s)) .transpose() .map_err(|parse_error| { format!("Failed to parse --quic6-port as an integer: {parse_error}") @@ -980,10 +987,10 @@ pub fn parse_listening_addresses( } (None, Some(ipv6)) => { // A single ipv6 address was provided. Set the ports - - if cli_args.is_present("port6") { - warn!(log, "When listening only over IPv6, use the --port flag. The value of --port6 will be ignored.") + if cli_args.value_source("port6") == Some(ValueSource::CommandLine) { + warn!(log, "When listening only over IPv6, use the --port flag. The value of --port6 will be ignored."); } + // use zero ports if required. If not, use the given port. let tcp_port = use_zero_ports .then(unused_port::unused_tcp6_port) @@ -1117,41 +1124,41 @@ pub fn set_network_config( log: &Logger, ) -> Result<(), String> { // If a network dir has been specified, override the `datadir` definition. 
- if let Some(dir) = cli_args.value_of("network-dir") { + if let Some(dir) = cli_args.get_one::("network-dir") { config.network_dir = PathBuf::from(dir); } else { config.network_dir = data_dir.join(DEFAULT_NETWORK_DIR); }; - if cli_args.is_present("subscribe-all-subnets") { + if parse_flag(cli_args, "subscribe-all-subnets") { config.subscribe_all_subnets = true; } - if cli_args.is_present("import-all-attestations") { + if parse_flag(cli_args, "import-all-attestations") { config.import_all_attestations = true; } - if cli_args.is_present("shutdown-after-sync") { + if parse_flag(cli_args, "shutdown-after-sync") { config.shutdown_after_sync = true; } config.set_listening_addr(parse_listening_addresses(cli_args, log)?); // A custom target-peers command will overwrite the --proposer-only default. - if let Some(target_peers_str) = cli_args.value_of("target-peers") { + if let Some(target_peers_str) = cli_args.get_one::("target-peers") { config.target_peers = target_peers_str .parse::() .map_err(|_| format!("Invalid number of target peers: {}", target_peers_str))?; } - if let Some(value) = cli_args.value_of("network-load") { + if let Some(value) = cli_args.get_one::("network-load") { let network_load = value .parse::() .map_err(|_| format!("Invalid integer: {}", value))?; config.network_load = network_load; } - if let Some(boot_enr_str) = cli_args.value_of("boot-nodes") { + if let Some(boot_enr_str) = cli_args.get_one::("boot-nodes") { let mut enrs: Vec = vec![]; let mut multiaddrs: Vec = vec![]; for addr in boot_enr_str.split(',') { @@ -1176,7 +1183,7 @@ pub fn set_network_config( config.boot_nodes_multiaddr = multiaddrs; } - if let Some(libp2p_addresses_str) = cli_args.value_of("libp2p-addresses") { + if let Some(libp2p_addresses_str) = cli_args.get_one::("libp2p-addresses") { config.libp2p_nodes = libp2p_addresses_str .split(',') .map(|multiaddr| { @@ -1187,11 +1194,11 @@ pub fn set_network_config( .collect::, _>>()?; } - if cli_args.is_present("disable-peer-scoring") { 
+ if parse_flag(cli_args, "disable-peer-scoring") { config.disable_peer_scoring = true; } - if let Some(trusted_peers_str) = cli_args.value_of("trusted-peers") { + if let Some(trusted_peers_str) = cli_args.get_one::("trusted-peers") { config.trusted_peers = trusted_peers_str .split(',') .map(|peer_id| { @@ -1205,7 +1212,7 @@ pub fn set_network_config( } } - if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") { + if let Some(enr_udp_port_str) = cli_args.get_one::("enr-udp-port") { config.enr_udp4_port = Some( enr_udp_port_str .parse::() @@ -1213,7 +1220,7 @@ pub fn set_network_config( ); } - if let Some(enr_quic_port_str) = cli_args.value_of("enr-quic-port") { + if let Some(enr_quic_port_str) = cli_args.get_one::("enr-quic-port") { config.enr_quic4_port = Some( enr_quic_port_str .parse::() @@ -1221,7 +1228,7 @@ pub fn set_network_config( ); } - if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp-port") { + if let Some(enr_tcp_port_str) = cli_args.get_one::("enr-tcp-port") { config.enr_tcp4_port = Some( enr_tcp_port_str .parse::() @@ -1229,7 +1236,7 @@ pub fn set_network_config( ); } - if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp6-port") { + if let Some(enr_udp_port_str) = cli_args.get_one::("enr-udp6-port") { config.enr_udp6_port = Some( enr_udp_port_str .parse::() @@ -1237,7 +1244,7 @@ pub fn set_network_config( ); } - if let Some(enr_quic_port_str) = cli_args.value_of("enr-quic6-port") { + if let Some(enr_quic_port_str) = cli_args.get_one::("enr-quic6-port") { config.enr_quic6_port = Some( enr_quic_port_str .parse::() @@ -1245,7 +1252,7 @@ pub fn set_network_config( ); } - if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp6-port") { + if let Some(enr_tcp_port_str) = cli_args.get_one::("enr-tcp6-port") { config.enr_tcp6_port = Some( enr_tcp_port_str .parse::() @@ -1253,7 +1260,7 @@ pub fn set_network_config( ); } - if cli_args.is_present("enr-match") { + if parse_flag(cli_args, "enr-match") { // Match the IP and UDP port 
in the ENR. if let Some(ipv4_addr) = config.listen_addrs().v4().cloned() { @@ -1291,7 +1298,7 @@ pub fn set_network_config( } } - if let Some(enr_addresses) = cli_args.values_of("enr-address") { + if let Some(enr_addresses) = cli_args.get_many::("enr-address") { let mut enr_ip4 = None; let mut enr_ip6 = None; let mut resolved_enr_ip4 = None; @@ -1369,79 +1376,78 @@ pub fn set_network_config( } } - if cli_args.is_present("disable-enr-auto-update") { + if parse_flag(cli_args, "disable-enr-auto-update") { config.discv5_config.enr_update = false; } - if cli_args.is_present("disable-packet-filter") { + if parse_flag(cli_args, "disable-packet-filter") { warn!(log, "Discv5 packet filter is disabled"); config.discv5_config.enable_packet_filter = false; } - if cli_args.is_present("disable-discovery") { + if parse_flag(cli_args, "disable-discovery") { config.disable_discovery = true; warn!(log, "Discovery is disabled. New peers will not be found"); } - if cli_args.is_present("disable-quic") { + if parse_flag(cli_args, "disable-quic") { config.disable_quic_support = true; } - if cli_args.is_present("disable-upnp") { + if parse_flag(cli_args, "disable-upnp") { config.upnp_enabled = false; } - if cli_args.is_present("private") { + if parse_flag(cli_args, "private") { config.private = true; } - if cli_args.is_present("metrics") { + if parse_flag(cli_args, "metrics") { config.metrics_enabled = true; } - if cli_args.is_present("enable-private-discovery") { + if parse_flag(cli_args, "enable-private-discovery") { config.discv5_config.table_filter = |_| true; } // Light client server config. - config.enable_light_client_server = cli_args.is_present("light-client-server"); + config.enable_light_client_server = parse_flag(cli_args, "light-client-server"); - // The self limiter is disabled by default. - // This flag can be used both with or without a value. Try to parse it first with a value, if - // no value is defined but the flag is present, use the default params. 
- config.outbound_rate_limiter_config = clap_utils::parse_optional(cli_args, "self-limiter")?; - if cli_args.is_present("self-limiter") && config.outbound_rate_limiter_config.is_none() { - config.outbound_rate_limiter_config = Some(Default::default()); + // The self limiter is disabled by default. If the `self-limiter` flag is provided + // without the `self-limiter-protocols` flag, the default params will be used. + if parse_flag(cli_args, "self-limiter") { + config.outbound_rate_limiter_config = + if let Some(protocols) = cli_args.get_one::("self-limiter-protocols") { + Some(protocols.parse()?) + } else { + Some(Default::default()) + }; } // Proposer-only mode overrides a number of previous configuration parameters. // Specifically, we avoid subscribing to long-lived subnets and wish to maintain a minimal set // of peers. - if cli_args.is_present("proposer-only") { + if parse_flag(cli_args, "proposer-only") { config.subscribe_all_subnets = false; - if cli_args.value_of("target-peers").is_none() { + if cli_args.get_one::("target-peers").is_none() { // If a custom value is not set, change the default to 15 config.target_peers = 15; } config.proposer_only = true; warn!(log, "Proposer-only mode enabled"; "info"=> "Do not connect a validator client to this node unless via the --proposer-nodes flag"); } - // The inbound rate limiter is enabled by default unless `disabled` is passed to the - // `inbound-rate-limiter` flag. Any other value should be parsed as a configuration string. - config.inbound_rate_limiter_config = match cli_args.value_of("inbound-rate-limiter") { - None => { - // Enabled by default, with default values + // The inbound rate limiter is enabled by default unless `disabled` via the + // `disable-inbound-rate-limiter` flag. 
+ config.inbound_rate_limiter_config = if parse_flag(cli_args, "disable-inbound-rate-limiter") { + None + } else { + // Use the default unless values are provided via the `inbound-rate-limiter-protocols` + if let Some(protocols) = cli_args.get_one::("inbound-rate-limiter-protocols") { + Some(protocols.parse()?) + } else { Some(Default::default()) } - Some("disabled") => { - // Explicitly disabled - None - } - Some(config_str) => { - // Enabled with a custom configuration - Some(config_str.parse()?) - } }; Ok(()) } @@ -1454,7 +1460,7 @@ pub fn get_data_dir(cli_args: &ArgMatches) -> PathBuf { // directory and the testnet name onto it. cli_args - .value_of("datadir") + .get_one::("datadir") .map(|path| PathBuf::from(path).join(DEFAULT_BEACON_NODE_DIR)) .or_else(|| { dirs::home_dir().map(|home| { diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index ee782c650e..4ca084c316 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -1,4 +1,3 @@ -#[macro_use] extern crate clap; mod cli; @@ -44,7 +43,7 @@ impl ProductionBeaconNode { /// configurations hosted remotely. 
pub async fn new_from_cli( context: RuntimeContext, - matches: ArgMatches<'static>, + matches: ArgMatches, ) -> Result { let client_config = get_config::(&matches, &context)?; Self::new(context, client_config).await diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index b782267007..7bf1ef76be 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -25,7 +25,3 @@ lru = { workspace = true } sloggers = { workspace = true } directory = { workspace = true } strum = { workspace = true } -safe_arith = { workspace = true } -bls = { workspace = true } -smallvec = { workspace = true } -logging = { workspace = true } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 484a1139bf..9c247c983a 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -2485,6 +2485,57 @@ impl, Cold: ItemStore> HotColdDB Ok(()) } + + /// Prune states from the hot database which are prior to the split. + /// + /// This routine is important for cleaning up advanced states which are stored in the database + /// with a temporary flag. 
+ pub fn prune_old_hot_states(&self) -> Result<(), Error> { + let split = self.get_split_info(); + debug!( + self.log, + "Database state pruning started"; + "split_slot" => split.slot, + ); + let mut state_delete_batch = vec![]; + for res in self + .hot_db + .iter_column::(DBColumn::BeaconStateSummary) + { + let (state_root, summary_bytes) = res?; + let summary = HotStateSummary::from_ssz_bytes(&summary_bytes)?; + + if summary.slot <= split.slot { + let old = summary.slot < split.slot; + let non_canonical = summary.slot == split.slot + && state_root != split.state_root + && !split.state_root.is_zero(); + if old || non_canonical { + let reason = if old { + "old dangling state" + } else { + "non-canonical" + }; + debug!( + self.log, + "Deleting state"; + "state_root" => ?state_root, + "slot" => summary.slot, + "reason" => reason, + ); + state_delete_batch.push(StoreOp::DeleteState(state_root, Some(summary.slot))); + } + } + } + let num_deleted_states = state_delete_batch.len(); + self.do_atomically_with_block_and_blobs_cache(state_delete_batch)?; + debug!( + self.log, + "Database state pruning complete"; + "num_deleted_states" => num_deleted_states, + ); + Ok(()) + } } /// Advance the split point of the store, moving new finalized states to the freezer. 
diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index c2a15c0266..e56d0580ac 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -118,6 +118,29 @@ where #[ssz(skip_serializing, skip_deserializing)] #[superstruct(only(Capella, Deneb, Electra))] pub historical_summaries: Option>, + + // Electra + #[superstruct(only(Electra))] + pub deposit_receipts_start_index: u64, + #[superstruct(only(Electra))] + pub deposit_balance_to_consume: u64, + #[superstruct(only(Electra))] + pub exit_balance_to_consume: u64, + #[superstruct(only(Electra))] + pub earliest_exit_epoch: Epoch, + #[superstruct(only(Electra))] + pub consolidation_balance_to_consume: u64, + #[superstruct(only(Electra))] + pub earliest_consolidation_epoch: Epoch, + + // TODO(electra) should these be optional? + #[superstruct(only(Electra))] + pub pending_balance_deposits: List, + #[superstruct(only(Electra))] + pub pending_partial_withdrawals: + List, + #[superstruct(only(Electra))] + pub pending_consolidations: List, } /// Implement the conversion function from BeaconState -> PartialBeaconState. 
@@ -261,7 +284,16 @@ impl PartialBeaconState { inactivity_scores, latest_execution_payload_header, next_withdrawal_index, - next_withdrawal_validator_index + next_withdrawal_validator_index, + deposit_receipts_start_index, + deposit_balance_to_consume, + exit_balance_to_consume, + earliest_exit_epoch, + consolidation_balance_to_consume, + earliest_consolidation_epoch, + pending_balance_deposits, + pending_partial_withdrawals, + pending_consolidations ], [historical_summaries] ), @@ -525,7 +557,16 @@ impl TryInto> for PartialBeaconState { inactivity_scores, latest_execution_payload_header, next_withdrawal_index, - next_withdrawal_validator_index + next_withdrawal_validator_index, + deposit_receipts_start_index, + deposit_balance_to_consume, + exit_balance_to_consume, + earliest_exit_epoch, + consolidation_balance_to_consume, + earliest_consolidation_epoch, + pending_balance_deposits, + pending_partial_withdrawals, + pending_consolidations ], [historical_summaries] ), diff --git a/book/.markdownlint.yml b/book/.markdownlint.yml new file mode 100644 index 0000000000..5d6bda29f1 --- /dev/null +++ b/book/.markdownlint.yml @@ -0,0 +1,28 @@ +# MD010: https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md010---hard-tabs +MD010: + # Set code blocks to false so that code blocks will be ignored, default is true + code_blocks: false + +#MD013 line length: https://github.com/DavidAnson/markdownlint/blob/main/doc/md013.md +# Set to false as this will also interfere with help_x.md files, and it is not necessary to comply with the line length of 80 +MD013: false + +# MD028: set to false to allow blank line between blockquote: https://github.com/DavidAnson/markdownlint/blob/main/doc/md028.md +# This is because the blockquotes are shown separately (a desired outcome) when having a blank line in between +MD028: false + +# MD024: set siblings_only to true so that same headings with different parent headings are allowed +#
https://github.com/DavidAnson/markdownlint/blob/main/doc/md024.md +MD024: + siblings_only: true + +# MD033 in-line html: https://github.com/DavidAnson/markdownlint/blob/main/doc/md033.md +# In-line html is fine in the markdown files, so this is set to false +MD033: false + +# MD036 set to false to preserve the emphasis on deprecation notice on key-management.md (a heading is not necessary) +MD036: false + +# MD040 code blocks should have a language specified: https://github.com/DavidAnson/markdownlint/blob/main/doc/md040.md +# Set to false as the help_x.md files are code blocks without a language specified, which is fine and does not need to change +MD040: false \ No newline at end of file diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 1a35d9d139..7fb0b2f4e7 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -2,66 +2,66 @@ * [Introduction](./intro.md) * [Installation](./installation.md) - * [Pre-Built Binaries](./installation-binaries.md) - * [Docker](./docker.md) - * [Build from Source](./installation-source.md) - * [Raspberry Pi 4](./pi.md) - * [Cross-Compiling](./cross-compiling.md) - * [Homebrew](./homebrew.md) - * [Update Priorities](./installation-priorities.md) + * [Pre-Built Binaries](./installation-binaries.md) + * [Docker](./docker.md) + * [Build from Source](./installation-source.md) + * [Raspberry Pi 4](./pi.md) + * [Cross-Compiling](./cross-compiling.md) + * [Homebrew](./homebrew.md) + * [Update Priorities](./installation-priorities.md) * [Run a Node](./run_a_node.md) * [Become a Validator](./mainnet-validator.md) * [Validator Management](./validator-management.md) - * [The `validator-manager` Command](./validator-manager.md) - * [Creating validators](./validator-manager-create.md) - * [Moving validators](./validator-manager-move.md) - * [Slashing Protection](./slashing-protection.md) - * [Voluntary Exits](./voluntary-exit.md) - * [Partial Withdrawals](./partial-withdrawal.md) - * [Validator 
Monitoring](./validator-monitoring.md) - * [Doppelganger Protection](./validator-doppelganger.md) - * [Suggested Fee Recipient](./suggested-fee-recipient.md) - * [Validator Graffiti](./graffiti.md) + * [The `validator-manager` Command](./validator-manager.md) + * [Creating validators](./validator-manager-create.md) + * [Moving validators](./validator-manager-move.md) + * [Slashing Protection](./slashing-protection.md) + * [Voluntary Exits](./voluntary-exit.md) + * [Partial Withdrawals](./partial-withdrawal.md) + * [Validator Monitoring](./validator-monitoring.md) + * [Doppelganger Protection](./validator-doppelganger.md) + * [Suggested Fee Recipient](./suggested-fee-recipient.md) + * [Validator Graffiti](./graffiti.md) * [APIs](./api.md) - * [Beacon Node API](./api-bn.md) - * [Lighthouse API](./api-lighthouse.md) - * [Validator Inclusion APIs](./validator-inclusion.md) - * [Validator Client API](./api-vc.md) - * [Endpoints](./api-vc-endpoints.md) - * [Authorization Header](./api-vc-auth-header.md) - * [Signature Header](./api-vc-sig-header.md) - * [Prometheus Metrics](./advanced_metrics.md) + * [Beacon Node API](./api-bn.md) + * [Lighthouse API](./api-lighthouse.md) + * [Validator Inclusion APIs](./validator-inclusion.md) + * [Validator Client API](./api-vc.md) + * [Endpoints](./api-vc-endpoints.md) + * [Authorization Header](./api-vc-auth-header.md) + * [Signature Header](./api-vc-sig-header.md) + * [Prometheus Metrics](./advanced_metrics.md) * [Lighthouse UI (Siren)](./lighthouse-ui.md) - * [Installation](./ui-installation.md) - * [Authentication](./ui-authentication.md) - * [Configuration](./ui-configuration.md) - * [Usage](./ui-usage.md) - * [FAQs](./ui-faqs.md) + * [Installation](./ui-installation.md) + * [Authentication](./ui-authentication.md) + * [Configuration](./ui-configuration.md) + * [Usage](./ui-usage.md) + * [FAQs](./ui-faqs.md) * [Advanced Usage](./advanced.md) - * [Checkpoint Sync](./checkpoint-sync.md) - * [Custom Data 
Directories](./advanced-datadir.md) - * [Proposer Only Beacon Nodes](./advanced-proposer-only.md) - * [Remote Signing with Web3Signer](./validator-web3signer.md) - * [Database Configuration](./advanced_database.md) - * [Database Migrations](./database-migrations.md) - * [Key Management (Deprecated)](./key-management.md) - * [Key Recovery](./key-recovery.md) - * [Advanced Networking](./advanced_networking.md) - * [Running a Slasher](./slasher.md) - * [Redundancy](./redundancy.md) - * [Release Candidates](./advanced-release-candidates.md) - * [MEV](./builders.md) - * [Merge Migration](./merge-migration.md) - * [Late Block Re-orgs](./late-block-re-orgs.md) - * [Blobs](./advanced-blobs.md) + * [Checkpoint Sync](./checkpoint-sync.md) + * [Custom Data Directories](./advanced-datadir.md) + * [Proposer Only Beacon Nodes](./advanced-proposer-only.md) + * [Remote Signing with Web3Signer](./validator-web3signer.md) + * [Database Configuration](./advanced_database.md) + * [Database Migrations](./database-migrations.md) + * [Key Management (Deprecated)](./key-management.md) + * [Key Recovery](./key-recovery.md) + * [Advanced Networking](./advanced_networking.md) + * [Running a Slasher](./slasher.md) + * [Redundancy](./redundancy.md) + * [Release Candidates](./advanced-release-candidates.md) + * [MEV](./builders.md) + * [Merge Migration](./merge-migration.md) + * [Late Block Re-orgs](./late-block-re-orgs.md) + * [Blobs](./advanced-blobs.md) * [Built-In Documentation](./help_general.md) - * [Beacon Node](./help_bn.md) - * [Validator Client](./help_vc.md) - * [Validator Manager](./help_vm.md) - * [Create](./help_vm_create.md) - * [Import](./help_vm_import.md) - * [Move](./help_vm_move.md) + * [Beacon Node](./help_bn.md) + * [Validator Client](./help_vc.md) + * [Validator Manager](./help_vm.md) + * [Create](./help_vm_create.md) + * [Import](./help_vm_import.md) + * [Move](./help_vm_move.md) * [Contributing](./contributing.md) - * [Development Environment](./setup.md) + * 
[Development Environment](./setup.md) * [FAQs](./faq.md) -* [Protocol Developers](./developers.md) \ No newline at end of file +* [Protocol Developers](./developers.md) diff --git a/book/src/advanced-blobs.md b/book/src/advanced-blobs.md index eee404a9be..785bd5797d 100644 --- a/book/src/advanced-blobs.md +++ b/book/src/advanced-blobs.md @@ -1,8 +1,8 @@ # Blobs -In the Deneb network upgrade, one of the changes is the implementation of EIP-4844, also known as [Proto-danksharding](https://blog.ethereum.org/2024/02/27/dencun-mainnet-announcement). Alongside with this, a new term named `blob` (binary large object) is introduced. Blobs are "side-cars" carrying transaction data in a block. They are mainly used by Ethereum layer 2 operators. As far as stakers are concerned, the main difference with the introduction of blobs is the increased storage requirement. +In the Deneb network upgrade, one of the changes is the implementation of EIP-4844, also known as [Proto-danksharding](https://blog.ethereum.org/2024/02/27/dencun-mainnet-announcement). Alongside with this, a new term named `blob` (binary large object) is introduced. Blobs are "side-cars" carrying transaction data in a block. They are mainly used by Ethereum layer 2 operators. As far as stakers are concerned, the main difference with the introduction of blobs is the increased storage requirement. -### FAQ +## FAQ 1. What is the storage requirement for blobs? @@ -10,33 +10,32 @@ In the Deneb network upgrade, one of the changes is the implementation of EIP-48 One blob is 128 KB in size. Each block can carry a maximum of 6 blobs. Blobs will be kept for 4096 epochs and pruned afterwards. This means that the maximum increase in storage requirement will be: - ``` + ```text 2**17 bytes / blob * 6 blobs / block * 32 blocks / epoch * 4096 epochs = 96 GB ``` However, the blob base fee targets 3 blobs per block and it works similarly to how EIP-1559 operates in the Ethereum gas fee. 
Therefore, practically it is very likely to average to 3 blobs per blocks, which translates to a storage requirement of 48 GB. - 1. Do I have to add any flags for blobs? - No, you can use the default values for blob-related flags, which means you do not need add or remove any flags. + No, you can use the default values for blob-related flags, which means you do not need to add or remove any flags. 1. What if I want to keep all blobs? Use the flag `--prune-blobs false` in the beacon node. The storage requirement will be: - ``` + ```text 2**17 bytes * 3 blobs / block * 7200 blocks / day * 30 days = 79GB / month or 948GB / year ``` - + To keep blobs for a custom period, you may use the flag `--blob-prune-margin-epochs ` which keeps blobs for 4096+EPOCHS specified in the flag. 1. How to see the info of the blobs database? - We can call the API: + We can call the API: ```bash curl "http://localhost:5052/lighthouse/database/info" | jq ``` - Refer to [Lighthouse API](./api-lighthouse.md#lighthousedatabaseinfo) for an example response. \ No newline at end of file + Refer to [Lighthouse API](./api-lighthouse.md#lighthousedatabaseinfo) for an example response. diff --git a/book/src/advanced-datadir.md b/book/src/advanced-datadir.md index 074857346e..7ad993a107 100644 --- a/book/src/advanced-datadir.md +++ b/book/src/advanced-datadir.md @@ -1,4 +1,4 @@ -## Custom Data Directories +# Custom Data Directories Users can override the default Lighthouse data directories (e.g., `~/.lighthouse/mainnet`) using the `--datadir` flag. The custom data directory mirrors the structure of any network specific default directory (e.g. `~/.lighthouse/mainnet`).
@@ -11,10 +11,11 @@ lighthouse --network mainnet --datadir /var/lib/my-custom-dir account validator lighthouse --network mainnet --datadir /var/lib/my-custom-dir bn --staking lighthouse --network mainnet --datadir /var/lib/my-custom-dir vc ``` + The first step creates a `validators` directory under `/var/lib/my-custom-dir` which contains the imported keys and [`validator_definitions.yml`](./validator-management.md). After that, we simply run the beacon chain and validator client with the custom dir path. -### Relative Paths +## Relative Paths [#2682]: https://github.com/sigp/lighthouse/pull/2682 [#2846]: https://github.com/sigp/lighthouse/pull/2846 @@ -40,7 +41,7 @@ be applied. On start-up, if a split directory scenario is detected (i.e. `~/here Lighthouse will continue to operate with split directories. In such a scenario, the following harmless log will show: -``` +```text WARN Legacy datadir location location: "/home/user/datadir/beacon", msg: this occurs when using relative paths for a datadir location ``` diff --git a/book/src/advanced-proposer-only.md b/book/src/advanced-proposer-only.md index c3347e044b..1ea3610988 100644 --- a/book/src/advanced-proposer-only.md +++ b/book/src/advanced-proposer-only.md @@ -2,7 +2,7 @@ Lighthouse allows for more exotic setups that can minimize attack vectors by adding redundant beacon nodes and dividing the roles of attesting and block -production between them. +production between them. The purpose of this is to minimize attack vectors where malicious users obtain the network identities (IP addresses) of beacon @@ -24,7 +24,7 @@ harder to identify as a potential node to attack and will also consume less resources. 
Specifically, this flag reduces the default peer count (to a safe minimal -number as maintaining peers on attestation subnets do not need to be considered), +number as maintaining peers on attestation subnets do not need to be considered), prevents the node from subscribing to any attestation-subnets or sync-committees which is a primary way for attackers to de-anonymize validators. @@ -34,7 +34,6 @@ validators. > normal beacon node, the validator may fail to handle its duties correctly and > result in a loss of income. - ## The Validator Client The validator client can be given a list of HTTP API endpoints representing @@ -53,7 +52,6 @@ these nodes for added security). > producing a more profitable block. Any block builders should therefore be > attached to the `--beacon-nodes` and not necessarily the `--proposer-nodes`. - ## Setup Overview The intended set-up to take advantage of this mechanism is to run one (or more) diff --git a/book/src/advanced-release-candidates.md b/book/src/advanced-release-candidates.md index a539aa489c..9f00da9ae9 100644 --- a/book/src/advanced-release-candidates.md +++ b/book/src/advanced-release-candidates.md @@ -20,7 +20,7 @@ you're looking for stable Lighthouse**. From time to time, Lighthouse may use the terms "release candidate" and "pre release" interchangeably. A pre release is identical to a release candidate. -### Examples +## Examples [`v1.4.0-rc.0`] has `rc` in the version string and is therefore a release candidate. This release is *not* stable and is *not* intended for critical tasks on mainnet (e.g., staking). @@ -36,9 +36,8 @@ Users may wish to try a release candidate for the following reasons: - To help detect bugs and regressions before they reach production. - To provide feedback on annoyances before they make it into a release and become harder to change or revert. -There can also be a scenario that a bug has been found and requires an urgent fix. 
An example of incidence is [v4.0.2-rc.0](https://github.com/sigp/lighthouse/releases/tag/v4.0.2-rc.0) which contains a hot-fix to address high CPU usage experienced after the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023. In this scenario, we will announce the release candidate on [Github](https://github.com/sigp/lighthouse/releases) and also on [Discord](https://discord.gg/cyAszAh) to recommend users to update to the release candidate version. +There can also be a scenario that a bug has been found and requires an urgent fix. An example of incidence is [v4.0.2-rc.0](https://github.com/sigp/lighthouse/releases/tag/v4.0.2-rc.0) which contains a hot-fix to address high CPU usage experienced after the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023. In this scenario, we will announce the release candidate on [Github](https://github.com/sigp/lighthouse/releases) and also on [Discord](https://discord.gg/cyAszAh) to recommend users to update to the release candidate version. ## When *not* to use a release candidate Other than the above scenarios, it is generally not recommended to use release candidates for any critical tasks on mainnet (e.g., staking). To test new release candidate features, try one of the testnets (e.g., Holesky). - diff --git a/book/src/advanced.md b/book/src/advanced.md index 21e732afa1..1a882835a4 100644 --- a/book/src/advanced.md +++ b/book/src/advanced.md @@ -15,7 +15,7 @@ tips about how things work under the hood. * [Key Management](./key-management.md): explore how to generate wallet with Lighthouse. * [Key Recovery](./key-recovery.md): explore how to recover wallet and validator with Lighthouse. * [Advanced Networking](./advanced_networking.md): open your ports to have a diverse and healthy set of peers. -* [Running a Slasher](./slasher.md): contribute to the health of the network by running a slasher. 
+* [Running a Slasher](./slasher.md): contribute to the health of the network by running a slasher. * [Redundancy](./redundancy.md): want to have more than one beacon node as backup? This is for you. * [Release Candidates](./advanced-release-candidates.md): latest release of Lighthouse to get feedback from users. * [Maximal Extractable Value](./builders.md): use external builders for a potential higher rewards during block proposals diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index f65fb10415..345fff6981 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -29,7 +29,7 @@ some example values. | Enthusiast (prev. default) | 2048 | hundreds of GB | 10.2 s | | Validator only (default) | 8192 | tens of GB | 41 s | -*Last update: Dec 2023. +*Last update: Dec 2023. As we can see, it's a high-stakes trade-off! The relationships to disk usage and historical state load time are both linear – doubling SPRP halves disk usage and doubles load time. The minimum SPRP @@ -40,9 +40,9 @@ The default value is 8192 for databases synced from scratch using Lighthouse v2. The values shown in the table are approximate, calculated using a simple heuristic: each `BeaconState` consumes around 145MB of disk space, and each block replayed takes around 5ms. The -**Yearly Disk Usage** column shows the approximate size of the freezer DB _alone_ (hot DB not included), calculated proportionally using the total freezer database disk usage. +**Yearly Disk Usage** column shows the approximate size of the freezer DB _alone_ (hot DB not included), calculated proportionally using the total freezer database disk usage. The **Load Historical State** time is the worst-case load time for a state in the last slot -before a restore point. +before a restore point. To run a full archival node with fast access to beacon states and a SPRP of 32, the disk usage will be more than 10 TB per year, which is impractical for many users. 
As such, users may consider running the [tree-states](https://github.com/sigp/lighthouse/releases/tag/v5.0.111-exp) release, which only uses less than 200 GB for a full archival node. The caveat is that it is currently experimental and in alpha release (as of Dec 2023), thus not recommended for running mainnet validators. Nevertheless, it is suitable to be used for analysis purposes, and if you encounter any issues in tree-states, we do appreciate any feedback. We plan to have a stable release of tree-states in 1H 2024. diff --git a/book/src/advanced_metrics.md b/book/src/advanced_metrics.md index 3141f336a1..323ba8f58a 100644 --- a/book/src/advanced_metrics.md +++ b/book/src/advanced_metrics.md @@ -30,7 +30,6 @@ curl localhost:5054/metrics ## Validator Client Metrics - By default, these metrics are disabled but can be enabled with the `--metrics` flag. Use the `--metrics-address`, `--metrics-port` and `--metrics-allow-origin` flags to customize the metrics server. @@ -78,7 +77,7 @@ You can adjust the frequency at which Lighthouse sends metrics to the remote ser `--monitoring-endpoint-period` flag. It takes an integer value in seconds, defaulting to 60 seconds. -``` +```bash lighthouse bn --monitoring-endpoint-period 60 --monitoring-endpoint "https://url" ``` diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index 5fabf57d56..732b4f51e6 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -5,8 +5,7 @@ be adjusted to handle a variety of network situations. This section outlines some of these configuration parameters and their consequences at the networking level and their general intended use. - -### Target Peers +## Target Peers The beacon node has a `--target-peers` CLI parameter. This allows you to instruct the beacon node how many peers it should try to find and maintain. @@ -38,7 +37,7 @@ large peer count will not speed up sync. 
For these reasons, we recommend users do not modify the `--target-peers` count drastically and use the (recommended) default. -### NAT Traversal (Port Forwarding) +## NAT Traversal (Port Forwarding) Lighthouse, by default, uses port 9000 for both TCP and UDP. Since v4.5.0, Lighthouse will also attempt to make QUIC connections via UDP port 9001 by default. Lighthouse will still function if it is behind a NAT without any port mappings. Although @@ -62,36 +61,39 @@ TCP and UDP ports (9000 TCP/UDP, and 9001 UDP by default). > explicitly specify them using the `--enr-tcp-port` and `--enr-udp-port` as > explained in the following section. -### How to Open Ports +## How to Open Ports The steps to do port forwarding depends on the router, but the general steps are given below: + 1. Determine the default gateway IP: -- On Linux: open a terminal and run `ip route | grep default`, the result should look something similar to `default via 192.168.50.1 dev wlp2s0 proto dhcp metric 600`. The `192.168.50.1` is your router management default gateway IP. -- On MacOS: open a terminal and run `netstat -nr|grep default` and it should return the default gateway IP. -- On Windows: open a command prompt and run `ipconfig` and look for the `Default Gateway` which will show you the gateway IP. - The default gateway IP usually looks like 192.168.X.X. Once you obtain the IP, enter it to a web browser and it will lead you to the router management page. + - On Linux: open a terminal and run `ip route | grep default`, the result should look something similar to `default via 192.168.50.1 dev wlp2s0 proto dhcp metric 600`. The `192.168.50.1` is your router management default gateway IP. + - On MacOS: open a terminal and run `netstat -nr|grep default` and it should return the default gateway IP. + - On Windows: open a command prompt and run `ipconfig` and look for the `Default Gateway` which will show you the gateway IP. -2. Login to the router management page. 
The login credentials are usually available in the manual or the router, or it can be found on a sticker underneath the router. You can also try the login credentials for some common router brands listed [here](https://www.noip.com/support/knowledgebase/general-port-forwarding-guide/). + The default gateway IP usually looks like 192.168.X.X. Once you obtain the IP, enter it to a web browser and it will lead you to the router management page. -3. Navigate to the port forward settings in your router. The exact step depends on the router, but typically it will fall under the "Advanced" section, under the name "port forwarding" or "virtual server". +1. Login to the router management page. The login credentials are usually available in the manual or the router, or it can be found on a sticker underneath the router. You can also try the login credentials for some common router brands listed [here](https://www.noip.com/support/knowledgebase/general-port-forwarding-guide/). -4. Configure a port forwarding rule as below: -- Protocol: select `TCP/UDP` or `BOTH` -- External port: `9000` -- Internal port: `9000` -- IP address: Usually there is a dropdown list for you to select the device. Choose the device that is running Lighthouse. +1. Navigate to the port forward settings in your router. The exact step depends on the router, but typically it will fall under the "Advanced" section, under the name "port forwarding" or "virtual server". -Since V4.5.0 port 9001/UDP is also used for QUIC support. +1. Configure a port forwarding rule as below: -- Protocol: select `UDP` -- External port: `9001` -- Internal port: `9001` -- IP address: Choose the device that is running Lighthouse. + - Protocol: select `TCP/UDP` or `BOTH` + - External port: `9000` + - Internal port: `9000` + - IP address: Usually there is a dropdown list for you to select the device. Choose the device that is running Lighthouse. -5. 
To check that you have successfully opened the ports, go to [yougetsignal](https://www.yougetsignal.com/tools/open-ports/) and enter `9000` in the `port number`. If it shows "open", then you have successfully set up port forwarding. If it shows "closed", double check your settings, and also check that you have allowed firewall rules on port 9000. Note: this will only confirm if port 9000/TCP is open. You will need to ensure you have correctly set up port forwarding for the UDP ports (`9000` and `9001` by default). + Since V4.5.0 port 9001/UDP is also used for QUIC support. -### ENR Configuration + - Protocol: select `UDP` + - External port: `9001` + - Internal port: `9001` + - IP address: Choose the device that is running Lighthouse. + +1. To check that you have successfully opened the ports, go to [yougetsignal](https://www.yougetsignal.com/tools/open-ports/) and enter `9000` in the `port number`. If it shows "open", then you have successfully set up port forwarding. If it shows "closed", double check your settings, and also check that you have allowed firewall rules on port 9000. Note: this will only confirm if port 9000/TCP is open. You will need to ensure you have correctly set up port forwarding for the UDP ports (`9000` and `9001` by default). + +## ENR Configuration Lighthouse has a number of CLI parameters for constructing and modifying the local Ethereum Node Record (ENR). Examples are `--enr-address`, @@ -113,8 +115,7 @@ harder for peers to find you or potentially making it harder for other peers to find each other. We recommend not touching these settings unless for a more advanced use case. - -### IPv6 support +## IPv6 support As noted in the previous sections, two fundamental parts to ensure good connectivity are: The parameters that configure the sockets over which @@ -122,7 +123,7 @@ Lighthouse listens for connections, and the parameters used to tell other peers how to connect to your node. 
This distinction is relevant and applies to most nodes that do not run directly on a public network. -#### Configuring Lighthouse to listen over IPv4/IPv6/Dual stack +### Configuring Lighthouse to listen over IPv4/IPv6/Dual stack To listen over only IPv6 use the same parameters as done when listening over IPv4 only: @@ -136,6 +137,7 @@ TCP and UDP. This can be configured with `--quic-port`. To listen over both IPv4 and IPv6: + - Set two listening addresses using the `--listen-address` flag twice ensuring the two addresses are one IPv4, and the other IPv6. When doing so, the `--port` and `--discovery-port` flags will apply exclusively to IPv4. Note @@ -149,7 +151,7 @@ To listen over both IPv4 and IPv6: UDP over IPv6. This will default to the value given to `--port6` + 1. This flag has no effect when listening over IPv6 only. -##### Configuration Examples +#### Configuration Examples > When using `--listen-address :: --listen-address 0.0.0.0 --port 9909`, listening will be set up as follows: > @@ -175,7 +177,8 @@ To listen over both IPv4 and IPv6: > It listens on the default value of `--port6` (`9090`) for TCP, and port `9999` for UDP. > QUIC will use port `9091` for UDP, which is the default `--port6` value (`9090`) + 1. -#### Configuring Lighthouse to advertise IPv6 reachable addresses +### Configuring Lighthouse to advertise IPv6 reachable addresses + Lighthouse supports IPv6 to connect to other nodes both over IPv6 exclusively, and dual stack using one socket for IPv4 and another socket for IPv6. In both scenarios, the previous sections still apply. In summary: @@ -205,7 +208,7 @@ In the general case, a user will not require to set these explicitly. Update these options only if you can guarantee your node is reachable with these values. -#### Known caveats +### Known caveats IPv6 link local addresses are likely to have poor connectivity if used in topologies with more than one interface. 
Use global addresses for the general diff --git a/book/src/api-bn.md b/book/src/api-bn.md index 3e57edd8db..e7c900e84d 100644 --- a/book/src/api-bn.md +++ b/book/src/api-bn.md @@ -10,15 +10,15 @@ A Lighthouse beacon node can be configured to expose an HTTP server by supplying The following CLI flags control the HTTP server: - `--http`: enable the HTTP server (required even if the following flags are - provided). + provided). - `--http-port`: specify the listen port of the server. - `--http-address`: specify the listen address of the server. It is _not_ recommended to listen on `0.0.0.0`, please see [Security](#security) below. - `--http-allow-origin`: specify the value of the `Access-Control-Allow-Origin` - header. The default is to not supply a header. + header. The default is to not supply a header. - `--http-enable-tls`: serve the HTTP server over TLS. Must be used with `--http-tls-cert` - and `http-tls-key`. This feature is currently experimental, please see - [Serving the HTTP API over TLS](#serving-the-http-api-over-tls) below. + and `http-tls-key`. This feature is currently experimental, please see + [Serving the HTTP API over TLS](#serving-the-http-api-over-tls) below. - `--http-tls-cert`: specify the path to the certificate file for Lighthouse to use. - `--http-tls-key`: specify the path to the private key file for Lighthouse to use. @@ -38,18 +38,18 @@ the listening address from `localhost` should only be done with extreme care. To safely provide access to the API from a different machine you should use one of the following standard techniques: -* Use an [SSH tunnel][ssh_tunnel], i.e. access `localhost` remotely. This is recommended, and +- Use an [SSH tunnel][ssh_tunnel], i.e. access `localhost` remotely. This is recommended, and doesn't require setting `--http-address`. -* Use a firewall to limit access to certain remote IPs, e.g. allow access only from one other +- Use a firewall to limit access to certain remote IPs, e.g. 
allow access only from one other machine on the local network. -* Shield Lighthouse behind an HTTP server with rate-limiting such as NGINX. This is only +- Shield Lighthouse behind an HTTP server with rate-limiting such as NGINX. This is only recommended for advanced users, e.g. beacon node hosting providers. Additional risks to be aware of include: -* The `node/identity` and `node/peers` endpoints expose information about your node's peer-to-peer +- The `node/identity` and `node/peers` endpoints expose information about your node's peer-to-peer identity. -* The `--http-allow-origin` flag changes the server's CORS policy, allowing cross-site requests +- The `--http-allow-origin` flag changes the server's CORS policy, allowing cross-site requests from browsers. You should only supply it if you understand the risks, e.g. malicious websites accessing your beacon node if you use the same machine for staking and web browsing. @@ -57,7 +57,6 @@ Additional risks to be aware of include: Start a beacon node and an execution node according to [Run a node](./run_a_node.md). Note that since [The Merge](https://ethereum.org/en/roadmap/merge/), an execution client is required to be running along with a beacon node. Hence, the query on Beacon Node APIs requires users to run both. While there are some Beacon Node APIs that you can query with only the beacon node, such as the [node version](https://ethereum.github.io/beacon-APIs/#/Node/getNodeVersion), in general an execution client is required to get the updated information about the beacon chain, such as [state root](https://ethereum.github.io/beacon-APIs/#/Beacon/getStateRoot), [headers](https://ethereum.github.io/beacon-APIs/#/Beacon/getBlockHeaders) and many others, which are dynamically progressing with time. - ## HTTP Request/Response Examples This section contains some simple examples of using the HTTP API via `curl`. 
@@ -124,9 +123,11 @@ curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H " } } ``` + You can replace `1` in the above command with the validator index that you would like to query. Other API query can be done similarly by changing the link according to the Beacon API. ### Events API + The [events API](https://ethereum.github.io/beacon-APIs/#/Events/eventstream) provides information such as the payload attributes that are of interest to block builders and relays. To query the payload attributes, it is necessary to run Lighthouse beacon node with the flag `--always-prepare-payload`. It is also recommended to add the flag `--prepare-payload-lookahead 8000` which configures the payload attributes to be sent at 4s into each slot (or 8s from the start of the next slot). An example of the command is: ```bash @@ -141,8 +142,8 @@ An example of response is: data:{"version":"capella","data":{"proposal_slot":"11047","proposer_index":"336057","parent_block_root":"0x26f8999d270dd4677c2a1c815361707157a531f6c599f78fa942c98b545e1799","parent_block_number":"9259","parent_block_hash":"0x7fb788cd7afa814e578afa00a3edd250cdd4c8e35c22badd327d981b5bda33d2","payload_attributes":{"timestamp":"1696034964","prev_randao":"0xeee34d7a3f6b99ade6c6a881046c9c0e96baab2ed9469102d46eb8d6e4fde14c","suggested_fee_recipient":"0x0000000000000000000000000000000000000001","withdrawals":[{"index":"40705","validator_index":"360712","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1202941"},{"index":"40706","validator_index":"360713","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1201138"},{"index":"40707","validator_index":"360714","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1215255"},{"index":"40708","validator_index":"360715","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1161977"},{"index":"40709","validator_index":"360716","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1257278"},{"index":"40
710","validator_index":"360717","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1247740"},{"index":"40711","validator_index":"360718","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1204337"},{"index":"40712","validator_index":"360719","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1183575"},{"index":"40713","validator_index":"360720","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1157785"},{"index":"40714","validator_index":"360721","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1143371"},{"index":"40715","validator_index":"360722","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1234787"},{"index":"40716","validator_index":"360723","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1286673"},{"index":"40717","validator_index":"360724","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1419241"},{"index":"40718","validator_index":"360725","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1231015"},{"index":"40719","validator_index":"360726","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1304321"},{"index":"40720","validator_index":"360727","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1236543"}]}}} ``` - ## Serving the HTTP API over TLS +> > **Warning**: This feature is currently experimental. The HTTP server can be served over TLS by using the `--http-enable-tls`, @@ -160,10 +161,13 @@ Below is a simple example serving the HTTP API over TLS using a self-signed certificate on Linux: ### Enabling TLS on a beacon node + Generate a self-signed certificate using `openssl`: + ```bash openssl req -x509 -nodes -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -subj "/CN=localhost" ``` + Note that currently Lighthouse only accepts keys that are not password protected. This means we need to run with the `-nodes` flag (short for 'no DES'). 
@@ -180,21 +184,27 @@ lighthouse bn \ --http-tls-cert cert.pem \ --http-tls-key key.pem ``` + Note that the user running Lighthouse must have permission to read the certificate and key. The API is now being served at `https://localhost:5052`. To test connectivity, you can run the following: + ```bash curl -X GET "https://localhost:5052/eth/v1/node/version" -H "accept: application/json" --cacert cert.pem | jq ``` + ### Connecting a validator client + In order to connect a validator client to a beacon node over TLS, the validator client needs to be aware of the certificate. There are two ways to do this: + #### Option 1: Add the certificate to the operating system trust store + The process for this will vary depending on your operating system. Below are the instructions for Ubuntu and Arch Linux: @@ -211,13 +221,16 @@ sudo trust extract-compat ``` Now the validator client can be connected to the beacon node by running: + ```bash lighthouse vc --beacon-nodes https://localhost:5052 ``` #### Option 2: Specify the certificate via CLI + You can also specify any custom certificates via the validator client CLI like so: + ```bash lighthouse vc --beacon-nodes https://localhost:5052 --beacon-nodes-tls-certs cert.pem ``` diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index ce71450987..b63505c490 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -16,12 +16,12 @@ Although we don't recommend that users rely on these endpoints, we document them briefly so they can be utilized by developers and researchers. +## `/lighthouse/health` - -### `/lighthouse/health` *Note: This endpoint is presently only available on Linux.* Returns information regarding the health of the host machine. 
+ ```bash curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/json" | jq ``` @@ -64,7 +64,8 @@ curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/j ``` -### `/lighthouse/ui/health` +## `/lighthouse/ui/health` + Returns information regarding the health of the host machine. ```bash @@ -101,8 +102,10 @@ curl -X GET "http://localhost:5052/lighthouse/ui/health" -H "accept: applicatio } ``` -### `/lighthouse/ui/validator_count` +## `/lighthouse/ui/validator_count` + Returns an overview of validators. + ```bash curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: application/json" | jq ``` @@ -123,9 +126,10 @@ curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: ap } ``` +## `/lighthouse/ui/validator_metrics` -### `/lighthouse/ui/validator_metrics` Re-exposes certain metrics from the validator monitor to the HTTP API. This API requires that the beacon node to have the flag `--validator-monitor-auto`. This API will only return metrics for the validators currently being monitored and present in the POST data, or the validators running in the validator client. + ```bash curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indices": [12345]}' -H "Content-Type: application/json" | jq ``` @@ -150,7 +154,9 @@ curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indic } } ``` + Running this API without the flag `--validator-monitor-auto` in the beacon node will return null: + ```json { "data": { @@ -159,8 +165,10 @@ Running this API without the flag `--validator-monitor-auto` in the beacon node } ``` -### `/lighthouse/syncing` +## `/lighthouse/syncing` + Returns the sync status of the beacon node. 
+ ```bash curl -X GET "http://localhost:5052/lighthouse/syncing" -H "accept: application/json" | jq ``` @@ -168,6 +176,7 @@ curl -X GET "http://localhost:5052/lighthouse/syncing" -H "accept: application/ There are two possible outcomes, depending on whether the beacon node is syncing or synced. 1. Syncing: + ```json { "data": { @@ -178,20 +187,21 @@ There are two possible outcomes, depending on whether the beacon node is syncing } } ``` + 1. Synced: + ```json { "data": "Synced" } ``` -### `/lighthouse/peers` +## `/lighthouse/peers` ```bash curl -X GET "http://localhost:5052/lighthouse/peers" -H "accept: application/json" | jq ``` - ```json [ { @@ -255,14 +265,14 @@ curl -X GET "http://localhost:5052/lighthouse/peers" -H "accept: application/js ] ``` -### `/lighthouse/peers/connected` +## `/lighthouse/peers/connected` + Returns information about connected peers. + ```bash curl -X GET "http://localhost:5052/lighthouse/peers/connected" -H "accept: application/json" | jq ``` - - ```json [ { @@ -327,7 +337,7 @@ curl -X GET "http://localhost:5052/lighthouse/peers/connected" -H "accept: appl ] ``` -### `/lighthouse/proto_array` +## `/lighthouse/proto_array` ```bash curl -X GET "http://localhost:5052/lighthouse/proto_array" -H "accept: application/json" | jq @@ -335,45 +345,45 @@ curl -X GET "http://localhost:5052/lighthouse/proto_array" -H "accept: applicat *Example omitted for brevity.* -### `/lighthouse/validator_inclusion/{epoch}/{validator_id}` +## `/lighthouse/validator_inclusion/{epoch}/{validator_id}` See [Validator Inclusion APIs](./validator-inclusion.md). -### `/lighthouse/validator_inclusion/{epoch}/global` +## `/lighthouse/validator_inclusion/{epoch}/global` See [Validator Inclusion APIs](./validator-inclusion.md). 
-### `/lighthouse/eth1/syncing` +## `/lighthouse/eth1/syncing` Returns information regarding execution layer, as it is required for use in consensus layer -#### Fields +### Fields - `head_block_number`, `head_block_timestamp`: the block number and timestamp from the very head of the execution chain. Useful for understanding the immediate health of the execution node that the beacon node is connected to. - `latest_cached_block_number` & `latest_cached_block_timestamp`: the block number and timestamp of the latest block we have in our block cache. - - For correct execution client voting this timestamp should be later than the + - For correct execution client voting this timestamp should be later than the `voting_target_timestamp`. - `voting_target_timestamp`: The latest timestamp allowed for an execution layer block in this voting period. - `eth1_node_sync_status_percentage` (float): An estimate of how far the head of the execution node is from the head of the execution chain. - - `100.0` indicates a fully synced execution node. - - `0.0` indicates an execution node that has not verified any blocks past the - genesis block. + - `100.0` indicates a fully synced execution node. + - `0.0` indicates an execution node that has not verified any blocks past the + genesis block. - `lighthouse_is_cached_and_ready`: Is set to `true` if the caches in the - beacon node are ready for block production. - - This value might be set to - `false` whilst `eth1_node_sync_status_percentage == 100.0` if the beacon - node is still building its internal cache. - - This value might be set to `true` whilst - `eth1_node_sync_status_percentage < 100.0` since the cache only cares - about blocks a certain distance behind the head. + beacon node are ready for block production. + - This value might be set to + `false` whilst `eth1_node_sync_status_percentage == 100.0` if the beacon + node is still building its internal cache. 
+ - This value might be set to `true` whilst + `eth1_node_sync_status_percentage < 100.0` since the cache only cares + about blocks a certain distance behind the head. -#### Example +### Example ```bash curl -X GET "http://localhost:5052/lighthouse/eth1/syncing" -H "accept: application/json" | jq @@ -393,11 +403,11 @@ curl -X GET "http://localhost:5052/lighthouse/eth1/syncing" -H "accept: applica } ``` -### `/lighthouse/eth1/block_cache` +## `/lighthouse/eth1/block_cache` Returns a list of all the execution layer blocks in the execution client voting cache. -#### Example +### Example ```bash curl -X GET "http://localhost:5052/lighthouse/eth1/block_cache" -H "accept: application/json" | jq @@ -424,11 +434,11 @@ curl -X GET "http://localhost:5052/lighthouse/eth1/block_cache" -H "accept: app } ``` -### `/lighthouse/eth1/deposit_cache` +## `/lighthouse/eth1/deposit_cache` Returns a list of all cached logs from the deposit contract. -#### Example +### Example ```bash curl -X GET "http://localhost:5052/lighthouse/eth1/deposit_cache" -H "accept: application/json" | jq @@ -463,7 +473,7 @@ curl -X GET "http://localhost:5052/lighthouse/eth1/deposit_cache" -H "accept: a } ``` -### `/lighthouse/liveness` +## `/lighthouse/liveness` POST request that checks if any of the given validators have attested in the given epoch. Returns a list of objects, each including the validator index, epoch, and `is_live` status of a requested validator. @@ -488,9 +498,7 @@ curl -X POST "http://localhost:5052/lighthouse/liveness" -d '{"indices":["0","1" } ``` - - -### `/lighthouse/database/info` +## `/lighthouse/database/info` Information about the database's split point and anchor info. @@ -498,7 +506,6 @@ Information about the database's split point and anchor info. curl "http://localhost:5052/lighthouse/database/info" | jq ``` - ```json { "schema_version": 18, @@ -541,9 +548,10 @@ reconstruction has yet to be completed. 
For more information on the specific meanings of these fields see the docs on [Checkpoint Sync](./checkpoint-sync.md#reconstructing-states). +## `/lighthouse/merge_readiness` -### `/lighthouse/merge_readiness` Returns the current difficulty and terminal total difficulty of the network. Before [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, you will see that the current difficulty is less than the terminal total difficulty, An example is shown below: + ```bash curl -X GET "http://localhost:5052/lighthouse/merge_readiness" | jq ``` @@ -574,16 +582,15 @@ As all testnets and Mainnet have been merged, both values will be the same after } ``` - -### `/lighthouse/analysis/attestation_performance/{index}` +## `/lighthouse/analysis/attestation_performance/{index}` Fetch information about the attestation performance of a validator index or all validators for a range of consecutive epochs. Two query parameters are required: -* `start_epoch` (inclusive): the first epoch to compute attestation performance for. -* `end_epoch` (inclusive): the final epoch to compute attestation performance for. +- `start_epoch` (inclusive): the first epoch to compute attestation performance for. +- `end_epoch` (inclusive): the final epoch to compute attestation performance for. Example: @@ -649,18 +656,18 @@ curl -X GET "http://localhost:5052/lighthouse/analysis/attestation_performance/g Caveats: -* For maximum efficiency the start_epoch should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`. - This is because the state _prior_ to the `start_epoch` needs to be loaded from the database, +- For maximum efficiency the start_epoch should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`. + This is because the state *prior* to the `start_epoch` needs to be loaded from the database, and loading a state on a boundary is most efficient. 
-### `/lighthouse/analysis/block_rewards` +## `/lighthouse/analysis/block_rewards` Fetch information about the block rewards paid to proposers for a range of consecutive blocks. Two query parameters are required: -* `start_slot` (inclusive): the slot of the first block to compute rewards for. -* `end_slot` (inclusive): the slot of the last block to compute rewards for. +- `start_slot` (inclusive): the slot of the first block to compute rewards for. +- `end_slot` (inclusive): the slot of the last block to compute rewards for. Example: @@ -668,7 +675,6 @@ Example: curl -X GET "http://localhost:5052/lighthouse/analysis/block_rewards?start_slot=1&end_slot=1" | jq ``` - The first few lines of the response would look like: ```json @@ -680,7 +686,7 @@ The first few lines of the response would look like: "slot": "1", "parent_slot": "0", "proposer_index": 93, - "graffiti": "EF #vm-eth2-raw-iron-prater-101" + "graffiti": "EF #vm-eth2-raw-iron-101" }, "attestation_rewards": { "total": 637260, @@ -698,25 +704,25 @@ The first few lines of the response would look like: Caveats: -* Presently only attestation and sync committee rewards are computed. -* The output format is verbose and subject to change. Please see [`BlockReward`][block_reward_src] +- Presently only attestation and sync committee rewards are computed. +- The output format is verbose and subject to change. Please see [`BlockReward`][block_reward_src] in the source. -* For maximum efficiency the `start_slot` should satisfy `start_slot % slots_per_restore_point == 1`. - This is because the state _prior_ to the `start_slot` needs to be loaded from the database, and +- For maximum efficiency the `start_slot` should satisfy `start_slot % slots_per_restore_point == 1`. + This is because the state *prior* to the `start_slot` needs to be loaded from the database, and loading a state on a boundary is most efficient. 
[block_reward_src]: https://github.com/sigp/lighthouse/tree/unstable/common/eth2/src/lighthouse/block_rewards.rs -### `/lighthouse/analysis/block_packing` +## `/lighthouse/analysis/block_packing` Fetch information about the block packing efficiency of blocks for a range of consecutive epochs. Two query parameters are required: -* `start_epoch` (inclusive): the epoch of the first block to compute packing efficiency for. -* `end_epoch` (inclusive): the epoch of the last block to compute packing efficiency for. +- `start_epoch` (inclusive): the epoch of the first block to compute packing efficiency for. +- `end_epoch` (inclusive): the epoch of the last block to compute packing efficiency for. ```bash curl -X GET "http://localhost:5052/lighthouse/analysis/block_packing_efficiency?start_epoch=1&end_epoch=1" | jq @@ -745,13 +751,12 @@ An excerpt of the response looks like: Caveats: -* `start_epoch` must not be `0`. -* For maximum efficiency the `start_epoch` should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`. - This is because the state _prior_ to the `start_epoch` needs to be loaded from the database, and +- `start_epoch` must not be `0`. +- For maximum efficiency the `start_epoch` should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`. + This is because the state *prior* to the `start_epoch` needs to be loaded from the database, and loading a state on a boundary is most efficient. - -### `/lighthouse/logs` +## `/lighthouse/logs` This is a Server Side Event subscription endpoint. This allows a user to read the Lighthouse logs directly from the HTTP API endpoint. This currently @@ -764,6 +769,7 @@ curl -N "http://localhost:5052/lighthouse/logs" ``` Should provide an output that emits log events as they occur: + ```json { "data": { @@ -779,7 +785,8 @@ Should provide an output that emits log events as they occur: } ``` -### `/lighthouse/nat` +## `/lighthouse/nat` + Checks if the ports are open. 
```bash @@ -787,6 +794,7 @@ curl -X GET "http://localhost:5052/lighthouse/nat" | jq ``` An open port will return: + ```json { "data": true diff --git a/book/src/api-vc-auth-header.md b/book/src/api-vc-auth-header.md index 33f6f6ff7a..f2f9caf46b 100644 --- a/book/src/api-vc-auth-header.md +++ b/book/src/api-vc-auth-header.md @@ -11,7 +11,7 @@ HTTP header: Where `` is a string that can be obtained from the validator client host. Here is an example `Authorization` header: -``` +```text Authorization: Bearer api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123 ``` @@ -22,17 +22,16 @@ this is `~/.lighthouse/{network}/validators/api-token.txt`. Here's an example using the `cat` command to print the token to the terminal, but any text editor will suffice: -``` -$ cat api-token.txt +```bash +cat api-token.txt api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123 ``` - When starting the validator client it will output a log message containing the path to the file containing the api token. -``` -Sep 28 19:17:52.615 INFO HTTP API started api_token_file: "$HOME/prater/validators/api-token.txt", listen_address: 127.0.0.1:5062 +```text +Sep 28 19:17:52.615 INFO HTTP API started api_token_file: "$HOME/holesky/validators/api-token.txt", listen_address: 127.0.0.1:5062 ``` The _path_ to the API token may also be fetched from the HTTP API itself (this endpoint is the only @@ -46,7 +45,7 @@ Response: ```json { - "token_path": "/home/karlm/.lighthouse/prater/validators/api-token.txt" + "token_path": "/home/karlm/.lighthouse/holesky/validators/api-token.txt" } ``` diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index cf52454c2d..a36aa73708 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -2,27 +2,27 @@ ## Endpoints -HTTP Path | Description | +| HTTP Path | Description | | --- | -- | -[`GET /lighthouse/version`](#get-lighthouseversion) | Get the Lighthouse software version. 
-[`GET /lighthouse/health`](#get-lighthousehealth) | Get information about the host machine. -[`GET /lighthouse/ui/health`](#get-lighthouseuihealth) | Get information about the host machine. Focused for UI applications. -[`GET /lighthouse/spec`](#get-lighthousespec) | Get the Ethereum proof-of-stake consensus specification used by the validator. -[`GET /lighthouse/auth`](#get-lighthouseauth) | Get the location of the authorization token. -[`GET /lighthouse/validators`](#get-lighthousevalidators) | List all validators. -[`GET /lighthouse/validators/:voting_pubkey`](#get-lighthousevalidatorsvoting_pubkey) | Get a specific validator. -[`PATCH /lighthouse/validators/:voting_pubkey`](#patch-lighthousevalidatorsvoting_pubkey) | Update a specific validator. -[`POST /lighthouse/validators`](#post-lighthousevalidators) | Create a new validator and mnemonic. -[`POST /lighthouse/validators/keystore`](#post-lighthousevalidatorskeystore) | Import a keystore. -[`POST /lighthouse/validators/mnemonic`](#post-lighthousevalidatorsmnemonic) | Create a new validator from an existing mnemonic. -[`POST /lighthouse/validators/web3signer`](#post-lighthousevalidatorsweb3signer) | Add web3signer validators. -[`GET /lighthouse/logs`](#get-lighthouselogs) | Get logs +| [`GET /lighthouse/version`](#get-lighthouseversion) | Get the Lighthouse software version. | +| [`GET /lighthouse/health`](#get-lighthousehealth) | Get information about the host machine. | +| [`GET /lighthouse/ui/health`](#get-lighthouseuihealth) | Get information about the host machine. Focused for UI applications. | +| [`GET /lighthouse/spec`](#get-lighthousespec) | Get the Ethereum proof-of-stake consensus specification used by the validator. | +| [`GET /lighthouse/auth`](#get-lighthouseauth) | Get the location of the authorization token. | +| [`GET /lighthouse/validators`](#get-lighthousevalidators) | List all validators. |
+| [`GET /lighthouse/validators/:voting_pubkey`](#get-lighthousevalidatorsvoting_pubkey) | Get a specific validator. | +| [`PATCH /lighthouse/validators/:voting_pubkey`](#patch-lighthousevalidatorsvoting_pubkey) | Update a specific validator. | +| [`POST /lighthouse/validators`](#post-lighthousevalidators) | Create a new validator and mnemonic. | +| [`POST /lighthouse/validators/keystore`](#post-lighthousevalidatorskeystore) | Import a keystore. | +| [`POST /lighthouse/validators/mnemonic`](#post-lighthousevalidatorsmnemonic) | Create a new validator from an existing mnemonic. | +| [`POST /lighthouse/validators/web3signer`](#post-lighthousevalidatorsweb3signer) | Add web3signer validators. | +| [`GET /lighthouse/logs`](#get-lighthouselogs) | Get logs | -The query to Lighthouse API endpoints requires authorization, see [Authorization Header](./api-vc-auth-header.md). +The query to Lighthouse API endpoints requires authorization, see [Authorization Header](./api-vc-auth-header.md). In addition to the above endpoints Lighthouse also supports all of the [standard keymanager APIs](https://ethereum.github.io/keymanager-APIs/). - ## `GET /lighthouse/version` Returns the software version and `git` commit hash for the Lighthouse binary. @@ -37,6 +37,7 @@ Returns the software version and `git` commit hash for the Lighthouse binary. | Typical Responses | 200 | Command: + ```bash DATADIR=/var/lib/lighthouse curl -X GET "http://localhost:5062/lighthouse/version" -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" | jq @@ -44,7 +45,6 @@ curl -X GET "http://localhost:5062/lighthouse/version" -H "Authorization: Bearer Example Response Body: - ```json { "data": { @@ -52,9 +52,11 @@ Example Response Body: } } ``` + > Note: The command provided in this documentation links to the API token file. In this documentation, it is assumed that the API token file is located in `/var/lib/lighthouse/validators/API-token.txt`. 
If your database is saved in another directory, modify the `DATADIR` accordingly. If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. > As an alternative, you can also provide the API token directly, for example, `-H "Authorization: Bearer api-token-0x02dc2a13115cc8c83baf170f597f22b1eb2930542941ab902df3daadebcb8f8176`. In this case, you obtain the token from the file `API token.txt` and the command becomes: + ```bash curl -X GET "http://localhost:5062/lighthouse/version" -H "Authorization: Bearer api-token-0x02dc2a13115cc8c83baf170f597f22b1eb2930542941ab902df3daadebcb8f8176" | jq ``` @@ -75,6 +77,7 @@ Returns information regarding the health of the host machine. *Note: this endpoint is presently only available on Linux.* Command: + ```bash DATADIR=/var/lib/lighthouse curl -X GET "http://localhost:5062/lighthouse/health" -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" | jq @@ -133,6 +136,7 @@ Returns information regarding the health of the host machine. 
| Typical Responses | 200 | Command: + ```bash DATADIR=/var/lib/lighthouse curl -X GET "http://localhost:5062/lighthouse/ui/health" -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" | jq @@ -178,10 +182,12 @@ Returns the graffiti that will be used for the next block proposal of each valid | Typical Responses | 200 | Command: + ```bash DATADIR=/var/lib/lighthouse curl -X GET "http://localhost:5062/lighthouse/ui/graffiti" -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" | jq ``` + Example Response Body ```json @@ -219,7 +225,7 @@ Example Response Body ```json { "data": { - "CONFIG_NAME": "prater", + "CONFIG_NAME": "holesky", "PRESET_BASE": "mainnet", "TERMINAL_TOTAL_DIFFICULTY": "10790000", "TERMINAL_BLOCK_HASH": "0x0000000000000000000000000000000000000000000000000000000000000000", @@ -323,7 +329,7 @@ Example Response Body ## `GET /lighthouse/auth` Fetch the filesystem path of the [authorization token](./api-vc-auth-header.md). -Unlike the other endpoints this may be called _without_ providing an authorization token. +Unlike the other endpoints this may be called *without* providing an authorization token. This API is intended to be called from the same machine as the validator client, so that the token file may be read by a local user with access rights. @@ -347,7 +353,7 @@ Example Response Body ```json { - "token_path": "/home/karlm/.lighthouse/prater/validators/api-token.txt" + "token_path": "/home/karlm/.lighthouse/holesky/validators/api-token.txt" } ``` @@ -440,7 +446,6 @@ and `graffiti`. 
The following example updates a validator from `enabled: true` | Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 200, 400 | - Example Request Body ```json @@ -458,6 +463,7 @@ curl -X PATCH "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf4 -H "Content-Type: application/json" \ -d "{\"enabled\":false}" | jq ``` + ### Example Response Body ```json @@ -466,12 +472,11 @@ null A `null` response indicates that the request is successful. At the same time, `lighthouse vc` will log: -``` +```text INFO Disabled validator voting_pubkey: 0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde INFO Modified key_cache saved successfully ``` - ## `POST /lighthouse/validators/` Create any number of new validators, all of which will share a common mnemonic @@ -510,7 +515,8 @@ Validators are generated from the mnemonic according to ] ``` -Command: +Command: + ```bash DATADIR=/var/lib/lighthouse curl -X POST http://localhost:5062/lighthouse/validators \ @@ -560,7 +566,7 @@ curl -X POST http://localhost:5062/lighthouse/validators \ `lighthouse vc` will log: -``` +```text INFO Enabled validator voting_pubkey: 0x8ffbc881fb60841a4546b4b385ec5e9b5090fd1c4395e568d98b74b94b41a912c6101113da39d43c101369eeb9b48e50, signing_method: local_keystore INFO Modified key_cache saved successfully INFO Disabled validator voting_pubkey: 0xa9fadd620dc68e9fe0d6e1a69f6c54a0271ad65ab5a509e645e45c6e60ff8f4fc538f301781193a08b55821444801502 @@ -625,6 +631,7 @@ Import a keystore into the validator client. We can use [JSON to String Converter](https://jsontostring.com/) so that the above data can be properly presented as a command. 
The command is as below: Command: + ```bash DATADIR=/var/lib/lighthouse curl -X POST http://localhost:5062/lighthouse/validators/keystore \ @@ -636,6 +643,7 @@ curl -X POST http://localhost:5062/lighthouse/validators/keystore \ As this is an example for demonstration, the above command will return `InvalidPassword`. However, with a keystore file and correct password, running the above command will import the keystore to the validator client. An example of a success message is shown below: ### Example Response Body + ```json { "data": { @@ -717,7 +725,7 @@ curl -X POST http://localhost:5062/lighthouse/validators/mnemonic \ `lighthouse vc` will log: -``` +```text INFO Enabled validator voting_pubkey: 0xa062f95fee747144d5e511940624bc6546509eeaeae9383257a9c43e7ddc58c17c2bab4ae62053122184c381b90db380, signing_method: local_keystore INFO Modified key_cache saved successfully ``` @@ -759,8 +767,8 @@ Create any number of new validators, all of which will refer to a Some of the fields above may be omitted or nullified to obtain default values (e.g., `graffiti`, `request_timeout_ms`). - Command: + ```bash DATADIR=/var/lib/lighthouse curl -X POST http://localhost:5062/lighthouse/validators/web3signer \ @@ -769,21 +777,18 @@ curl -X POST http://localhost:5062/lighthouse/validators/web3signer \ -d "[{\"enable\":true,\"description\":\"validator_one\",\"graffiti\":\"Mr F was here\",\"suggested_fee_recipient\":\"0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d\",\"voting_public_key\":\"0xa062f95fee747144d5e511940624bc6546509eeaeae9383257a9c43e7ddc58c17c2bab4ae62053122184c381b90db380\",\"builder_proposals\":true,\"url\":\"http://path-to-web3signer.com\",\"root_certificate_path\":\"/path/to/certificate.pem\",\"client_identity_path\":\"/path/to/identity.p12\",\"client_identity_password\":\"pass\",\"request_timeout_ms\":12000}]" ``` - ### Example Response Body - ```json null ``` A `null` response indicates that the request is successful. 
At the same time, `lighthouse vc` will log: -``` +```text INFO Enabled validator voting_pubkey: 0xa062f95fee747144d5e511940624bc6546509eeaeae9383257a9c43e7ddc58c17c2bab4ae62053122184c381b90db380, signing_method: remote_signer ``` - ## `GET /lighthouse/logs` Provides a subscription to receive logs as Server Side Events. Currently the diff --git a/book/src/api-vc-sig-header.md b/book/src/api-vc-sig-header.md index a1b9b104f9..468f714cfa 100644 --- a/book/src/api-vc-sig-header.md +++ b/book/src/api-vc-sig-header.md @@ -9,7 +9,7 @@ The validator client HTTP server adds the following header to all responses: Example `Signature` header: -``` +```text Signature: 0x304402205b114366444112580bf455d919401e9c869f5af067cd496016ab70d428b5a99d0220067aede1eb5819eecfd5dd7a2b57c5ac2b98f25a7be214b05684b04523aef873 ``` @@ -83,7 +83,7 @@ The previous Javascript example was written using the output from the following curl -v localhost:5062/lighthouse/version -H "Authorization: Basic api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123" ``` -``` +```text * Trying ::1:5062... * connect to ::1 port 5062 failed: Connection refused * Trying 127.0.0.1:5062... diff --git a/book/src/api-vc.md b/book/src/api-vc.md index a3400016ec..630a032006 100644 --- a/book/src/api-vc.md +++ b/book/src/api-vc.md @@ -19,11 +19,11 @@ A Lighthouse validator client can be configured to expose a HTTP server by suppl The following CLI flags control the HTTP server: - `--http`: enable the HTTP server (required even if the following flags are - provided). + provided). - `--http-address`: specify the listen address of the server. It is almost always unsafe to use a non-default HTTP listen address. Use this with caution. See the **Security** section below for more information. - `--http-port`: specify the listen port of the server. - `--http-allow-origin`: specify the value of the `Access-Control-Allow-Origin` - header. The default is to not supply a header. + header. 
The default is to not supply a header. ## Security diff --git a/book/src/builders.md b/book/src/builders.md index 930d330d99..5b8e9ddb8b 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -18,30 +18,34 @@ a missed proposal and the opportunity cost of lost block rewards. The beacon node and validator client each require a new flag for lighthouse to be fully compatible with builder API servers. -``` +```bash lighthouse bn --builder https://mainnet-builder.test ``` + The `--builder` flag will cause the beacon node to simultaneously query the provided URL and the local execution engine during block production for a block payload with stubbed-out transactions. If either fails, the successful result will be used; If both succeed, the more profitable result will be used. The beacon node will *only* query for this type of block (a "blinded" block) when a validator specifically requests it. Otherwise, it will continue to serve full blocks as normal. In order to configure the validator client to query for blinded blocks, you should use the following flag: -``` +```bash lighthouse vc --builder-proposals ``` + With the `--builder-proposals` flag, the validator client will ask for blinded blocks for all validators it manages. -``` +```bash lighthouse vc --prefer-builder-proposals ``` + With the `--prefer-builder-proposals` flag, the validator client will always prefer blinded blocks, regardless of the payload value, for all validators it manages. -``` +```bash lighthouse vc --builder-boost-factor ``` + With the `--builder-boost-factor` flag, a percentage multiplier is applied to the builder's payload value when choosing between a -builder payload header and payload from the paired execution node. For example, `--builder-boost-factor 50` will only use the builder payload if it is 2x more profitable than the local payload. +builder payload header and payload from the paired execution node. 
For example, `--builder-boost-factor 50` will only use the builder payload if it is 2x more profitable than the local payload. In order to configure whether a validator queries for blinded blocks check out [this section.](#validator-client-configuration) @@ -88,7 +92,6 @@ You can also update the configured gas limit with these requests. #### `PATCH /lighthouse/validators/:voting_pubkey` - #### HTTP Specification | Property | Specification | @@ -100,12 +103,14 @@ You can also update the configured gas limit with these requests. #### Example Path -``` +```text localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde ``` #### Example Request Body + Each field is optional. + ```json { "builder_proposals": true, @@ -113,7 +118,7 @@ Each field is optional. } ``` -Command: +Command: ```bash DATADIR=/var/lib/lighthouse @@ -125,6 +130,7 @@ curl -X PATCH "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf4 "gas_limit": 30000001 }' | jq ``` + If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"` #### Example Response Body @@ -135,7 +141,7 @@ null A `null` response indicates that the request is successful. At the same time, `lighthouse vc` will show a log which looks like: -``` +```text INFO Published validator registrations to the builder network, count: 3, service: preparation ``` @@ -147,7 +153,7 @@ Refer to [suggested fee recipient](suggested-fee-recipient.md) documentation. You can also directly configure these fields in the `validator_definitions.yml` file. -``` +```text --- - enabled: true voting_public_key: "0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007" @@ -178,16 +184,16 @@ checks to try and avoid scenarios like this. 
By default, Lighthouse is strict with these conditions, but we encourage users to learn about and adjust them. -- `--builder-fallback-skips` - If we've seen this number of skip slots on the canonical chain in a row prior to proposing, we will NOT query +* `--builder-fallback-skips` - If we've seen this number of skip slots on the canonical chain in a row prior to proposing, we will NOT query any connected builders, and will use the local execution engine for payload construction. -- `--builder-fallback-skips-per-epoch` - If we've seen this number of skip slots on the canonical chain in the past `SLOTS_PER_EPOCH`, we will NOT +* `--builder-fallback-skips-per-epoch` - If we've seen this number of skip slots on the canonical chain in the past `SLOTS_PER_EPOCH`, we will NOT query any connected builders, and will use the local execution engine for payload construction. -- `--builder-fallback-epochs-since-finalization` - If we're proposing and the chain has not finalized within +* `--builder-fallback-epochs-since-finalization` - If we're proposing and the chain has not finalized within this number of epochs, we will NOT query any connected builders, and will use the local execution engine for payload construction. Setting this value to anything less than 2 will cause the node to NEVER query connected builders. Setting it to 2 will cause this condition to be hit if there are skips slots at the start of an epoch, right before this node is set to propose. -- `--builder-fallback-disable-checks` - This flag disables all checks related to chain health. This means the builder +* `--builder-fallback-disable-checks` - This flag disables all checks related to chain health. This means the builder API will always be used for payload construction, regardless of recent chain conditions. 
## Checking your builder config @@ -196,20 +202,20 @@ You can check that your builder is configured correctly by looking for these log On start-up, the beacon node will log if a builder is configured: -``` +```text INFO Using external block builder ``` At regular intervals the validator client will log that it successfully registered its validators with the builder network: -``` +```text INFO Published validator registrations to the builder network ``` When you successfully propose a block using a builder, you will see this log on the beacon node: -``` +```text INFO Successfully published a block to the builder network ``` @@ -218,34 +224,35 @@ for `INFO` and `WARN` messages indicating why the builder was not used. Examples of messages indicating fallback to a locally produced block are: -``` +```text INFO Builder did not return a payload ``` -``` +```text WARN Builder error when requesting payload ``` -``` +```text WARN Builder returned invalid payload ``` -``` +```text INFO Builder payload ignored ``` -``` +```text INFO Chain is unhealthy, using local payload ``` In case of fallback you should see a log indicating that the locally produced payload was used in place of one from the builder: -``` +```text INFO Reconstructing a full block using a local payload ``` ## Information for block builders and relays + Block builders and relays can query beacon node events from the [Events API](https://ethereum.github.io/beacon-APIs/#/Events/eventstream). 
An example of querying the payload attributes in the Events API is outlined in [Beacon node API - Events API](./api-bn.md#events-api) [mev-rs]: https://github.com/ralexstokes/mev-rs diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index 37677c00ad..63d96874c3 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -15,20 +15,20 @@ To begin checkpoint sync you will need HTTP API access to another synced beacon checkpoint sync by providing the other beacon node's URL to `--checkpoint-sync-url`, alongside any other flags: -``` +```bash lighthouse bn --checkpoint-sync-url "http://remote-bn:5052" ... ``` Lighthouse will print a message to indicate that checkpoint sync is being used: -``` +```text INFO Starting checkpoint sync remote_url: http://remote-bn:8000/, service: beacon ``` After a short time (usually less than a minute), it will log the details of the checkpoint loaded from the remote beacon node: -``` +```text INFO Loaded checkpoint block and state state_root: 0xe8252c68784a8d5cc7e5429b0e95747032dd1dcee0d1dc9bdaf6380bf90bc8a6, block_root: 0x5508a20147299b1a7fe9dbea1a8b3bf979f74c52e7242039bd77cbff62c0695a, slot: 2034720, service: beacon ``` @@ -43,7 +43,8 @@ as soon as forwards sync completes. ### Use a community checkpoint sync endpoint The Ethereum community provides various [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) for you to choose from for your initial checkpoint state. Select one for your network and use it as the url for the `--checkpoint-sync-url` flag. e.g. -``` + +```bash lighthouse bn --checkpoint-sync-url https://example.com/ ... ``` @@ -52,7 +53,7 @@ lighthouse bn --checkpoint-sync-url https://example.com/ ... If the beacon node fails to start due to a timeout from the checkpoint sync server, you can try running it again with a longer timeout by adding the flag `--checkpoint-sync-url-timeout`. 
-``` +```bash lighthouse bn --checkpoint-sync-url-timeout 300 --checkpoint-sync-url https://example.com/ ... ``` @@ -66,7 +67,7 @@ from the checkpoint back to genesis. The beacon node will log messages similar to the following each minute while it completes backfill sync: -``` +```text INFO Downloading historical blocks est_time: 5 hrs 0 mins, speed: 111.96 slots/sec, distance: 2020451 slots (40 weeks 0 days), service: slot_notifier ``` @@ -80,21 +81,16 @@ Once backfill is complete, a `INFO Historical block download complete` log will 1. What if I have an existing database? How can I use checkpoint sync? -The existing beacon database needs to be deleted before Lighthouse will attempt checkpoint sync. -You can do this by providing the `--purge-db` flag, or by manually deleting `/beacon`. + The existing beacon database needs to be deleted before Lighthouse will attempt checkpoint sync. + You can do this by providing the `--purge-db` flag, or by manually deleting `/beacon`. -2. Why is checkpoint sync faster? +1. Why is checkpoint sync faster? -Checkpoint sync prioritises syncing to the head of the chain quickly so that the node can perform -its duties. Additionally, it only has to perform lightweight verification of historic blocks: -it checks the hash chain integrity & proposer signature rather than computing the full state -transition. + Checkpoint sync prioritises syncing to the head of the chain quickly so that the node can perform its duties. Additionally, it only has to perform lightweight verification of historic blocks: it checks the hash chain integrity & proposer signature rather than computing the full state transition. -3. Is checkpoint sync less secure? +1. Is checkpoint sync less secure? -No, in fact it is more secure! Checkpoint sync guards against long-range attacks that -genesis sync does not. This is due to a property of Proof of Stake consensus known as [Weak -Subjectivity][weak-subj]. + No, in fact it is more secure! 
Checkpoint sync guards against long-range attacks that genesis sync does not. This is due to a property of Proof of Stake consensus known as [Weak Subjectivity][weak-subj]. ## Reconstructing States @@ -122,7 +118,7 @@ states: Reconstruction runs from the state lower limit to the upper limit, narrowing the window of unavailable states as it goes. It will log messages like the following to show its progress: -``` +```text INFO State reconstruction in progress remaining: 747519, slot: 466944, service: freezer_db ``` diff --git a/book/src/cli.md b/book/src/cli.md index 6540d3fc3a..f9e7df0748 100644 --- a/book/src/cli.md +++ b/book/src/cli.md @@ -4,10 +4,10 @@ The `lighthouse` binary provides all necessary Ethereum consensus client functio has two primary sub-commands: - `$ lighthouse beacon_node`: the largest and most fundamental component which connects to - the p2p network, processes messages and tracks the head of the beacon - chain. + the p2p network, processes messages and tracks the head of the beacon + chain. - `$ lighthouse validator_client`: a lightweight but important component which loads a validators private - key and signs messages using a `beacon_node` as a source-of-truth. + key and signs messages using a `beacon_node` as a source-of-truth. There are also some ancillary binaries like `lcli` and `account_manager`, but these are primarily for testing. @@ -34,11 +34,11 @@ Each binary supports the `--help` flag, this is the best source of documentation. 
```bash -$ lighthouse beacon_node --help +lighthouse beacon_node --help ``` ```bash -$ lighthouse validator_client --help +lighthouse validator_client --help ``` ## Creating a new database/testnet diff --git a/book/src/contributing.md b/book/src/contributing.md index 5b0ab48e86..312acccbc0 100644 --- a/book/src/contributing.md +++ b/book/src/contributing.md @@ -8,7 +8,6 @@ [stable]: https://github.com/sigp/lighthouse/tree/stable [unstable]: https://github.com/sigp/lighthouse/tree/unstable - Lighthouse welcomes contributions. If you are interested in contributing to the Ethereum ecosystem, and you want to learn Rust, Lighthouse is a great project to work on. @@ -56,8 +55,8 @@ Please use [clippy](https://github.com/rust-lang/rust-clippy) and inconsistent code formatting: ```bash -$ cargo clippy --all -$ cargo fmt --all --check +cargo clippy --all +cargo fmt --all --check ``` ### Panics @@ -88,8 +87,9 @@ pub fn my_function(&mut self, _something &[u8]) -> Result { **General Comments** -* Prefer line (``//``) comments to block comments (``/* ... */``) -* Comments can appear on the line prior to the item or after a trailing space. +- Prefer line (``//``) comments to block comments (``/* ... */``) +- Comments can appear on the line prior to the item or after a trailing space. + ```rust // Comment for this struct struct Lighthouse {} @@ -98,8 +98,8 @@ fn make_blockchain() {} // A comment on the same line after a space **Doc Comments** -* The ``///`` is used to generate comments for Docs. -* The comments should come before attributes. +- The ``///`` is used to generate comments for Docs. +- The comments should come before attributes. ```rust /// Stores the core configuration for this Lighthouse instance. @@ -123,9 +123,9 @@ introduction and tutorial for the language). Rust has a steep learning curve, but there are many resources to help. 
We suggest: -* [Rust Book](https://doc.rust-lang.org/stable/book/) -* [Rust by example](https://doc.rust-lang.org/stable/rust-by-example/) -* [Learning Rust With Entirely Too Many Linked Lists](http://cglab.ca/~abeinges/blah/too-many-lists/book/) -* [Rustlings](https://github.com/rustlings/rustlings) -* [Rust Exercism](https://exercism.io/tracks/rust) -* [Learn X in Y minutes - Rust](https://learnxinyminutes.com/docs/rust/) +- [Rust Book](https://doc.rust-lang.org/stable/book/) +- [Rust by example](https://doc.rust-lang.org/stable/rust-by-example/) +- [Learning Rust With Entirely Too Many Linked Lists](http://cglab.ca/~abeinges/blah/too-many-lists/book/) +- [Rustlings](https://github.com/rustlings/rustlings) +- [Rust Exercism](https://exercism.io/tracks/rust) +- [Learn X in Y minutes - Rust](https://learnxinyminutes.com/docs/rust/) diff --git a/book/src/cross-compiling.md b/book/src/cross-compiling.md index 7cf7f4feb1..dfddcbc294 100644 --- a/book/src/cross-compiling.md +++ b/book/src/cross-compiling.md @@ -4,7 +4,6 @@ Lighthouse supports cross-compiling, allowing users to run a binary on one platform (e.g., `aarch64`) that was compiled on another platform (e.g., `x86_64`). - ## Instructions Cross-compiling requires [`Docker`](https://docs.docker.com/engine/install/), diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index 1e8e134436..a81acd7794 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -53,13 +53,13 @@ To apply a downgrade you need to use the `lighthouse db migrate` command with th 5. After stopping the beacon node, run the migrate command with the `--to` parameter set to the schema version you would like to downgrade to. 
-``` +```bash sudo -u "$LH_USER" lighthouse db migrate --to "$VERSION" --datadir "$LH_DATADIR" --network "$NET" ``` For example if you want to downgrade to Lighthouse v4.0.1 from v4.2.0 and you followed Somer Esat's guide, you would run: -``` +```bash sudo -u lighthousebeacon lighthouse db migrate --to 16 --datadir /var/lib/lighthouse --network mainnet ``` @@ -113,7 +113,7 @@ The `schema_version` key indicates that this database is using schema version 16 Alternatively, you can check the schema version with the `lighthouse db` command. -``` +```bash sudo -u lighthousebeacon lighthouse db version --datadir /var/lib/lighthouse --network mainnet ``` @@ -132,25 +132,27 @@ Several conditions need to be met in order to run `lighthouse db`: The general form for a `lighthouse db` command is: -``` +```bash sudo -u "$LH_USER" lighthouse db version --datadir "$LH_DATADIR" --network "$NET" ``` If you followed Somer Esat's guide for mainnet: -``` +```bash sudo systemctl stop lighthousebeacon ``` -``` + +```bash sudo -u lighthousebeacon lighthouse db version --datadir /var/lib/lighthouse --network mainnet ``` If you followed the CoinCashew guide for mainnet: -``` +```bash sudo systemctl stop beacon-chain ``` -``` + +```bash lighthouse db version --network mainnet ``` @@ -178,7 +180,7 @@ Here are the steps to prune historic states: If pruning is available, Lighthouse will log: - ``` + ```text INFO Ready to prune states WARN Pruning states is irreversible WARN Re-run this command with --confirm to commit to state deletion @@ -193,10 +195,10 @@ Here are the steps to prune historic states: The `--confirm` flag ensures that you are aware the action is irreversible, and historic states will be permanently removed. Lighthouse will log: - ``` + ```text INFO Historic states pruned successfully ``` - + 4. 
After successfully pruning the historic states, you can restart the Lighthouse beacon node: ```bash diff --git a/book/src/developers.md b/book/src/developers.md index ab12bed5b9..244c935ac2 100644 --- a/book/src/developers.md +++ b/book/src/developers.md @@ -5,7 +5,6 @@ _Documentation for protocol developers._ This section lists Lighthouse-specific decisions that are not strictly spec'd and may be useful for other protocol developers wishing to interact with lighthouse. - ## Custom ENR Fields Lighthouse currently uses the following ENR fields: @@ -18,7 +17,6 @@ Lighthouse currently uses the following ENR fields: | `attnets` | An SSZ bitfield which indicates which of the 64 subnets the node is subscribed to for an extended period of time | | `syncnets` | An SSZ bitfield which indicates which of the sync committee subnets the node is subscribed to | - ### Lighthouse Custom Fields Lighthouse is currently using the following custom ENR fields. @@ -27,7 +25,6 @@ Lighthouse is currently using the following custom ENR fields. | `quic` | The UDP port on which the QUIC transport is listening on IPv4 | | `quic6` | The UDP port on which the QUIC transport is listening on IPv6 | - ## Custom RPC Messages The specification leaves room for implementation-specific errors. Lighthouse uses the following @@ -43,7 +40,6 @@ custom RPC error messages. | 251 | Banned | The peer has been banned and disconnected | | 252 | Banned IP | The IP the node is connected to us with has been banned | - ### Error Codes | Code | Message | Description | diff --git a/book/src/docker.md b/book/src/docker.md index 2c410877e5..16e685491e 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -30,7 +30,7 @@ If you can see the latest [Lighthouse release](https://github.com/sigp/lighthous ### Example Version Output -``` +```text Lighthouse vx.x.xx-xxxxxxxxx BLS Library: xxxx-xxxxxxx ``` @@ -49,13 +49,13 @@ compatibility (see [Portability](./installation-binaries.md#portability)). 
To install a specific tag (in this case `latest-modern`), add the tag name to your `docker` commands: -``` +```bash docker pull sigp/lighthouse:latest-modern ``` Image tags follow this format: -``` +```text ${version}${arch}${stability}${modernity}${features} ``` @@ -85,7 +85,6 @@ The `features` is: * `-dev` for a development build with `minimal` preset enabled (`spec-minimal` feature). * empty for a standard build with no custom feature enabled. - Examples: * `latest-unstable-modern`: most recent `unstable` build for all modern CPUs (x86_64 or ARM) diff --git a/book/src/faq.md b/book/src/faq.md index 9cc695c442..c7fdb6b32f 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -1,6 +1,7 @@ # Frequently Asked Questions ## [Beacon Node](#beacon-node-1) + - [I see a warning about "Syncing deposit contract block cache" or an error about "updating deposit contract cache", what should I do?](#bn-deposit-contract) - [I see beacon logs showing `WARN: Execution engine called failed`, what should I do?](#bn-ee) - [I see beacon logs showing `Error during execution engine upcheck`, what should I do?](#bn-upcheck) @@ -16,6 +17,7 @@ - [My beacon node logs `WARN Failed to finalize deposit cache`, what should I do?](#bn-deposit-cache) ## [Validator](#validator-1) + - [Why does it take so long for a validator to be activated?](#vc-activation) - [Can I use redundancy in my staking setup?](#vc-redundancy) - [I am missing attestations. 
Why?](#vc-missed-attestations) @@ -27,6 +29,7 @@ - [How can I delete my validator once it is imported?](#vc-delete) ## [Network, Monitoring and Maintenance](#network-monitoring-and-maintenance-1) + - [I have a low peer count and it is not increasing](#net-peer) - [How do I update lighthouse?](#net-update) - [Do I need to set up any port mappings (port forwarding)?](#net-port-forwarding) @@ -38,13 +41,14 @@ - [How to know how many of my peers are connected through QUIC?](#net-quic) ## [Miscellaneous](#miscellaneous-1) + - [What should I do if I lose my slashing protection database?](#misc-slashing) - [I can't compile lighthouse](#misc-compile) - [How do I check the version of Lighthouse that is running?](#misc-version) - [Does Lighthouse have pruning function like the execution client to save disk space?](#misc-prune) - [Can I use a HDD for the freezer database and only have the hot db on SSD?](#misc-freezer) - [Can Lighthouse log in local timestamp instead of UTC?](#misc-timestamp) -- [My hard disk is full and my validator is down. What should I do? ](#misc-full) +- [My hard disk is full and my validator is down. What should I do?](#misc-full) ## Beacon Node @@ -52,13 +56,13 @@ The error can be a warning: -``` +```text Nov 30 21:04:28.268 WARN Syncing deposit contract block cache est_blocks_remaining: initializing deposits, service: slot_notifier ``` or an error: -``` +```text ERRO Error updating deposit contract cache error: Failed to get remote head and new block ranges: EndpointError(FarBehind), retry_millis: 60000, service: deposit_contract_rpc ``` @@ -80,11 +84,13 @@ The `WARN Execution engine called failed` log is shown when the beacon node cann `error: HttpClient(url: http://127.0.0.1:8551/, kind: timeout, detail: operation timed out), service: exec` which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. 
One option is to add the flags `--execution-timeout-multiplier 3` and `--disable-lock-timeouts` to the beacon node. However, if the error persists, it is worth digging further to find out the cause. There are a few reasons why this can occur: + 1. The execution engine is not synced. Check the log of the execution engine to make sure that it is synced. If it is syncing, wait until it is synced and the error will disappear. You will see the beacon node logs `INFO Execution engine online` when it is synced. 1. The computer is overloaded. Check the CPU and RAM usage to see if it has overloaded. You can use `htop` to check for CPU and RAM usage. 1. Your SSD is slow. Check if your SSD is in "The Bad" list [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). If your SSD is in "The Bad" list, it means it cannot keep in sync to the network and you may want to consider upgrading to a better SSD. If the reason for the error message is caused by no. 1 above, you may want to look further. If the execution engine is out of sync suddenly, it is usually caused by ungraceful shutdown. The common causes for ungraceful shutdown are: + - Power outage. If power outages are an issue at your place, consider getting a UPS to avoid ungraceful shutdown of services. - The service file is not stopped properly. To overcome this, make sure that the process is stopped properly, e.g., during client updates. - Out of memory (oom) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. To confirm that the error is due to oom, run `sudo dmesg -T | grep killed` to look for killed processes. If you are using geth as the execution client, a short term solution is to reduce the resources used. For example, you can reduce the cache by adding the flag `--cache 2048`. If the oom occurs rather frequently, a long term solution is to increase the memory capacity of the computer. 
@@ -95,7 +101,7 @@ An example of the full error is: `ERRO Error during execution engine upcheck error: HttpClient(url: http://127.0.0.1:8551/, kind: request, detail: error trying to connect: tcp connect error: Connection refused (os error 111)), service: exec` -Connection refused means the beacon node cannot reach the execution client. This could be due to the execution client is offline or the configuration is wrong. If the execution client is offline, run the execution engine and the error will disappear. +Connection refused means the beacon node cannot reach the execution client. This could be due to the execution client is offline or the configuration is wrong. If the execution client is offline, run the execution engine and the error will disappear. If it is a configuration issue, ensure that the execution engine can be reached. The standard endpoint to connect to the execution client is `--execution-endpoint http://localhost:8551`. If the execution client is on a different host, the endpoint to connect to it will change, e.g., `--execution-endpoint http://IP_address:8551` where `IP_address` is the IP of the execution client node (you may also need additional flags to be set). If it is using another port, the endpoint link needs to be changed accordingly. Once the execution client/beacon node is configured correctly, the error will disappear. @@ -109,13 +115,12 @@ INFO Downloading historical blocks est_time: --, distance: 4524545 slo If the same log appears every minute and you do not see progress in downloading historical blocks, you can try one of the followings: - - Check the number of peers you are connected to. If you have low peers (less than 50), try to do port forwarding on the ports 9000 TCP/UDP and 9001 UDP to increase peer count. - - Restart the beacon node. - +- Check the number of peers you are connected to. If you have low peers (less than 50), try to do port forwarding on the ports 9000 TCP/UDP and 9001 UDP to increase peer count. 
+- Restart the beacon node. ### I proposed a block but the beacon node shows `could not publish message` with error `duplicate` as below, should I be worried? -``` +```text INFO Block from HTTP API already known` WARN Could not publish message error: Duplicate, service: libp2p ``` @@ -128,7 +133,7 @@ In short, it is nothing to worry about. The log looks like: -``` +```text WARN Head is optimistic execution_block_hash: 0x47e7555f1d4215d1ad409b1ac188b008fcb286ed8f38d3a5e8078a0af6cbd6e1, info: chain not fully verified, block and attestation production disabled until execution engine syncs, service: slot_notifier ``` @@ -138,7 +143,7 @@ It means the beacon node will follow the chain, but it will not be able to attes An example of the log is shown below: -``` +```text CRIT Beacon block processing error error: ValidatorPubkeyCacheLockTimeout, service: beacon WARN BlockProcessingFailure outcome: ValidatorPubkeyCacheLockTimeout, msg: unexpected condition in processing block. ``` @@ -149,7 +154,7 @@ A `Timeout` error suggests that the computer may be overloaded at the moment, fo An example of the full log is shown below: -``` +```text WARN BlockProcessingFailure outcome: MissingBeaconBlock(0xbdba211f8d72029554e405d8e4906690dca807d1d7b1bc8c9b88d7970f1648bc), msg: unexpected condition in processing block. ``` @@ -165,41 +170,41 @@ This warning usually comes with an http error code. Some examples are given belo 1. The log shows: -``` -WARN Error processing HTTP API request method: GET, path: /eth/v1/validator/attestation_data, status: 500 Internal Server Error, elapsed: 305.65µs -``` + ```text + WARN Error processing HTTP API request method: GET, path: /eth/v1/validator/attestation_data, status: 500 Internal Server Error, elapsed: 305.65µs + ``` -The error is `500 Internal Server Error`. This suggests that the execution client is not synced. Once the execution client is synced, the error will disappear. + The error is `500 Internal Server Error`. 
This suggests that the execution client is not synced. Once the execution client is synced, the error will disappear. -2. The log shows: +1. The log shows: -``` -WARN Error processing HTTP API request method: POST, path: /eth/v1/validator/duties/attester/199565, status: 503 Service Unavailable, elapsed: 96.787µs -``` + ```text + WARN Error processing HTTP API request method: POST, path: /eth/v1/validator/duties/attester/199565, status: 503 Service Unavailable, elapsed: 96.787µs + ``` -The error is `503 Service Unavailable`. This means that the beacon node is still syncing. When this happens, the validator client will log: + The error is `503 Service Unavailable`. This means that the beacon node is still syncing. When this happens, the validator client will log: -``` -ERRO Failed to download attester duties err: FailedToDownloadAttesters("Some endpoints failed, num_failed: 2 http://localhost:5052/ => Unavailable(NotSynced), http://localhost:5052/ => RequestFailed(ServerMessage(ErrorMessage { code: 503, message: \"SERVICE_UNAVAILABLE: beacon node is syncing -``` + ```text + ERRO Failed to download attester duties err: FailedToDownloadAttesters("Some endpoints failed, num_failed: 2 http://localhost:5052/ => Unavailable(NotSynced), http://localhost:5052/ => RequestFailed(ServerMessage(ErrorMessage { code: 503, message: \"SERVICE_UNAVAILABLE: beacon node is syncing + ``` -This means that the validator client is sending requests to the beacon node. However, as the beacon node is still syncing, it is therefore unable to fulfil the request. The error will disappear once the beacon node is synced. + This means that the validator client is sending requests to the beacon node. However, as the beacon node is still syncing, it is therefore unable to fulfil the request. The error will disappear once the beacon node is synced. ### My beacon node logs `WARN Error signalling fork choice waiter`, what should I do? 
An example of the full log is shown below: -``` +```text WARN Error signalling fork choice waiter slot: 6763073, error: ForkChoiceSignalOutOfOrder { current: Slot(6763074), latest: Slot(6763073) }, service: state_advance ``` This suggests that the computer resources are being overwhelmed. It could be due to high CPU usage or high disk I/O usage. This can happen, e.g., when the beacon node is downloading historical blocks, or when the execution client is syncing. The error will disappear when the resources used return to normal or when the node is synced. - ### My beacon node logs `ERRO Aggregate attestation queue full`, what should I do? An example of the full log is shown below: -``` + +```text ERRO Aggregate attestation queue full, queue_len: 4096, msg: the system has insufficient resources for load, module: network::beacon_processor:1542 ``` @@ -207,7 +212,7 @@ This suggests that the computer resources are being overwhelmed. It could be due ### My beacon node logs `WARN Failed to finalize deposit cache`, what should I do? -This is a known [bug](https://github.com/sigp/lighthouse/issues/3707) that will fix by itself. +This is a known [bug](https://github.com/sigp/lighthouse/issues/3707) that will fix by itself. ## Validator @@ -312,7 +317,9 @@ However, there are some components which can be configured with redundancy. See [Redundancy](./redundancy.md) guide for more information. ### I am missing attestations. Why? + The first thing is to ensure both consensus and execution clients are synced with the network. If they are synced, there may still be some issues with the node setup itself that is causing the missed attestations. Check the setup to ensure that: + - the clock is synced - the computer has sufficient resources and is not overloaded - the internet is working well @@ -322,13 +329,12 @@ You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ Another cause for missing attestations is delays during block processing. 
When this happens, the debug logs will show (debug logs can be found under `$datadir/beacon/logs`): -``` +```text DEBG Delayed head block set_as_head_delay: Some(93.579425ms), imported_delay: Some(1.460405278s), observed_delay: Some(2.540811921s), block_delay: 4.094796624s, slot: 6837344, proposer_index: 211108, block_root: 0x2c52231c0a5a117401f5231585de8aa5dd963bc7cbc00c544e681342eedd1700, service: beacon ``` The fields to look for are `imported_delay > 1s` and `observed_delay < 3s`. The `imported_delay` is how long the node took to process the block. The `imported_delay` of larger than 1 second suggests that there is slowness in processing the block. It could be due to high CPU usage, high I/O disk usage or the clients are doing some background maintenance processes. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). The `observed_delay` of less than 3 seconds means that the block is not arriving late from the block proposer. Combining the above, this implies that the validator should have been able to attest to the block, but failed due to slowness in the node processing the block. - ### Sometimes I miss the attestation head vote, resulting in penalty. Is this normal? In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch and if the proposer of that slot releases the block late, then you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone elses performance. @@ -337,18 +343,17 @@ You could also check for the sync aggregate participation percentage on block ex Another possible reason for missing the head vote is due to a chain "reorg". A reorg can happen if the proposer publishes block `n` late, and the proposer of block `n+1` builds upon block `n-1` instead of `n`. 
This is called a "reorg". Due to the reorg, block `n` was never included in the chain. If you are assigned to attest at slot `n`, it is possible you may still attest to block `n` despite most of the network recognizing the block as being late. In this case you will miss the head reward. - ### Can I submit a voluntary exit message without running a beacon node? Yes. Beaconcha.in provides the tool to broadcast the message. You can create the voluntary exit message file with [ethdo](https://github.com/wealdtech/ethdo/releases/tag/v1.30.0) and submit the message via the [beaconcha.in](https://beaconcha.in/tools/broadcast) website. A guide on how to use `ethdo` to perform voluntary exit can be found [here](https://github.com/eth-educators/ethstaker-guides/blob/main/voluntary-exit.md). It is also noted that you can submit your BLS-to-execution-change message to update your withdrawal credentials from type `0x00` to `0x01` using the same link. -If you would like to still use Lighthouse to submit the message, you will need to run a beacon node and an execution client. For the beacon node, you can use checkpoint sync to quickly sync the chain under a minute. On the other hand, the execution client can be syncing and *needs not be synced*. This implies that it is possible to broadcast a voluntary exit message within a short time by quickly spinning up a node. +If you would like to still use Lighthouse to submit the message, you will need to run a beacon node and an execution client. For the beacon node, you can use checkpoint sync to quickly sync the chain under a minute. On the other hand, the execution client can be syncing and _need not be synced_. This implies that it is possible to broadcast a voluntary exit message within a short time by quickly spinning up a node. ### Does increasing the number of validators increase the CPU and other computer resources used?
-A computer with hardware specifications stated in the [Recommended System Requirements](./installation.md#recommended-system-requirements) can run hundreds validators with only marginal increase in CPU usage. +A computer with hardware specifications stated in the [Recommended System Requirements](./installation.md#recommended-system-requirements) can run hundreds of validators with only marginal increase in CPU usage. ### I want to add new validators. Do I have to reimport the existing keys? @@ -360,8 +365,7 @@ Generally yes. If you do not want to stop `lighthouse vc`, you can use the [key manager API](./api-vc-endpoints.md) to import keys. - -### How can I delete my validator once it is imported? +### How can I delete my validator once it is imported? Lighthouse supports the [KeyManager API](https://ethereum.github.io/keymanager-APIs/#/Local%20Key%20Manager/deleteKeys) to delete validators and remove them from the `validator_definitions.yml` file. To do so, start the validator client with the flag `--http` and call the API. @@ -371,7 +375,7 @@ If you are looking to delete the validators in one node and import it to another ### I have a low peer count and it is not increasing -If you cannot find *ANY* peers at all, it is likely that you have incorrect +If you cannot find _ANY_ peers at all, it is likely that you have incorrect network configuration settings. Ensure that the network you wish to connect to is correct (the beacon node outputs the network it is connecting to in the initial boot-up log lines). On top of this, ensure that you are not using the @@ -385,26 +389,25 @@ expect, there are a few things to check on: 1. Ensure that port forward was correctly set up as described [here](./advanced_networking.md#nat-traversal-port-forwarding).
-To check that the ports are forwarded, run the command: + To check that the ports are forwarded, run the command: - ```bash - curl http://localhost:5052/lighthouse/nat - ``` + ```bash + curl http://localhost:5052/lighthouse/nat + ``` -It should return `{"data":true}`. If it returns `{"data":false}`, you may want to double check if the port forward was correctly set up. + It should return `{"data":true}`. If it returns `{"data":false}`, you may want to double check if the port forward was correctly set up. -If the ports are open, you should have incoming peers. To check that you have incoming peers, run the command: + If the ports are open, you should have incoming peers. To check that you have incoming peers, run the command: - ```bash - curl localhost:5052/lighthouse/peers | jq '.[] | select(.peer_info.connection_direction=="Incoming")' - ``` + ```bash + curl localhost:5052/lighthouse/peers | jq '.[] | select(.peer_info.connection_direction=="Incoming")' + ``` -If you have incoming peers, it should return a lot of data containing information of peers. If the response is empty, it means that you have no incoming peers and there the ports are not open. You may want to double check if the port forward was correctly set up. + If you have incoming peers, it should return a lot of data containing information of peers. If the response is empty, it means that you have no incoming peers and there the ports are not open. You may want to double check if the port forward was correctly set up. -2. Check that you do not lower the number of peers using the flag `--target-peers`. The default is 80. A lower value set will lower the maximum number of peers your node can connect to, which may potentially interrupt the validator performance. We recommend users to leave the `--target peers` untouched to keep a diverse set of peers. - -3. Ensure that you have a quality router for the internet connection. 
For example, if you connect the router to many devices including the node, it may be possible that the router cannot handle all routing tasks, hence struggling to keep up the number of peers. Therefore, using a quality router for the node is important to keep a healthy number of peers. +1. Check that you do not lower the number of peers using the flag `--target-peers`. The default is 100. A lower value set will lower the maximum number of peers your node can connect to, which may potentially interrupt the validator performance. We recommend users to leave the `--target-peers` untouched to keep a diverse set of peers. +1. Ensure that you have a quality router for the internet connection. For example, if you connect the router to many devices including the node, it may be possible that the router cannot handle all routing tasks, hence struggling to keep up the number of peers. Therefore, using a quality router for the node is important to keep a healthy number of peers. ### How do I update lighthouse? @@ -415,7 +418,7 @@ If you are updating by rebuilding from source, see [here.](./installation-source If you are running the docker image provided by Sigma Prime on Dockerhub, you can update to specific versions, for example: ```bash -$ docker pull sigp/lighthouse:v1.0.0 +docker pull sigp/lighthouse:v1.0.0 ``` If you are building a docker image, the process will be similar to the one described [here.](./docker.md#building-the-docker-image) @@ -461,7 +464,7 @@ Monitoring](./validator-monitoring.md) for more information. Lighthouse has also The setting on the beacon node is the same for both cases below. In the beacon node, specify `lighthouse bn --http-address local_IP` so that the beacon node is listening on the local network rather than `localhost`. You can find the `local_IP` by running the command `hostname -I | awk '{print $1}'` on the server running the beacon node. -1.
If the beacon node and validator clients are on different servers *in the same network*, the setting in the validator client is as follows: +1. If the beacon node and validator clients are on different servers _in the same network_, the setting in the validator client is as follows: Use the flag `--beacon-nodes` to point to the beacon node. For example, `lighthouse vc --beacon-nodes http://local_IP:5052` where `local_IP` is the local IP address of the beacon node and `5052` is the default `http-port` of the beacon node. @@ -475,34 +478,33 @@ The setting on the beacon node is the same for both cases below. In the beacon n You can refer to [Redundancy](./redundancy.md) for more information. -2. If the beacon node and validator clients are on different servers *and different networks*, it is necessary to perform port forwarding of the SSH port (e.g., the default port 22) on the router, and also allow firewall on the SSH port. The connection can be established via port forwarding on the router. +2. If the beacon node and validator clients are on different servers _and different networks_, it is necessary to perform port forwarding of the SSH port (e.g., the default port 22) on the router, and also allow firewall on the SSH port. The connection can be established via port forwarding on the router. - - - In the validator client, use the flag `--beacon-nodes` to point to the beacon node. However, since the beacon node and the validator client are on different networks, the IP address to use is the public IP address of the beacon node, i.e., `lighthouse vc --beacon-nodes http://public_IP:5052`. You can get the public IP address of the beacon node by running the command ` dig +short myip.opendns.com @resolver1.opendns.com` on the server running the beacon node. + In the validator client, use the flag `--beacon-nodes` to point to the beacon node. 
However, since the beacon node and the validator client are on different networks, the IP address to use is the public IP address of the beacon node, i.e., `lighthouse vc --beacon-nodes http://public_IP:5052`. You can get the public IP address of the beacon node by running the command `dig +short myip.opendns.com @resolver1.opendns.com` on the server running the beacon node. Additionally, port forwarding of port 5052 on the router connected to the beacon node is required for the vc to connect to the bn. To do port forwarding, refer to [how to open ports](./advanced_networking.md#how-to-open-ports). - If you have firewall setup, e.g., `ufw`, you will need to allow connections to port 5052 (assuming that the default port is used). Since the beacon node HTTP/HTTPS API is public-facing (i.e., the 5052 port is now exposed to the internet due to port forwarding), we strongly recommend users to apply IP-address filtering to the BN/VC connection from malicious actors. This can be done using the command: - ``` + ```bash sudo ufw allow from vc_IP_address proto tcp to any port 5052 ``` - where `vc_IP_address` is the public IP address of the validator client. The command will only allow connections to the beacon node from the validator client IP address to prevent malicious attacks on the beacon node over the internet. + where `vc_IP_address` is the public IP address of the validator client. The command will only allow connections to the beacon node from the validator client IP address to prevent malicious attacks on the beacon node over the internet. It is also worth noting that the `--beacon-nodes` flag can also be used for redundancy of beacon nodes. For example, let's say you have a beacon node and a validator client running on the same host, and a second beacon node on another server as a backup. In this case, you can use `lighthouse vc --beacon-nodes http://localhost:5052, http://IP-address:5052` on the validator client. 
### Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address? + No. Lighthouse will auto-detect the change and update your Ethereum Node Record (ENR). You just need to make sure you are not manually setting the ENR with `--enr-address` (which, for common use cases, this flag is not used). ### How to change the TCP/UDP port 9000 that Lighthouse listens on? + Use the flag `--port ` in the beacon node. This flag can be useful when you are running two beacon nodes at the same time. You can leave one beacon node as the default port 9000, and configure the second beacon node to listen on, e.g., `--port 9100`. Since V4.5.0, Lighthouse supports QUIC and by default will use the value of `--port` + 1 to listen via UDP (default `9001`). This can be configured by using the flag `--quic-port`. Refer to [Advanced Networking](./advanced_networking.md#nat-traversal-port-forwarding) for more information. -### Lighthouse `v4.3.0` introduces a change where a node will subscribe to only 2 subnets in total. I am worried that this will impact my validators return. +### Lighthouse `v4.3.0` introduces a change where a node will subscribe to only 2 subnets in total. I am worried that this will impact my validators return Previously, having more validators means subscribing to more subnets. Since the change, a node will now only subscribe to 2 subnets in total. This will bring about significant reductions in bandwidth for nodes with multiple validators. @@ -520,11 +522,12 @@ With `--metrics` enabled in the beacon node, you can find the number of peers co A response example is: -``` +```text # HELP libp2p_quic_peers Count of libp2p peers currently connected via QUIC # TYPE libp2p_quic_peers gauge libp2p_quic_peers 4 ``` + which shows that there are 4 peers connected via QUIC. 
## Miscellaneous @@ -552,19 +555,22 @@ Specs: mainnet (true), minimal (false), gnosis (true) If you download the binary file, navigate to the location of the directory, for example, the binary file is in `/usr/local/bin`, run `/usr/local/bin/lighthouse --version`, the example of output is the same as above. Alternatively, if you have Lighthouse running, on the same computer, you can run: + ```bash curl "http://127.0.0.1:5052/eth/v1/node/version" ``` Example of output: + ```bash {"data":{"version":"Lighthouse/v4.1.0-693886b/x86_64-linux"}} ``` + which says that the version is v4.1.0. ### Does Lighthouse have pruning function like the execution client to save disk space? -Yes, Lighthouse supports [state pruning](./database-migrations.md#how-to-prune-historic-states) which can help to save disk space. +Yes, Lighthouse supports [state pruning](./database-migrations.md#how-to-prune-historic-states) which can help to save disk space. ### Can I use a HDD for the freezer database and only have the hot db on SSD? @@ -574,20 +580,6 @@ Yes, you can do so by using the flag `--freezer-dir /path/to/freezer_db` in the The reason why Lighthouse logs in UTC is due to the dependency on an upstream library that is [yet to be resolved](https://github.com/sigp/lighthouse/issues/3130). Alternatively, using the flag `disable-log-timestamp` in combination with systemd will suppress the UTC timestamps and print the logs in local timestamps. -### My hard disk is full and my validator is down. What should I do? +### My hard disk is full and my validator is down. What should I do? A quick way to get the validator back online is by removing the Lighthouse beacon node database and resync Lighthouse using checkpoint sync. A guide to do this can be found in the [Lighthouse Discord server](https://discord.com/channels/605577013327167508/605577013331361793/1019755522985050142). With some free space left, you will then be able to prune the execution client database to free up more space. 
- - - - - - - - - - - - - - diff --git a/book/src/graffiti.md b/book/src/graffiti.md index 302f8f9679..ba9c7d05d7 100644 --- a/book/src/graffiti.md +++ b/book/src/graffiti.md @@ -2,14 +2,16 @@ Lighthouse provides four options for setting validator graffiti. -### 1. Using the "--graffiti-file" flag on the validator client +## 1. Using the "--graffiti-file" flag on the validator client + Users can specify a file with the `--graffiti-file` flag. This option is useful for dynamically changing graffitis for various use cases (e.g. drawing on the beaconcha.in graffiti wall). This file is loaded once on startup and reloaded everytime a validator is chosen to propose a block. Usage: `lighthouse vc --graffiti-file graffiti_file.txt` The file should contain key value pairs corresponding to validator public keys and their associated graffiti. The file can also contain a `default` key for the default case. -``` + +```text default: default_graffiti public_key1: graffiti1 public_key2: graffiti2 @@ -18,7 +20,7 @@ public_key2: graffiti2 Below is an example of a graffiti file: -``` +```text default: Lighthouse 0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007: mr f was here 0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477: mr v was here @@ -26,13 +28,15 @@ default: Lighthouse Lighthouse will first search for the graffiti corresponding to the public key of the proposing validator, if there are no matches for the public key, then it uses the graffiti corresponding to the default key if present. -### 2. Setting the graffiti in the `validator_definitions.yml` +## 2. Setting the graffiti in the `validator_definitions.yml` + Users can set validator specific graffitis in `validator_definitions.yml` with the `graffiti` key. This option is recommended for static setups where the graffitis won't change on every new block proposal. 
-You can also update the graffitis in the `validator_definitions.yml` file using the [Lighthouse API](api-vc-endpoints.html#patch-lighthousevalidatorsvoting_pubkey). See example in [Set Graffiti via HTTP](#set-graffiti-via-http). +You can also update the graffitis in the `validator_definitions.yml` file using the [Lighthouse API](api-vc-endpoints.html#patch-lighthousevalidatorsvoting_pubkey). See example in [Set Graffiti via HTTP](#set-graffiti-via-http). Below is an example of the validator_definitions.yml with validator specific graffitis: -``` + +```text --- - enabled: true voting_public_key: "0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007" @@ -48,32 +52,35 @@ Below is an example of the validator_definitions.yml with validator specific gra graffiti: "somethingprofound" ``` -### 3. Using the "--graffiti" flag on the validator client +## 3. Using the "--graffiti" flag on the validator client + Users can specify a common graffiti for all their validators using the `--graffiti` flag on the validator client. Usage: `lighthouse vc --graffiti example` -### 4. Using the "--graffiti" flag on the beacon node +## 4. Using the "--graffiti" flag on the beacon node + Users can also specify a common graffiti using the `--graffiti` flag on the beacon node as a common graffiti for all validators. Usage: `lighthouse bn --graffiti fortytwo` > Note: The order of preference for loading the graffiti is as follows: +> > 1. Read from `--graffiti-file` if provided. -> 2. If `--graffiti-file` is not provided or errors, read graffiti from `validator_definitions.yml`. -> 3. If graffiti is not specified in `validator_definitions.yml`, load the graffiti passed in the `--graffiti` flag on the validator client. -> 4. If the `--graffiti` flag on the validator client is not passed, load the graffiti passed in the `--graffiti` flag on the beacon node. -> 4. If the `--graffiti` flag is not passed, load the default Lighthouse graffiti. +> 1. 
If `--graffiti-file` is not provided or errors, read graffiti from `validator_definitions.yml`. +> 1. If graffiti is not specified in `validator_definitions.yml`, load the graffiti passed in the `--graffiti` flag on the validator client. +> 1. If the `--graffiti` flag on the validator client is not passed, load the graffiti passed in the `--graffiti` flag on the beacon node. +> 1. If the `--graffiti` flag is not passed, load the default Lighthouse graffiti. -### Set Graffiti via HTTP +## Set Graffiti via HTTP Use the [Lighthouse API](api-vc-endpoints.md) to set graffiti on a per-validator basis. This method updates the graffiti -both in memory and in the `validator_definitions.yml` file. The new graffiti will be used in the next block proposal +both in memory and in the `validator_definitions.yml` file. The new graffiti will be used in the next block proposal without requiring a validator client restart. Refer to [Lighthouse API](api-vc-endpoints.html#patch-lighthousevalidatorsvoting_pubkey) for API specification. -#### Example Command +### Example Command ```bash DATADIR=/var/lib/lighthouse @@ -85,4 +92,4 @@ curl -X PATCH "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf4 }' | jq ``` -A `null` response indicates that the request is successful. \ No newline at end of file +A `null` response indicates that the request is successful. diff --git a/book/src/help_bn.md b/book/src/help_bn.md index e437925a0e..b458842e08 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -1,489 +1,610 @@ # Beacon Node ``` -Sigma Prime -The primary component which connects to the Ethereum 2.0 P2P network and downloads, verifies and stores blocks. Provides -a HTTP API for querying the beacon chain and publishing messages to the network. +The primary component which connects to the Ethereum 2.0 P2P network and +downloads, verifies and stores blocks. Provides a HTTP API for querying the +beacon chain and publishing messages to the network. 
-USAGE: - lighthouse beacon_node [FLAGS] [OPTIONS] +Usage: lighthouse beacon_node [OPTIONS] -FLAGS: - --allow-insecure-genesis-sync Enable syncing from genesis, which is generally insecure and incompatible - with data availability checks. Checkpoint syncing is the preferred method - for syncing a node. Only use this flag when testing. DO NOT use on - mainnet! - --always-prefer-builder-payload This flag is deprecated and has no effect. - --always-prepare-payload Send payload attributes with every fork choice update. This is intended - for use by block builders, relays and developers. You should set a fee - recipient on this BN and also consider adjusting the --prepare-payload- - lookahead flag. - --builder-fallback-disable-checks This flag disables all checks related to chain health. This means the - builder API will always be used for payload construction, regardless of - recent chain conditions. - --compact-db If present, apply compaction to the database on start-up. Use with - caution. It is generally not recommended unless auto-compaction is - disabled. - --disable-backfill-rate-limiting Disable the backfill sync rate-limiting. This allow users to just sync - the entire chain as fast as possible, however it can result in resource - contention which degrades staking performance. Stakers should generally - choose to avoid this flag since backfill sync is not required for - staking. - --disable-deposit-contract-sync Explicitly disables syncing of deposit logs from the execution node. This - overrides any previous option that depends on it. Useful if you intend to - run a non-validating beacon node. - --disable-duplicate-warn-logs This flag is deprecated and has no effect. - -x, --disable-enr-auto-update Discovery automatically updates the nodes local ENR with an external IP - address and port as seen by other peers on the network. This disables - this feature, fixing the ENR's IP/PORT to those specified on boot. 
- --disable-lock-timeouts Disable the timeouts applied to some internal locks by default. This can - lead to less spurious failures on slow hardware but is considered - experimental as it may obscure performance issues. - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag - will generally increase memory usage, it should only be provided when - debugging specific memory allocation issues. - --disable-optimistic-finalized-sync Force Lighthouse to verify every execution block hash with the execution - client during finalized sync. By default block hashes will be checked in - Lighthouse and only passed to the EL if initial verification fails. - --disable-packet-filter Disables the discovery packet filter. Useful for testing in smaller - networks - --disable-proposer-reorgs Do not attempt to reorg late blocks from other validators when proposing. - --disable-quic Disables the quic transport. The node will rely solely on the TCP - transport for libp2p connections. - --disable-upnp Disables UPnP support. Setting this will prevent Lighthouse from - attempting to automatically establish external port mappings. - --dummy-eth1 If present, uses an eth1 backend that generates static dummy - data.Identical to the method used at the 2019 Canada interop. - --enable-private-discovery Lighthouse by default does not discover private IP addresses. Set this - flag to enable connection attempts to local addresses. - -e, --enr-match Sets the local ENR IP address and port to match those set for lighthouse. - Specifically, the IP address will be the value of --listen-address and - the UDP port will be --discovery-port. - --eth1 If present the node will connect to an eth1 node. This is required for - block production, you must use this flag if you wish to serve a - validator. 
- --eth1-purge-cache Purges the eth1 block and deposit caches - --genesis-backfill Attempts to download blocks all the way back to genesis when checkpoint - syncing. - --gui Enable the graphical user interface and all its requirements. This - enables --http and --validator-monitor-auto and enables SSE logging. - -h, --help Prints help information - --http Enable the RESTful HTTP API server. Disabled by default. - --http-enable-tls Serves the RESTful HTTP API server over TLS. This feature is currently - experimental. - --import-all-attestations Import and aggregate all attestations, regardless of validator - subscriptions. This will only import attestations from already-subscribed - subnets, use with --subscribe-all-subnets to ensure all attestations are - received for import. - --light-client-server Act as a full node supporting light clients on the p2p network - [experimental] - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress If present, compress old log files. This can help reduce the space needed - to store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they - can be read by any user on the machine. Note that logs can often contain - sensitive information about your validator and so this flag should be - used with caution. For Windows users, the log file permissions will be - inherited from the parent folder. - --metrics Enable the Prometheus metrics HTTP server. Disabled by default. - --private Prevents sending various client identification information. - --proposer-only Sets this beacon node at be a block proposer only node. This will run the - beacon node in a minimal configuration that is sufficient for block - publishing only. This flag should be used for a beacon node being - referenced by validator client using the --proposer-node flag. This - configuration is for enabling more secure setups. 
- --purge-db If present, the chain database will be deleted. Use with caution. - --reconstruct-historic-states After a checkpoint sync, reconstruct historic states in the database. - This requires syncing all the way back to genesis. - --reset-payload-statuses When present, Lighthouse will forget the payload statuses of any already- - imported blocks. This can assist in the recovery from a consensus - failure caused by the execution layer. - --shutdown-after-sync Shutdown beacon node as soon as sync is completed. Backfill sync will not - be performed before shutdown. - --slasher Run a slasher alongside the beacon node. It is currently only recommended - for expert users because of the immaturity of the slasher UX and the - extra resources required. - --staking Standard option for a staking beacon node. This will enable the HTTP - server on localhost:5052 and import deposit logs from the execution node. - This is equivalent to `--http` on merge-ready networks, or `--http - --eth1` pre-merge - --subscribe-all-subnets Subscribe to all subnets regardless of validator count. This will also - advertise the beacon node as being long-lived subscribed to all subnets. - --validator-monitor-auto Enables the automatic detection and monitoring of validators connected to - the HTTP API and using the subnet subscription endpoint. This generally - has the effect of providing additional logging and metrics for locally - controlled validators. - -V, --version Prints version information - -z, --zero-ports Sets all listening TCP/UDP ports to 0, allowing the OS to choose some - arbitrary free ports. +Options: + --auto-compact-db + Enable or disable automatic compaction of the database on + finalization. [default: true] + --blob-prune-margin-epochs + The margin for blob pruning in epochs. The oldest blobs are pruned up + until data_availability_boundary - blob_prune_margin_epochs. [default: + 0] + --blobs-dir + Data directory for the blobs database. 
+ --block-cache-size + Specifies how many blocks the database should cache in memory + [default: 5] + --boot-nodes + One or more comma-delimited base64-encoded ENR's to bootstrap the p2p + network. Multiaddr is also supported. + --builder + The URL of a service compatible with the MEV-boost API. + --builder-fallback-epochs-since-finalization + If this node is proposing a block and the chain has not finalized + within this number of epochs, it will NOT query any connected + builders, and will use the local execution engine for payload + construction. Setting this value to anything less than 2 will cause + the node to NEVER query connected builders. Setting it to 2 will cause + this condition to be hit if there are skips slots at the start of an + epoch, right before this node is set to propose. [default: 3] + --builder-fallback-skips + If this node is proposing a block and has seen this number of skip + slots on the canonical chain in a row, it will NOT query any connected + builders, and will use the local execution engine for payload + construction. [default: 3] + --builder-fallback-skips-per-epoch + If this node is proposing a block and has seen this number of skip + slots on the canonical chain in the past `SLOTS_PER_EPOCH`, it will + NOT query any connected builders, and will use the local execution + engine for payload construction. [default: 8] + --builder-header-timeout + Defines a timeout value (in milliseconds) to use when fetching a block + header from the builder API. [default: 1000] + --builder-profit-threshold + This flag is deprecated and has no effect. + --builder-user-agent + The HTTP user agent to send alongside requests to the builder URL. The + default is Lighthouse's version string. + --checkpoint-blobs + Set the checkpoint blobs to start syncing from. Must be aligned and + match --checkpoint-block. Using --checkpoint-sync-url instead is + recommended. + --checkpoint-block + Set a checkpoint block to start syncing from. 
Must be aligned and + match --checkpoint-state. Using --checkpoint-sync-url instead is + recommended. + --checkpoint-state + Set a checkpoint state to start syncing from. Must be aligned and + match --checkpoint-block. Using --checkpoint-sync-url instead is + recommended. + --checkpoint-sync-url + Set the remote beacon node HTTP endpoint to use for checkpoint sync. + --checkpoint-sync-url-timeout + Set the timeout for checkpoint sync calls to remote beacon node HTTP + endpoint. [default: 180] + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --discovery-port + The UDP port that discovery will listen on. Defaults to `port` + --discovery-port6 + The UDP port that discovery will listen on over IPv6 if listening over + both IPv4 and IPv6. Defaults to `port6` + --enr-address
... + The IP address/ DNS address to broadcast to other peers on how to + reach this node. If a DNS address is provided, the enr-address is set + to the IP address it resolves to and does not auto-update based on + PONG responses in discovery. Set this only if you are sure other nodes + can connect to your local node on this address. This will update the + `ip4` or `ip6` ENR fields accordingly. To update both, set this flag + twice with the different values. + --enr-quic-port + The quic UDP4 port that will be set on the local ENR. Set this only if + you are sure other nodes can connect to your local node on this port + over IPv4. + --enr-quic6-port + The quic UDP6 port that will be set on the local ENR. Set this only if + you are sure other nodes can connect to your local node on this port + over IPv6. + --enr-tcp-port + The TCP4 port of the local ENR. Set this only if you are sure other + nodes can connect to your local node on this port over IPv4. The + --port flag is used if this is not set. + --enr-tcp6-port + The TCP6 port of the local ENR. Set this only if you are sure other + nodes can connect to your local node on this port over IPv6. The + --port6 flag is used if this is not set. + --enr-udp-port + The UDP4 port of the local ENR. Set this only if you are sure other + nodes can connect to your local node on this port over IPv4. + --enr-udp6-port + The UDP6 port of the local ENR. Set this only if you are sure other + nodes can connect to your local node on this port over IPv6. + --epochs-per-blob-prune + The epoch interval with which to prune blobs from Lighthouse's + database when they are older than the data availability boundary + relative to the current epoch. [default: 1] + --epochs-per-migration + The number of epochs to wait between running the migration of data + from the hot DB to the cold DB. 
Less frequent runs can be useful for + minimizing disk writes [default: 1] + --eth1-blocks-per-log-query + Specifies the number of blocks that a deposit log query should span. + This will reduce the size of responses from the Eth1 endpoint. + [default: 1000] + --eth1-cache-follow-distance + Specifies the distance between the Eth1 chain head and the last block + which should be imported into the cache. Setting this value lower can + help compensate for irregular Proof-of-Work block times, but setting + it too low can make the node vulnerable to re-orgs. + --execution-endpoint + Server endpoint for an execution layer JWT-authenticated HTTP JSON-RPC + connection. Uses the same endpoint to populate the deposit cache. + --execution-jwt + File path which contains the hex-encoded JWT secret for the execution + endpoint provided in the --execution-endpoint flag. + --execution-jwt-id + Used by the beacon node to communicate a unique identifier to + execution nodes during JWT authentication. It corresponds to the 'id' + field in the JWT claims object. Set to empty by default + --execution-jwt-secret-key + Hex-encoded JWT secret for the execution endpoint provided in the + --execution-endpoint flag. + --execution-jwt-version + Used by the beacon node to communicate a client version to execution + nodes during JWT authentication. It corresponds to the 'clv' field in + the JWT claims object. Set to empty by default + --execution-timeout-multiplier + Unsigned integer to multiply the default execution timeouts by. + [default: 1] + --fork-choice-before-proposal-timeout + Set the maximum number of milliseconds to wait for fork choice before + proposing a block. You can prevent waiting at all by setting the + timeout to 0, however you risk proposing atop the wrong parent block. + [default: 250] + --freezer-dir + Data directory for the freezer database. + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. 
Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --graffiti + Specify your custom graffiti to be included in blocks. Defaults to the + current version and commit, truncated to fit in 32 bytes. + --historic-state-cache-size + Specifies how many states from the freezer database should cache in + memory [default: 1] + --http-address
+ Set the listen address for the RESTful HTTP API server. + --http-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. + Use * to allow any origin (not recommended in production). If no value + is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5052). + --http-duplicate-block-status + Status code to send when a block that is already known is POSTed to + the HTTP API. + --http-enable-beacon-processor + The beacon processor is a scheduler which provides quality-of-service + and DoS protection. When set to "true", HTTP API requests will be + queued and scheduled alongside other tasks. When set to "false", HTTP + API responses will be executed immediately. + --http-port + Set the listen TCP port for the RESTful HTTP API server. + --http-sse-capacity-multiplier + Multiplier to apply to the length of HTTP server-sent-event (SSE) + channels. Increasing this value can prevent messages from being + dropped. + --http-tls-cert + The path of the certificate to be used when serving the HTTP API + server over TLS. + --http-tls-key + The path of the private key to be used when serving the HTTP API + server over TLS. Must not be password-protected. + --inbound-rate-limiter-protocols + Configures the inbound rate limiter (requests received by this + node). Rate limit quotas per protocol can be set in the form of + :/. To set quotas for multiple + protocols, separate them by ';'. This is enabled by default, using + default quotas. To disable rate limiting use the + disable-inbound-rate-limiter flag instead. + --invalid-gossip-verified-blocks-path + If a block succeeds gossip validation whilst failing full validation, + store the block SSZ as a file at this path. This feature is only + recommended for developers. This directory is not pruned, users should + be careful to avoid filling up their disks. 
+ --libp2p-addresses + One or more comma-delimited multiaddrs to manually connect to a libp2p + peer without an ENR. + --listen-address [
...] + The address lighthouse will listen for UDP and TCP connections. To + listen over IpV4 and IpV6 set this flag twice with the different + values. + Examples: + - --listen-address '0.0.0.0' will listen over IPv4. + - --listen-address '::' will listen over IPv6. + - --listen-address '0.0.0.0' --listen-address '::' will listen over + both IPv4 and IPv6. The order of the given addresses is not relevant. + However, multiple IPv4, or multiple IPv6 addresses will not be + accepted. [default: 0.0.0.0] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --max-skip-slots + Refuse to skip more than this many slots when processing an + attestation. This prevents nodes on minority forks from wasting our + time and disk space, but could also cause unnecessary consensus + failures, so is disabled by default. + --metrics-address
+ Set the listen address for the Prometheus metrics HTTP server. + --metrics-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. + Use * to allow any origin (not recommended in production). If no value + is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5054). + --metrics-port + Set the listen TCP port for the Prometheus metrics HTTP server. + --monitoring-endpoint
+ Enables the monitoring service for sending system metrics to a remote + endpoint. This can be used to monitor your setup on certain services + (e.g. beaconcha.in). This flag sets the endpoint where the beacon node + metrics will be sent. Note: This will send information to a remote + server which may identify and associate your validators, IP address and + other personal information. Always use a HTTPS connection and never + provide an untrusted URL. + --monitoring-endpoint-period + Defines how many seconds to wait between each message sent to the + monitoring-endpoint. Default: 60s + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --network-dir + Data directory for network keys. Defaults to network/ inside the + beacon node dir. + --port + The TCP/UDP ports to listen on. There are two UDP ports. The discovery + UDP port will be set to this value and the Quic UDP port will be set + to this value + 1. The discovery port can be modified by the + --discovery-port flag and the quic port can be modified by the + --quic-port flag. If listening over both IPv4 and IPv6 the --port flag + will apply to the IPv4 address and --port6 to the IPv6 address. + [default: 9000] + --port6 + The TCP/UDP ports to listen on over IPv6 when listening over both IPv4 + and IPv6. Defaults to 9090 when required. The Quic UDP port will be + set to this value + 1. [default: 9090] + --prepare-payload-lookahead + The time before the start of a proposal slot at which payload + attributes should be sent. Low values are useful for execution nodes + which don't improve their payload after the first call, and high + values are useful for ensuring the EL is given ample notice. Default: + 1/3 of a slot. + --progressive-balances + Deprecated. This optimisation is now the default and cannot be + disabled. + --proposer-reorg-cutoff + Maximum delay after the start of the slot at which to propose a + reorging block. 
Lower values can prevent failed reorgs by ensuring the + block has ample time to propagate and be processed by the network. The + default is 1/12th of a slot (1 second on mainnet) + --proposer-reorg-disallowed-offsets + Comma-separated list of integer offsets which can be used to avoid + proposing reorging blocks at certain slots. An offset of N means that + reorging proposals will not be attempted at any slot such that `slot % + SLOTS_PER_EPOCH == N`. By default only re-orgs at offset 0 will be + avoided. Any offsets supplied with this flag will impose additional + restrictions. + --proposer-reorg-epochs-since-finalization + Maximum number of epochs since finalization at which proposer reorgs + are allowed. Default: 2 + --proposer-reorg-parent-threshold + Percentage of parent vote weight above which to attempt a proposer + reorg. Default: 160% + --proposer-reorg-threshold + Percentage of head vote weight below which to attempt a proposer + reorg. Default: 20% + --prune-blobs + Prune blobs from Lighthouse's database when they are older than the + data availability boundary relative to the current epoch. + [default: true] + --prune-payloads + Prune execution payloads from Lighthouse's database. This saves space + but imposes load on the execution client, as payloads need to be + reconstructed and sent to syncing peers. [default: true] + --quic-port + The UDP port that quic will listen on. Defaults to `port` + 1 + --quic-port6 + The UDP port that quic will listen on over IPv6 if listening over both + IPv4 and IPv6. Defaults to `port6` + 1 + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. 
Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + --self-limiter-protocols + Enables the outbound rate limiter (requests made by this node).Rate + limit quotas per protocol can be set in the form of + :/. To set quotas for multiple + protocols, separate them by ';'. If the self rate limiter is enabled + and a protocol is not present in the configuration, the quotas used + for the inbound rate limiter will be used. + --shuffling-cache-size + Some HTTP API requests can be optimised by caching the shufflings at + each epoch. This flag allows the user to set the shuffling cache size + in epochs. Shufflings are dependent on validator count and setting + this value to a large number can consume a large amount of memory. + --slasher-att-cache-size + Set the maximum number of attestation roots for the slasher to cache + --slasher-backend + Set the database backend to be used by the slasher. [possible values: + lmdb, disabled] + --slasher-broadcast + Broadcast slashings found by the slasher to the rest of the network + [Enabled by default]. [default: true] + --slasher-chunk-size + Number of epochs per validator per chunk stored on disk. + --slasher-dir + Set the slasher's database directory. + --slasher-history-length + Configure how many epochs of history the slasher keeps. Immutable + after initialization. + --slasher-max-db-size + Maximum size of the MDBX database used by the slasher. + --slasher-slot-offset + Set the delay from the start of the slot at which the slasher should + ingest attestations. Only effective if the slasher-update-period is a + multiple of the slot duration. + --slasher-update-period + Configure how often the slasher runs batch processing. + --slasher-validator-chunk-size + Number of validators per chunk stored on disk. + --slots-per-restore-point + Specifies how often a freezer DB restore point should be stored. 
+ Cannot be changed after initialization. [default: 8192 (mainnet) or 64 + (minimal)] + --state-cache-size + Specifies the size of the state cache [default: 128] + --suggested-fee-recipient + Emergency fallback fee recipient for use in case the validator client + does not have one configured. You should set this flag on the + validator client instead of (or in addition to) setting it here. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --target-peers + The target number of peers. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. 
+ --trusted-peers + One or more comma-delimited trusted peer ids which always have the + highest score according to the peer scoring system. + --trusted-setup-file-override + Path to a json file containing the trusted setup params. NOTE: This + will override the trusted setup that is generated from the mainnet kzg + ceremony. Use with caution + --validator-monitor-file + As per --validator-monitor-pubkeys, but the comma-separated list is + contained within a file at the given path. + --validator-monitor-individual-tracking-threshold + Once the validator monitor reaches this number of local validators it + will stop collecting per-validator Prometheus metrics and issuing + per-validator logs. Instead, it will provide aggregate metrics and + logs. This avoids infeasibly high cardinality in the Prometheus + database and high log volume when using many validators. Defaults to + 64. + --validator-monitor-pubkeys + A comma-separated list of 0x-prefixed validator public keys. These + validators will receive special monitoring and additional logging. + --wss-checkpoint + Specify a weak subjectivity checkpoint in `block_root:epoch` format to + verify the node's sync against. The block root should be 0x-prefixed. + Note that this flag is for verification only, to perform a checkpoint + sync from a recent state use --checkpoint-sync-url. + -V, --version + Print version -OPTIONS: - --auto-compact-db - Enable or disable automatic compaction of the database on finalization. [default: true] - - --blob-prune-margin-epochs - The margin for blob pruning in epochs. The oldest blobs are pruned up until data_availability_boundary - - blob_prune_margin_epochs. [default: 0] - --blobs-dir - Data directory for the blobs database. - - --block-cache-size - Specifies how many blocks the database should cache in memory [default: 5] - - --boot-nodes - One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network. Multiaddr is also supported. 
- - --builder - The URL of a service compatible with the MEV-boost API. - - --builder-fallback-epochs-since-finalization - If this node is proposing a block and the chain has not finalized within this number of epochs, it will NOT - query any connected builders, and will use the local execution engine for payload construction. Setting this - value to anything less than 2 will cause the node to NEVER query connected builders. Setting it to 2 will - cause this condition to be hit if there are skips slots at the start of an epoch, right before this node is - set to propose. [default: 3] - --builder-fallback-skips - If this node is proposing a block and has seen this number of skip slots on the canonical chain in a row, it - will NOT query any connected builders, and will use the local execution engine for payload construction. - [default: 3] - --builder-fallback-skips-per-epoch - If this node is proposing a block and has seen this number of skip slots on the canonical chain in the past - `SLOTS_PER_EPOCH`, it will NOT query any connected builders, and will use the local execution engine for - payload construction. [default: 8] - --builder-profit-threshold - This flag is deprecated and has no effect. - - --builder-user-agent - The HTTP user agent to send alongside requests to the builder URL. The default is Lighthouse's version - string. - --checkpoint-blobs - Set the checkpoint blobs to start syncing from. Must be aligned and match --checkpoint-block. Using - --checkpoint-sync-url instead is recommended. - --checkpoint-block - Set a checkpoint block to start syncing from. Must be aligned and match --checkpoint-state. Using - --checkpoint-sync-url instead is recommended. - --checkpoint-state - Set a checkpoint state to start syncing from. Must be aligned and match --checkpoint-block. Using - --checkpoint-sync-url instead is recommended. - --checkpoint-sync-url - Set the remote beacon node HTTP endpoint to use for checkpoint sync. 
- - --checkpoint-sync-url-timeout - Set the timeout for checkpoint sync calls to remote beacon node HTTP endpoint. [default: 180] - - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --discovery-port - The UDP port that discovery will listen on. Defaults to `port` - - --discovery-port6 - The UDP port that discovery will listen on over IPv6 if listening over both IPv4 and IPv6. Defaults to - `port6` - --enr-address
... - The IP address/ DNS address to broadcast to other peers on how to reach this node. If a DNS address is - provided, the enr-address is set to the IP address it resolves to and does not auto-update based on PONG - responses in discovery. Set this only if you are sure other nodes can connect to your local node on this - address. This will update the `ip4` or `ip6` ENR fields accordingly. To update both, set this flag twice - with the different values. - --enr-quic-port - The quic UDP4 port that will be set on the local ENR. Set this only if you are sure other nodes can connect - to your local node on this port over IPv4. - --enr-quic6-port - The quic UDP6 port that will be set on the local ENR. Set this only if you are sure other nodes can connect - to your local node on this port over IPv6. - --enr-tcp-port - The TCP4 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on - this port over IPv4. The --port flag is used if this is not set. - --enr-tcp6-port - The TCP6 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on - this port over IPv6. The --port6 flag is used if this is not set. - --enr-udp-port - The UDP4 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on - this port over IPv4. - --enr-udp6-port - The UDP6 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on - this port over IPv6. - --epochs-per-blob-prune - The epoch interval with which to prune blobs from Lighthouse's database when they are older than the data - availability boundary relative to the current epoch. [default: 1] - --epochs-per-migration - The number of epochs to wait between running the migration of data from the hot DB to the cold DB. 
Less - frequent runs can be useful for minimizing disk writes [default: 1] - --eth1-blocks-per-log-query - Specifies the number of blocks that a deposit log query should span. This will reduce the size of responses - from the Eth1 endpoint. [default: 1000] - --eth1-cache-follow-distance - Specifies the distance between the Eth1 chain head and the last block which should be imported into the - cache. Setting this value lower can help compensate for irregular Proof-of-Work block times, but setting it - too low can make the node vulnerable to re-orgs. - --execution-endpoint - Server endpoint for an execution layer JWT-authenticated HTTP JSON-RPC connection. Uses the same endpoint to - populate the deposit cache. - --execution-jwt - File path which contains the hex-encoded JWT secret for the execution endpoint provided in the --execution- - endpoint flag. - --execution-jwt-id - Used by the beacon node to communicate a unique identifier to execution nodes during JWT authentication. It - corresponds to the 'id' field in the JWT claims object.Set to empty by default - --execution-jwt-secret-key - Hex-encoded JWT secret for the execution endpoint provided in the --execution-endpoint flag. - - --execution-jwt-version - Used by the beacon node to communicate a client version to execution nodes during JWT authentication. It - corresponds to the 'clv' field in the JWT claims object.Set to empty by default - --execution-timeout-multiplier - Unsigned integer to multiply the default execution timeouts by. [default: 1] - - --fork-choice-before-proposal-timeout - Set the maximum number of milliseconds to wait for fork choice before proposing a block. You can prevent - waiting at all by setting the timeout to 0, however you risk proposing atop the wrong parent block. - [default: 250] - --freezer-dir - Data directory for the freezer database. - - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. 
Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] - - --graffiti - Specify your custom graffiti to be included in blocks. Defaults to the current version and commit, truncated - to fit in 32 bytes. - --historic-state-cache-size - Specifies how many states from the freezer database should cache in memory [default: 1] - - --http-address
- Set the listen address for the RESTful HTTP API server. - - --http-allow-origin - Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not - recommended in production). If no value is supplied, the CORS allowed origin is set to the listen address of - this server (e.g., http://localhost:5052). - --http-duplicate-block-status - Status code to send when a block that is already known is POSTed to the HTTP API. - - --http-enable-beacon-processor - The beacon processor is a scheduler which provides quality-of-service and DoS protection. When set to - "true", HTTP API requests will be queued and scheduled alongside other tasks. When set to "false", HTTP API - responses will be executed immediately. - --http-port - Set the listen TCP port for the RESTful HTTP API server. - - --http-sse-capacity-multiplier - Multiplier to apply to the length of HTTP server-sent-event (SSE) channels. Increasing this value can - prevent messages from being dropped. - --http-tls-cert - The path of the certificate to be used when serving the HTTP API server over TLS. - - --http-tls-key - The path of the private key to be used when serving the HTTP API server over TLS. Must not be password- - protected. - --invalid-gossip-verified-blocks-path - If a block succeeds gossip validation whilst failing full validation, store the block SSZ as a file at this - path. This feature is only recommended for developers. This directory is not pruned, users should be careful - to avoid filling up their disks. - --libp2p-addresses - One or more comma-delimited multiaddrs to manually connect to a libp2p peer without an ENR. - - --listen-address
... - The address lighthouse will listen for UDP and TCP connections. To listen over IpV4 and IpV6 set this flag - twice with the different values. - Examples: - - --listen-address '0.0.0.0' will listen over IPv4. - - --listen-address '::' will listen over IPv6. - - --listen-address '0.0.0.0' --listen-address '::' will listen over both IPv4 and IPv6. The order of the - given addresses is not relevant. However, multiple IPv4, or multiple IPv6 addresses will not be accepted. - [default: 0.0.0.0] - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --max-skip-slots - Refuse to skip more than this many slots when processing an attestation. This prevents nodes on minority - forks from wasting our time and disk space, but could also cause unnecessary consensus failures, so is - disabled by default. - --metrics-address
- Set the listen address for the Prometheus metrics HTTP server. - - --metrics-allow-origin - Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not - recommended in production). If no value is supplied, the CORS allowed origin is set to the listen address of - this server (e.g., http://localhost:5054). - --metrics-port - Set the listen TCP port for the Prometheus metrics HTTP server. - - --monitoring-endpoint
- Enables the monitoring service for sending system metrics to a remote endpoint. This can be used to monitor - your setup on certain services (e.g. beaconcha.in). This flag sets the endpoint where the beacon node - metrics will be sent. Note: This will send information to a remote sever which may identify and associate - your validators, IP address and other personal information. Always use a HTTPS connection and never provide - an untrusted URL. - --monitoring-endpoint-period - Defines how many seconds to wait between each message sent to the monitoring-endpoint. Default: 60s - - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --network-dir - Data directory for network keys. Defaults to network/ inside the beacon node dir. - - --port - The TCP/UDP ports to listen on. There are two UDP ports. The discovery UDP port will be set to this value - and the Quic UDP port will be set to this value + 1. The discovery port can be modified by the --discovery- - port flag and the quic port can be modified by the --quic-port flag. If listening over both IPv4 - and IPv6 the --port flag will apply to the IPv4 address and --port6 to the IPv6 address. [default: 9000] - --port6 - The TCP/UDP ports to listen on over IPv6 when listening over both IPv4 and IPv6. Defaults to 9090 when - required. The Quic UDP port will be set to this value + 1. [default: 9090] - --prepare-payload-lookahead - The time before the start of a proposal slot at which payload attributes should be sent. Low values are - useful for execution nodes which don't improve their payload after the first call, and high values are - useful for ensuring the EL is given ample notice. Default: 1/3 of a slot. - --progressive-balances - Deprecated. This optimisation is now the default and cannot be disabled. 
[possible values: fast, disabled, - checked, strict] - --proposer-reorg-cutoff - Maximum delay after the start of the slot at which to propose a reorging block. Lower values can prevent - failed reorgs by ensuring the block has ample time to propagate and be processed by the network. The default - is 1/12th of a slot (1 second on mainnet) - --proposer-reorg-disallowed-offsets - Comma-separated list of integer offsets which can be used to avoid proposing reorging blocks at certain - slots. An offset of N means that reorging proposals will not be attempted at any slot such that `slot % - SLOTS_PER_EPOCH == N`. By default only re-orgs at offset 0 will be avoided. Any offsets supplied with this - flag will impose additional restrictions. - --proposer-reorg-epochs-since-finalization - Maximum number of epochs since finalization at which proposer reorgs are allowed. Default: 2 - - --proposer-reorg-parent-threshold - Percentage of parent vote weight above which to attempt a proposer reorg. Default: 160% - - --proposer-reorg-threshold - Percentage of head vote weight below which to attempt a proposer reorg. Default: 20% - - --prune-blobs - Prune blobs from Lighthouse's database when they are older than the data data availability boundary relative - to the current epoch. [default: true] - --prune-payloads - Prune execution payloads from Lighthouse's database. This saves space but imposes load on the execution - client, as payloads need to be reconstructed and sent to syncing peers. [default: true] - --quic-port - The UDP port that quic will listen on. Defaults to `port` + 1 - - --quic-port6 - The UDP port that quic will listen on over IPv6 if listening over both IPv4 and IPv6. Defaults to `port6` + - 1 - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. 
This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --shuffling-cache-size - Some HTTP API requests can be optimised by caching the shufflings at each epoch. This flag allows the user - to set the shuffling cache size in epochs. Shufflings are dependent on validator count and setting this - value to a large number can consume a large amount of memory. - --slasher-att-cache-size - Set the maximum number of attestation roots for the slasher to cache - - --slasher-backend - Set the database backend to be used by the slasher. [possible values: lmdb, disabled] - - --slasher-broadcast - Broadcast slashings found by the slasher to the rest of the network [Enabled by default]. [default: true] - - --slasher-chunk-size - Number of epochs per validator per chunk stored on disk. - - --slasher-dir - Set the slasher's database directory. - - --slasher-history-length - Configure how many epochs of history the slasher keeps. Immutable after initialization. - - --slasher-max-db-size - Maximum size of the MDBX database used by the slasher. - - --slasher-slot-offset - Set the delay from the start of the slot at which the slasher should ingest attestations. Only effective if - the slasher-update-period is a multiple of the slot duration. - --slasher-update-period - Configure how often the slasher runs batch processing. - - --slasher-validator-chunk-size - Number of validators per chunk stored on disk. - - --slots-per-restore-point - Specifies how often a freezer DB restore point should be stored. Cannot be changed after initialization. 
- [default: 8192 (mainnet) or 64 (minimal)] - --state-cache-size - Specifies the size of the snapshot cache [default: 3] - - --suggested-fee-recipient - Emergency fallback fee recipient for use in case the validator client does not have one configured. You - should set this flag on the validator client instead of (or in addition to) setting it here. - --target-peers - The target number of peers. - - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - --trusted-peers - One or more comma-delimited trusted peer ids which always have the highest score according to the peer - scoring system. 
- --trusted-setup-file-override - Path to a json file containing the trusted setup params. NOTE: This will override the trusted setup that is - generated from the mainnet kzg ceremony. Use with caution - --validator-monitor-file - As per --validator-monitor-pubkeys, but the comma-separated list is contained within a file at the given - path. - --validator-monitor-individual-tracking-threshold - Once the validator monitor reaches this number of local validators it will stop collecting per-validator - Prometheus metrics and issuing per-validator logs. Instead, it will provide aggregate metrics and logs. This - avoids infeasibly high cardinality in the Prometheus database and high log volume when using many - validators. Defaults to 64. - --validator-monitor-pubkeys - A comma-separated list of 0x-prefixed validator public keys. These validators will receive special - monitoring and additional logging. - --wss-checkpoint - Specify a weak subjectivity checkpoint in `block_root:epoch` format to verify the node's sync against. The - block root should be 0x-prefixed. Note that this flag is for verification only, to perform a checkpoint sync - from a recent state use --checkpoint-sync-url. +Flags: + --allow-insecure-genesis-sync + Enable syncing from genesis, which is generally insecure and + incompatible with data availability checks. Checkpoint syncing is the + preferred method for syncing a node. Only use this flag when testing. + DO NOT use on mainnet! + --always-prefer-builder-payload + This flag is deprecated and has no effect. + --always-prepare-payload + Send payload attributes with every fork choice update. This is + intended for use by block builders, relays and developers. You should + set a fee recipient on this BN and also consider adjusting the + --prepare-payload-lookahead flag. + --builder-fallback-disable-checks + This flag disables all checks related to chain health. 
This means the + builder API will always be used for payload construction, regardless + of recent chain conditions. + --compact-db + If present, apply compaction to the database on start-up. Use with + caution. It is generally not recommended unless auto-compaction is + disabled. + --disable-backfill-rate-limiting + Disable the backfill sync rate-limiting. This allow users to just sync + the entire chain as fast as possible, however it can result in + resource contention which degrades staking performance. Stakers should + generally choose to avoid this flag since backfill sync is not + required for staking. + --disable-deposit-contract-sync + Explicitly disables syncing of deposit logs from the execution node. + This overrides any previous option that depends on it. Useful if you + intend to run a non-validating beacon node. + --disable-duplicate-warn-logs + This flag is deprecated and has no effect. + --disable-enr-auto-update + Discovery automatically updates the nodes local ENR with an external + IP address and port as seen by other peers on the network. This + disables this feature, fixing the ENR's IP/PORT to those specified on + boot. + --disable-inbound-rate-limiter + Disables the inbound rate limiter (requests received by this node). + --disable-lock-timeouts + Disable the timeouts applied to some internal locks by default. This + can lead to less spurious failures on slow hardware but is considered + experimental as it may obscure performance issues. + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + --disable-optimistic-finalized-sync + Force Lighthouse to verify every execution block hash with the + execution client during finalized sync. 
By default block hashes will + be checked in Lighthouse and only passed to the EL if initial + verification fails. + --disable-packet-filter + Disables the discovery packet filter. Useful for testing in smaller + networks + --disable-proposer-reorgs + Do not attempt to reorg late blocks from other validators when + proposing. + --disable-quic + Disables the quic transport. The node will rely solely on the TCP + transport for libp2p connections. + --disable-upnp + Disables UPnP support. Setting this will prevent Lighthouse from + attempting to automatically establish external port mappings. + --dummy-eth1 + If present, uses an eth1 backend that generates static dummy + data. Identical to the method used at the 2019 Canada interop. + -e, --enr-match + Sets the local ENR IP address and port to match those set for + lighthouse. Specifically, the IP address will be the value of + --listen-address and the UDP port will be --discovery-port. + --enable-private-discovery + Lighthouse by default does not discover private IP addresses. Set this + flag to enable connection attempts to local addresses. + --eth1 + If present the node will connect to an eth1 node. This is required for + block production, you must use this flag if you wish to serve a + validator. + --eth1-purge-cache + Purges the eth1 block and deposit caches + --genesis-backfill + Attempts to download blocks all the way back to genesis when + checkpoint syncing. + --gui + Enable the graphical user interface and all its requirements. This + enables --http and --validator-monitor-auto and enables SSE logging. + -h, --help + Prints help information + --http + Enable the RESTful HTTP API server. Disabled by default. + --http-enable-tls + Serves the RESTful HTTP API server over TLS. This feature is currently + experimental. + --import-all-attestations + Import and aggregate all attestations, regardless of validator + subscriptions. 
This will only import attestations from + already-subscribed subnets, use with --subscribe-all-subnets to ensure + all attestations are received for import. + --light-client-server + Act as a full node supporting light clients on the p2p network + [experimental] + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. + --metrics + Enable the Prometheus metrics HTTP server. Disabled by default. + --private + Prevents sending various client identification information. + --proposer-only + Sets this beacon node to be a block proposer only node. This will run + the beacon node in a minimal configuration that is sufficient for + block publishing only. This flag should be used for a beacon node + being referenced by validator client using the --proposer-node flag. + This configuration is for enabling more secure setups. + --purge-db + If present, the chain database will be deleted. Use with caution. + --reconstruct-historic-states + After a checkpoint sync, reconstruct historic states in the database. + This requires syncing all the way back to genesis. + --reset-payload-statuses + When present, Lighthouse will forget the payload statuses of any + already-imported blocks. This can assist in the recovery from a + consensus failure caused by the execution layer. + --self-limiter + Enables the outbound rate limiter (requests made by this node). Use + the self-limiter-protocol flag to set per protocol configurations. 
If + the self rate limiter is enabled and a protocol is not present in the + configuration, the quotas used for the inbound rate limiter will be + used. + --shutdown-after-sync + Shutdown beacon node as soon as sync is completed. Backfill sync will + not be performed before shutdown. + --slasher + Run a slasher alongside the beacon node. It is currently only + recommended for expert users because of the immaturity of the slasher + UX and the extra resources required. + --staking + Standard option for a staking beacon node. This will enable the HTTP + server on localhost:5052 and import deposit logs from the execution + node. This is equivalent to `--http` on merge-ready networks, or + `--http --eth1` pre-merge + --subscribe-all-subnets + Subscribe to all subnets regardless of validator count. This will also + advertise the beacon node as being long-lived subscribed to all + subnets. + --validator-monitor-auto + Enables the automatic detection and monitoring of validators connected + to the HTTP API and using the subnet subscription endpoint. This + generally has the effect of providing additional logging and metrics + for locally controlled validators. + -z, --zero-ports + Sets all listening TCP/UDP ports to 0, allowing the OS to choose some + arbitrary free ports. ``` + diff --git a/book/src/help_general.md b/book/src/help_general.md index 551f93e2bf..42bff04d1a 100644 --- a/book/src/help_general.md +++ b/book/src/help_general.md @@ -1,108 +1,141 @@ # Lighthouse General Commands ``` -Sigma Prime -Ethereum 2.0 client by Sigma Prime. Provides a full-featured beacon node, a validator client and utilities for managing -validator accounts. +Ethereum 2.0 client by Sigma Prime. Provides a full-featured beacon node, a +validator client and utilities for managing validator accounts. -USAGE: - lighthouse [FLAGS] [OPTIONS] [SUBCOMMAND] +Usage: lighthouse [OPTIONS] [COMMAND] -FLAGS: - --disable-log-timestamp If present, do not include timestamps in logging output. 
- --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will - generally increase memory usage, it should only be provided when debugging - specific memory allocation issues. - -l DEPRECATED Enables environment logging giving access to sub-protocol logs such - as discv5 and libp2p - -h, --help Prints help information - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress If present, compress old log files. This can help reduce the space needed to - store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be - read by any user on the machine. Note that logs can often contain sensitive - information about your validator and so this flag should be used with caution. - For Windows users, the log file permissions will be inherited from the parent - folder. - -V, --version Prints version information +Commands: + account_manager + Utilities for generating and managing Ethereum 2.0 accounts. [aliases: + a, am, account, account_manager] + beacon_node + The primary component which connects to the Ethereum 2.0 P2P network + and downloads, verifies and stores blocks. Provides a HTTP API for + querying the beacon chain and publishing messages to the network. + [aliases: b, bn, beacon] + boot_node + Start a special Lighthouse process that only serves as a discv5 + boot-node. This process will *not* import blocks or perform most + typical beacon node functions. Instead, it will simply run the discv5 + service and assist nodes on the network to discover each other. This + is the recommended way to provide a network boot-node since it has a + reduced attack surface compared to a full beacon node. + database_manager + Manage a beacon node database [aliases: db] + validator_client + When connected to a beacon node, performs the duties of a staked + validator (e.g., proposing blocks and attestations). 
[aliases: v, vc, + validator] + validator_manager + Utilities for managing a Lighthouse validator client via the HTTP API. + [aliases: vm, validator-manager, validator_manager] + help + Print this message or the help of the given subcommand(s) -OPTIONS: - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] +Options: + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. 
+ --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. 
+ --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. + -V, --version + Print version - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. 
[possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). 
This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - -SUBCOMMANDS: - account_manager Utilities for generating and managing Ethereum 2.0 accounts. [aliases: a, am, account, - account_manager] - beacon_node The primary component which connects to the Ethereum 2.0 P2P network and downloads, - verifies and stores blocks. Provides a HTTP API for querying the beacon chain and - publishing messages to the network. [aliases: b, bn, beacon] - boot_node Start a special Lighthouse process that only serves as a discv5 boot-node. This process - will *not* import blocks or perform most typical beacon node functions. Instead, it will - simply run the discv5 service and assist nodes on the network to discover each other. This - is the recommended way to provide a network boot-node since it has a reduced attack surface - compared to a full beacon node. - database_manager Manage a beacon node database [aliases: db] - help Prints this message or the help of the given subcommand(s) - validator_client When connected to a beacon node, performs the duties of a staked validator (e.g., proposing - blocks and attestations). [aliases: v, vc, validator] - validator_manager Utilities for managing a Lighthouse validator client via the HTTP API. [aliases: vm, - validator-manager, validator_manager] +Flags: + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. 
+ -h, --help + Prints help information + -l + DEPRECATED Enables environment logging giving access to sub-protocol + logs such as discv5 and libp2p + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. ``` + diff --git a/book/src/help_vc.md b/book/src/help_vc.md index ede8e5d9b2..ccadc19907 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -1,34 +1,182 @@ # Validator Client ``` -When connected to a beacon node, performs the duties of a staked validator (e.g., proposing blocks and attestations). +When connected to a beacon node, performs the duties of a staked validator +(e.g., proposing blocks and attestations). -USAGE: - lighthouse validator_client [FLAGS] [OPTIONS] +Usage: lighthouse validator_client [OPTIONS] -FLAGS: - --builder-proposals - If this flag is set, Lighthouse will query the Beacon Node for only block headers during proposals and will - sign over headers. Useful for outsourcing execution payload construction during proposals. - --disable-auto-discover - If present, do not attempt to discover new validators in the validators-dir. Validators will need to be - manually added to the validator_definitions.yml file. - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning - If present, do not configure the system allocator. Providing this flag will generally increase memory usage, - it should only be provided when debugging specific memory allocation issues. - --disable-run-on-all - DEPRECATED. 
Use --broadcast. By default, Lighthouse publishes attestation, sync committee subscriptions and - proposer preparation messages to all beacon nodes provided in the `--beacon-nodes flag`. This option changes - that behaviour such that these api calls only go out to the first available and synced beacon node - --disable-slashing-protection-web3signer - Disable Lighthouse's slashing protection for all web3signer keys. This can reduce the I/O burden on the VC - but is only safe if slashing protection is enabled on the remote signer and is implemented correctly. DO NOT - ENABLE THIS FLAG UNLESS YOU ARE CERTAIN THAT SLASHING PROTECTION IS ENABLED ON THE REMOTE SIGNER. YOU WILL - GET SLASHED IF YOU USE THIS FLAG WITHOUT ENABLING WEB3SIGNER'S SLASHING PROTECTION. - --distributed - Enables functionality required for running the validator in a distributed validator cluster. +Options: + --beacon-nodes + Comma-separated addresses to one or more beacon node HTTP APIs. + Default is http://localhost:5052. + --beacon-nodes-tls-certs + Comma-separated paths to custom TLS certificates to use when + connecting to a beacon node (and/or proposer node). These certificates + must be in PEM format and are used in addition to the OS trust store. + Commas must only be used as a delimiter, and must not be part of the + certificate path. + --broadcast + Comma-separated list of beacon API topics to broadcast to all beacon + nodes. Possible values are: none, attestations, blocks, subscriptions, + sync-committee. Default (when flag is omitted) is to broadcast + subscriptions only. + --builder-boost-factor + Defines the boost factor, a percentage multiplier to apply to the + builder's payload value when choosing between a builder payload header + and payload from the local execution node. 
+ --builder-registration-timestamp-override + This flag takes a unix timestamp value that will be used to override + the timestamp used in the builder api registration + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --gas-limit + The gas limit to be used in all builder proposals for all validators + managed by this validator client. Note this will not necessarily be + used if the gas limit set here moves too far from the previous block's + gas limit. [default: 30,000,000] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --graffiti + Specify your custom graffiti to be included in blocks. + --graffiti-file + Specify a graffiti file to load validator graffitis from. + --http-address
+ Set the address for the HTTP address. The HTTP server is not encrypted + and therefore it is unsafe to publish on a public network. When this + flag is used, it additionally requires the explicit use of the + `--unencrypted-http-transport` flag to ensure the user is aware of the + risks involved. For access via the Internet, users should apply + transport-layer security like a HTTPS reverse-proxy or SSH tunnelling. + --http-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. + Use * to allow any origin (not recommended in production). If no value + is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5062). + --http-port + Set the listen TCP port for the RESTful HTTP API server. + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --metrics-address
+ Set the listen address for the Prometheus metrics HTTP server. + --metrics-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. + Use * to allow any origin (not recommended in production). If no value + is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5064). + --metrics-port + Set the listen TCP port for the Prometheus metrics HTTP server. + --monitoring-endpoint
+ Enables the monitoring service for sending system metrics to a remote + endpoint. This can be used to monitor your setup on certain services + (e.g. beaconcha.in). This flag sets the endpoint where the beacon node + metrics will be sent. Note: This will send information to a remote + server which may identify and associate your validators, IP address and + other personal information. Always use a HTTPS connection and never + provide an untrusted URL. + --monitoring-endpoint-period + Defines how many seconds to wait between each message sent to the + monitoring-endpoint. Default: 60s + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --proposer-nodes + Comma-separated addresses to one or more beacon node HTTP APIs. These + specify nodes that are used to send beacon block proposals. A failure + will revert back to the standard beacon nodes specified in + --beacon-nodes. + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + --secrets-dir + The directory which contains the password to unlock the validator + voting keypairs. Each password should be contained in a file where the + name is the 0x-prefixed hex representation of the validators voting + public key. Defaults to ~/.lighthouse/{network}/secrets. + --suggested-fee-recipient + Once the merge has happened, this address will receive transaction + fees from blocks proposed by this validator client. If a fee recipient + is configured in the validator definitions it takes priority over this + value.
+ -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. + --validator-registration-batch-size + Defines the number of validators per validator/register_validator + request sent to the BN. This value can be reduced to avoid timeouts + from builders. [default: 500] + --validators-dir + The directory which contains the validator keystores, deposit data for + each validator along with the common slashing protection database and + the validator_definitions.yml + --web3-signer-keep-alive-timeout + Keep-alive timeout for each web3signer connection. 
Set to 'null' to + never timeout [default: 20000] + --web3-signer-max-idle-connections + Maximum number of idle connections to maintain per web3signer host. + Default is unlimited. +<<<<<<< HEAD --enable-doppelganger-protection If this flag is set, Lighthouse will delay startup for three epochs and monitor for messages on the network by any of the validators managed by this client. This will result in three (possibly four) epochs worth of @@ -233,5 +381,108 @@ OPTIONS: --web3-signer-max-idle-connections Maximum number of idle connections to maintain per web3signer host. Default is unlimited. +======= +Flags: + --builder-proposals + If this flag is set, Lighthouse will query the Beacon Node for only + block headers during proposals and will sign over headers. Useful for + outsourcing execution payload construction during proposals. + --disable-auto-discover + If present, do not attempt to discover new validators in the + validators-dir. Validators will need to be manually added to the + validator_definitions.yml file. + --disable-latency-measurement-service + Disables the service that periodically attempts to measure latency to + BNs. + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + --disable-run-on-all + DEPRECATED. Use --broadcast. By default, Lighthouse publishes + attestation, sync committee subscriptions and proposer preparation + messages to all beacon nodes provided in the `--beacon-nodes flag`. + This option changes that behaviour such that these api calls only go + out to the first available and synced beacon node + --disable-slashing-protection-web3signer + Disable Lighthouse's slashing protection for all web3signer keys. 
This + can reduce the I/O burden on the VC but is only safe if slashing + protection is enabled on the remote signer and is implemented + correctly. DO NOT ENABLE THIS FLAG UNLESS YOU ARE CERTAIN THAT + SLASHING PROTECTION IS ENABLED ON THE REMOTE SIGNER. YOU WILL GET + SLASHED IF YOU USE THIS FLAG WITHOUT ENABLING WEB3SIGNER'S SLASHING + PROTECTION. + --distributed + Enables functionality required for running the validator in a + distributed validator cluster. + --enable-doppelganger-protection + If this flag is set, Lighthouse will delay startup for three epochs + and monitor for messages on the network by any of the validators + managed by this client. This will result in three (possibly four) + epochs worth of missed attestations. If an attestation is detected + during this period, it means it is very likely that you are running a + second validator client with the same keys. This validator client will + immediately shutdown if this is detected in order to avoid potentially + committing a slashable offense. Use this flag in order to ENABLE this + functionality, without this flag Lighthouse will begin attesting + immediately. + --enable-high-validator-count-metrics + Enable per validator metrics for > 64 validators. Note: This flag is + automatically enabled for <= 64 validators. Enabling this metric for + higher validator counts will lead to higher volume of prometheus + metrics being collected. + -h, --help + Prints help information + --http + Enable the RESTful HTTP API server. Disabled by default. + --http-allow-keystore-export + If present, allow access to the DELETE /lighthouse/keystores HTTP API + method, which allows exporting keystores and passwords to HTTP API + consumers who have access to the API token. This method is useful for + exporting validators, however it should be used with caution since it + exposes private key data to authorized users. 
+ --http-store-passwords-in-secrets-dir + If present, any validators created via the HTTP will have keystore + passwords stored in the secrets-dir rather than the validator + definitions file. + --init-slashing-protection + If present, do not require the slashing protection database to exist + before running. You SHOULD NOT use this flag unless you're certain + that a new slashing protection database is required. Usually, your + database will have been initialized when you imported your validator + keys. If you misplace your database and then run with this flag you + risk being slashed. + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. + --metrics + Enable the Prometheus metrics HTTP server. Disabled by default. + --prefer-builder-proposals + If this flag is set, Lighthouse will always prefer blocks constructed + by builders, regardless of payload value. + --produce-block-v3 + Enable block production via the block v3 endpoint for this validator + client. This should only be enabled when paired with a beacon node + that has this endpoint implemented. This flag will be enabled by + default in future. + --unencrypted-http-transport + This is a safety flag to ensure that the user is aware that the http + transport is unencrypted and using a custom HTTP address is unsafe. + --use-long-timeouts + If present, the validator client will use longer timeouts for requests + made to the beacon node. 
This flag is generally not recommended, + longer timeouts can cause missed duties when fallbacks are used. +>>>>>>> unstable ``` + diff --git a/book/src/help_vm.md b/book/src/help_vm.md index db01164a92..6f9cc405e7 100644 --- a/book/src/help_vm.md +++ b/book/src/help_vm.md @@ -3,96 +3,126 @@ ``` Utilities for managing a Lighthouse validator client via the HTTP API. -USAGE: - lighthouse validator_manager [FLAGS] [OPTIONS] [SUBCOMMAND] +Usage: lighthouse validator_manager [OPTIONS] [COMMAND] -FLAGS: - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will - generally increase memory usage, it should only be provided when debugging - specific memory allocation issues. - -h, --help Prints help information - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress If present, compress old log files. This can help reduce the space needed to - store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be - read by any user on the machine. Note that logs can often contain sensitive - information about your validator and so this flag should be used with caution. - For Windows users, the log file permissions will be inherited from the parent - folder. - -V, --version Prints version information +Commands: + create + Creates new validators from BIP-39 mnemonic. A JSON file will be + created which contains all the validator keystores and other validator + data. This file can then be imported to a validator client using the + "import-validators" command. Another, optional JSON file is created + which contains a list of validator deposits in the same format as the + "ethereum/staking-deposit-cli" tool. + import + Uploads validators to a validator client using the HTTP API. 
The + validators are defined in a JSON file which can be generated using the + "create-validators" command. + move + Uploads validators to a validator client using the HTTP API. The + validators are defined in a JSON file which can be generated using the + "create-validators" command. This command only supports validators + signing via a keystore on the local file system (i.e., not Web3Signer + validators). + help + Print this message or the help of the given subcommand(s) -OPTIONS: - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] +Options: + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. 
If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. 
Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. 
[default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. 
- --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - -SUBCOMMANDS: - create Creates new validators from BIP-39 mnemonic. A JSON file will be created which contains all the - validator keystores and other validator data. This file can then be imported to a validator client - using the "import-validators" command. Another, optional JSON file is created which contains a list of - validator deposits in the same format as the "ethereum/staking-deposit-cli" tool. - help Prints this message or the help of the given subcommand(s) - import Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file - which can be generated using the "create-validators" command. - move Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file - which can be generated using the "create-validators" command. This command only supports validators - signing via a keystore on the local file system (i.e., not Web3Signer validators). +Flags: + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + -h, --help + Prints help information + --log-color + Force outputting colors when emitting logs to the terminal. 
+ --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. ``` + diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md index 2fa54265ab..4ddb360e48 100644 --- a/book/src/help_vm_create.md +++ b/book/src/help_vm_create.md @@ -1,138 +1,169 @@ # Validator Manager Create ``` -Creates new validators from BIP-39 mnemonic. A JSON file will be created which contains all the validator keystores and -other validator data. This file can then be imported to a validator client using the "import-validators" command. -Another, optional JSON file is created which contains a list of validator deposits in the same format as the -"ethereum/staking-deposit-cli" tool. +Creates new validators from BIP-39 mnemonic. A JSON file will be created which +contains all the validator keystores and other validator data. This file can +then be imported to a validator client using the "import-validators" command. +Another, optional JSON file is created which contains a list of validator +deposits in the same format as the "ethereum/staking-deposit-cli" tool. -USAGE: - lighthouse validator_manager create [FLAGS] [OPTIONS] --output-path +Usage: lighthouse validator_manager create [OPTIONS] --output-path -FLAGS: - --disable-deposits When provided don't generate the deposits JSON file that is commonly used - for submitting validator deposits via a web UI. Using this flag will save - several seconds per validator if the user has an alternate strategy for - submitting deposits. - --disable-log-timestamp If present, do not include timestamps in logging output. 
- --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag - will generally increase memory usage, it should only be provided when - debugging specific memory allocation issues. - --force-bls-withdrawal-credentials If present, allows BLS withdrawal credentials rather than an execution - address. This is not recommended. - -h, --help Prints help information - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress If present, compress old log files. This can help reduce the space needed - to store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can - be read by any user on the machine. Note that logs can often contain - sensitive information about your validator and so this flag should be used - with caution. For Windows users, the log file permissions will be - inherited from the parent folder. - --specify-voting-keystore-password If present, the user will be prompted to enter the voting keystore - password that will be used to encrypt the voting keystores. If this flag - is not provided, a random password will be used. It is not necessary to - keep backups of voting keystore passwords if the mnemonic is safely backed - up. - --stdin-inputs If present, read all user inputs from stdin instead of tty. - -V, --version Prints version information +Options: + --beacon-node + A HTTP(S) address of a beacon node using the beacon-API. If this value + is provided, an error will be raised if any validator key here is + already known as a validator by that beacon node. This helps prevent + the same validator being created twice and therefore slashable + conditions. + --builder-boost-factor + Defines the boost factor, a percentage multiplier to apply to the + builder's payload value when choosing between a builder payload header + and payload from the local execution node. 
+ --builder-proposals + When provided, all created validators will attempt to create blocks + via builder rather than the local EL. [possible values: true, false] + --count + The number of validators to create, regardless of how many already + exist + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --deposit-gwei + The GWEI value of the deposit amount. Defaults to the minimum amount + required for an active validator (MAX_EFFECTIVE_BALANCE) + --eth1-withdrawal-address + If this field is set, the given eth1 address will be used to create + the withdrawal credentials. Otherwise, it will generate withdrawal + credentials with the mnemonic-derived withdrawal public key in + EIP-2334 format. + --first-index + The first of consecutive key indexes you wish to create. [default: 0] + --gas-limit + All created validators will use this gas limit. It is recommended to + leave this as the default value by not specifying this flag. + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. 
Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --mnemonic-path + If present, the mnemonic will be read in from this file. + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --output-path + The path to a directory where the validator and (optionally) deposits + files will be created. The directory will be created if it does not + exist. + --prefer-builder-proposals + If this flag is set, Lighthouse will always prefer blocks constructed + by builders, regardless of payload value. [possible values: true, + false] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + --suggested-fee-recipient + All created validators will use this value for the suggested fee + recipient. 
Omit this flag to use the default value from the VC. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. -OPTIONS: - --beacon-node - A HTTP(S) address of a beacon node using the beacon-API. If this value is provided, an error will be raised - if any validator key here is already known as a validator by that beacon node. This helps prevent the same - validator being created twice and therefore slashable conditions. - --builder-boost-factor - Defines the boost factor, a percentage multiplier to apply to the builder's payload value when choosing - between a builder payload header and payload from the local execution node. 
- --builder-proposals - When provided, all created validators will attempt to create blocks via builder rather than the local EL. - [possible values: true, false] - --count - The number of validators to create, regardless of how many already exist - - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --deposit-gwei - The GWEI value of the deposit amount. Defaults to the minimum amount required for an active validator - (MAX_EFFECTIVE_BALANCE) - --eth1-withdrawal-address - If this field is set, the given eth1 address will be used to create the withdrawal credentials. Otherwise, - it will generate withdrawal credentials with the mnemonic-derived withdrawal public key in EIP-2334 format. - --first-index - The first of consecutive key indexes you wish to create. [default: 0] - - --gas-limit - All created validators will use this gas limit. It is recommended to leave this as the default value by not - specifying this flag. - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] - - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. 
Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --mnemonic-path - If present, the mnemonic will be read in from this file. - - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --output-path - The path to a directory where the validator and (optionally) deposits files will be created. The directory - will be created if it does not exist. - --prefer-builder-proposals - If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload - value. [possible values: true, false] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --suggested-fee-recipient - All created validators will use this value for the suggested fee recipient. 
Omit this flag to use the - default value from the VC. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. +Flags: + --disable-deposits + When provided don't generate the deposits JSON file that is commonly + used for submitting validator deposits via a web UI. Using this flag + will save several seconds per validator if the user has an alternate + strategy for submitting deposits. + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. 
Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + --force-bls-withdrawal-credentials + If present, allows BLS withdrawal credentials rather than an execution + address. This is not recommended. + -h, --help + Prints help information + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. + --specify-voting-keystore-password + If present, the user will be prompted to enter the voting keystore + password that will be used to encrypt the voting keystores. If this + flag is not provided, a random password will be used. It is not + necessary to keep backups of voting keystore passwords if the mnemonic + is safely backed up. + --stdin-inputs + If present, read all user inputs from stdin instead of tty. ``` + diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md index e6ff351dac..799a1db82b 100644 --- a/book/src/help_vm_import.md +++ b/book/src/help_vm_import.md @@ -1,102 +1,126 @@ # Validator Manager Import ``` -Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file which can be -generated using the "create-validators" command. +Uploads validators to a validator client using the HTTP API. The validators are +defined in a JSON file which can be generated using the "create-validators" +command. 
-USAGE: - lighthouse validator_manager import [FLAGS] [OPTIONS] --validators-file +Usage: lighthouse validator_manager import [OPTIONS] --validators-file -FLAGS: - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will - generally increase memory usage, it should only be provided when debugging - specific memory allocation issues. - -h, --help Prints help information - --ignore-duplicates If present, ignore any validators which already exist on the VC. Without this - flag, the process will terminate without making any changes. This flag should - be used with caution, whilst it does not directly cause slashable conditions, - it might be an indicator that something is amiss. Users should also be careful - to avoid submitting duplicate deposits for validators that already exist on the - VC. - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress If present, compress old log files. This can help reduce the space needed to - store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be - read by any user on the machine. Note that logs can often contain sensitive - information about your validator and so this flag should be used with caution. - For Windows users, the log file permissions will be inherited from the parent - folder. - -V, --version Prints version information +Options: + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. 
+ [default: info] [possible values: info, debug, trace, warn, error, + crit] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. 
This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. + --validators-file + The path to a JSON file containing a list of validators to be imported + to the validator client. This file is usually named "validators.json". 
+ --vc-token + The file containing a token required by the validator client. + --vc-url + A HTTP(S) address of a validator client using the keymanager-API. If + this value is not supplied then a 'dry run' will be conducted where no + changes are made to the validator client. [default: + http://localhost:5062] -OPTIONS: - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] - - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. 
If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. 
Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - --validators-file - The path to a JSON file containing a list of validators to be imported to the validator client. This file is - usually named "validators.json". - --vc-token - The file containing a token required by the validator client. - - --vc-url - A HTTP(S) address of a validator client using the keymanager-API. If this value is not supplied then a 'dry - run' will be conducted where no changes are made to the validator client. [default: http://localhost:5062] +Flags: + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + -h, --help + Prints help information + --ignore-duplicates + If present, ignore any validators which already exist on the VC. + Without this flag, the process will terminate without making any + changes. This flag should be used with caution, whilst it does not + directly cause slashable conditions, it might be an indicator that + something is amiss. Users should also be careful to avoid submitting + duplicate deposits for validators that already exist on the VC. + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. 
Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. ``` + diff --git a/book/src/help_vm_move.md b/book/src/help_vm_move.md index fe1d4c5ae9..9b92e21bc2 100644 --- a/book/src/help_vm_move.md +++ b/book/src/help_vm_move.md @@ -1,119 +1,147 @@ # Validator Manager Move ``` -Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file which can be -generated using the "create-validators" command. This command only supports validators signing via a keystore on the +Uploads validators to a validator client using the HTTP API. The validators are +defined in a JSON file which can be generated using the "create-validators" +command. This command only supports validators signing via a keystore on the local file system (i.e., not Web3Signer validators). -USAGE: - lighthouse validator_manager move [FLAGS] [OPTIONS] --dest-vc-token --dest-vc-url --src-vc-token --src-vc-url +Usage: lighthouse validator_manager move [OPTIONS] --src-vc-token --src-vc-url --dest-vc-token --dest-vc-url -FLAGS: - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will - generally increase memory usage, it should only be provided when debugging - specific memory allocation issues. - -h, --help Prints help information - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress If present, compress old log files. This can help reduce the space needed to - store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be - read by any user on the machine. Note that logs can often contain sensitive - information about your validator and so this flag should be used with caution. 
- For Windows users, the log file permissions will be inherited from the parent - folder. - --stdin-inputs If present, read all user inputs from stdin instead of tty. - -V, --version Prints version information +Options: + --builder-boost-factor + Defines the boost factor, a percentage multiplier to apply to the + builder's payload value when choosing between a builder payload header + and payload from the local execution node. + --builder-proposals + When provided, all created validators will attempt to create blocks + via builder rather than the local EL. [possible values: true, false] + --count + The number of validators to move. + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --dest-vc-token + The file containing a token required by the destination validator + client. + --dest-vc-url + A HTTP(S) address of a validator client using the keymanager-API. This + validator client is the "destination" and will have new validators + added as they are removed from the "source" validator client. + --gas-limit + All created validators will use this gas limit. It is recommended to + leave this as the default value by not specifying this flag. + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. 
+ [default: 180] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --prefer-builder-proposals + If this flag is set, Lighthouse will always prefer blocks constructed + by builders, regardless of payload value. [possible values: true, + false] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + --src-vc-token + The file containing a token required by the source validator client. + --src-vc-url + A HTTP(S) address of a validator client using the keymanager-API. 
This + validator client is the "source" and contains the validators that are + to be moved. + --stdin-inputs + If present, read all user inputs from stdin instead of tty. + --suggested-fee-recipient + All created validators will use this value for the suggested fee + recipient. Omit this flag to use the default value from the VC. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. + --validators + The validators to be moved. Either a list of 0x-prefixed validator + pubkeys or the keyword "all". 
-OPTIONS: - --builder-boost-factor - Defines the boost factor, a percentage multiplier to apply to the builder's payload value when choosing - between a builder payload header and payload from the local execution node. - --builder-proposals - When provided, all created validators will attempt to create blocks via builder rather than the local EL. - [possible values: true, false] - --count The number of validators to move. - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --dest-vc-token - The file containing a token required by the destination validator client. - - --dest-vc-url - A HTTP(S) address of a validator client using the keymanager-API. This validator client is the "destination" - and will have new validators added as they are removed from the "source" validator client. - --gas-limit - All created validators will use this gas limit. It is recommended to leave this as the default value by not - specifying this flag. - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] - - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. 
Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --prefer-builder-proposals - If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload - value. [possible values: true, false] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --src-vc-token - The file containing a token required by the source validator client. - - --src-vc-url - A HTTP(S) address of a validator client using the keymanager-API. This validator client is the "source" and - contains the validators that are to be moved. 
- --suggested-fee-recipient - All created validators will use this value for the suggested fee recipient. Omit this flag to use the - default value from the VC. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - --validators - The validators to be moved. Either a list of 0x-prefixed validator pubkeys or the keyword "all". +Flags: + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. 
+ -h, --help + Prints help information + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. ``` + diff --git a/book/src/homebrew.md b/book/src/homebrew.md index 486de371f8..da92dcb26c 100644 --- a/book/src/homebrew.md +++ b/book/src/homebrew.md @@ -5,7 +5,7 @@ Lighthouse is available on Linux and macOS via the [Homebrew package manager](ht Please note that this installation method is maintained by the Homebrew community. It is not officially supported by the Lighthouse team. -### Installation +## Installation Install the latest version of the [`lighthouse`][formula] formula with: @@ -13,7 +13,7 @@ Install the latest version of the [`lighthouse`][formula] formula with: brew install lighthouse ``` -### Usage +## Usage If Homebrew is installed to your `PATH` (default), simply run: @@ -27,7 +27,7 @@ Alternatively, you can find the `lighthouse` binary at: "$(brew --prefix)/bin/lighthouse" --help ``` -### Maintenance +## Maintenance The [formula][] is kept up-to-date by the Homebrew community and a bot that listens for new releases. diff --git a/book/src/installation-binaries.md b/book/src/installation-binaries.md index 30bf03e14e..580b5c19d4 100644 --- a/book/src/installation-binaries.md +++ b/book/src/installation-binaries.md @@ -30,16 +30,16 @@ a `x86_64` binary. 1. Go to the [Releases](https://github.com/sigp/lighthouse/releases) page and select the latest release. 1. Download the `lighthouse-${VERSION}-x86_64-unknown-linux-gnu.tar.gz` binary.
For example, to obtain the binary file for v4.0.1 (the latest version at the time of writing), a user can run the following commands in a linux terminal: + ```bash cd ~ curl -LO https://github.com/sigp/lighthouse/releases/download/v4.0.1/lighthouse-v4.0.1-x86_64-unknown-linux-gnu.tar.gz tar -xvf lighthouse-v4.0.1-x86_64-unknown-linux-gnu.tar.gz ``` + 1. Test the binary with `./lighthouse --version` (it should print the version). 1. (Optional) Move the `lighthouse` binary to a location in your `PATH`, so the `lighthouse` command can be called from anywhere. For example, to copy `lighthouse` from the current directory to `usr/bin`, run `sudo cp lighthouse /usr/bin`. - - > Windows users will need to execute the commands in Step 2 from PowerShell. ## Portability @@ -49,10 +49,10 @@ sacrifice the ability to make use of modern CPU instructions. If you have a modern CPU then you should try running a non-portable build to get a 20-30% speed up. -* For **x86_64**, any CPU supporting the [ADX](https://en.wikipedia.org/wiki/Intel_ADX) instruction set +- For **x86_64**, any CPU supporting the [ADX](https://en.wikipedia.org/wiki/Intel_ADX) instruction set extension is compatible with the optimized build. This includes Intel Broadwell (2014) and newer, and AMD Ryzen (2017) and newer. -* For **ARMv8**, most CPUs are compatible with the optimized build, including the Cortex-A72 used by +- For **ARMv8**, most CPUs are compatible with the optimized build, including the Cortex-A72 used by the Raspberry Pi 4. ## Troubleshooting diff --git a/book/src/installation-source.md b/book/src/installation-source.md index c2f5861576..be03a189de 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -23,7 +23,7 @@ The rustup installer provides an easy way to update the Rust compiler, and works With Rust installed, follow the instructions below to install dependencies relevant to your operating system. 
-#### Ubuntu +### Ubuntu Install the following packages: @@ -42,7 +42,7 @@ sudo apt update && sudo apt install -y git gcc g++ make cmake pkg-config llvm-de After this, you are ready to [build Lighthouse](#build-lighthouse). -#### Fedora/RHEL/CentOS +### Fedora/RHEL/CentOS Install the following packages: @@ -52,7 +52,7 @@ yum -y install git make perl clang cmake After this, you are ready to [build Lighthouse](#build-lighthouse). -#### macOS +### macOS 1. Install the [Homebrew][] package manager. 1. Install CMake using Homebrew: @@ -61,21 +61,22 @@ After this, you are ready to [build Lighthouse](#build-lighthouse). brew install cmake ``` - [Homebrew]: https://brew.sh/ After this, you are ready to [build Lighthouse](#build-lighthouse). -#### Windows +### Windows 1. Install [Git](https://git-scm.com/download/win). 1. Install the [Chocolatey](https://chocolatey.org/install) package manager for Windows. > Tips: > - Use PowerShell to install. In Windows, search for PowerShell and run as administrator. > - You must ensure `Get-ExecutionPolicy` is not Restricted. To test this, run `Get-ExecutionPolicy` in PowerShell. If it returns `restricted`, then run `Set-ExecutionPolicy AllSigned`, and then run + ```bash Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) ``` + > - To verify that Chocolatey is ready, run `choco` and it should return the version. 1. Install Make, CMake and LLVM using Chocolatey: @@ -158,14 +159,14 @@ FEATURES=gnosis,slasher-lmdb make Commonly used features include: -* `gnosis`: support for the Gnosis Beacon Chain. -* `portable`: support for legacy hardware. -* `modern`: support for exclusively modern hardware. -* `slasher-lmdb`: support for the LMDB slasher backend. Enabled by default. -* `slasher-mdbx`: support for the MDBX slasher backend. 
-* `jemalloc`: use [`jemalloc`][jemalloc] to allocate memory. Enabled by default on Linux and macOS. +- `gnosis`: support for the Gnosis Beacon Chain. +- `portable`: support for legacy hardware. +- `modern`: support for exclusively modern hardware. +- `slasher-lmdb`: support for the LMDB slasher backend. Enabled by default. +- `slasher-mdbx`: support for the MDBX slasher backend. +- `jemalloc`: use [`jemalloc`][jemalloc] to allocate memory. Enabled by default on Linux and macOS. Not supported on Windows. -* `spec-minimal`: support for the minimal preset (useful for testing). +- `spec-minimal`: support for the minimal preset (useful for testing). Default features (e.g. `slasher-lmdb`) may be opted out of using the `--no-default-features` argument for `cargo`, which can be plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable. @@ -184,9 +185,9 @@ You can customise the compiler settings used to compile Lighthouse via Lighthouse includes several profiles which can be selected via the `PROFILE` environment variable. -* `release`: default for source builds, enables most optimisations while not taking too long to +- `release`: default for source builds, enables most optimisations while not taking too long to compile. -* `maxperf`: default for binary releases, enables aggressive optimisations including full LTO. +- `maxperf`: default for binary releases, enables aggressive optimisations including full LTO. Although compiling with this profile improves some benchmarks by around 20% compared to `release`, it imposes a _significant_ cost at compile time and is only recommended if you have a fast CPU. 
diff --git a/book/src/installation.md b/book/src/installation.md index e8caf5c457..a0df394bd2 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -19,20 +19,17 @@ There are also community-maintained installation methods: - Arch Linux AUR packages: [source](https://aur.archlinux.org/packages/lighthouse-ethereum), [binary](https://aur.archlinux.org/packages/lighthouse-ethereum-bin). - - ## Recommended System Requirements -Before [The Merge](https://ethereum.org/en/roadmap/merge/), Lighthouse was able to run on its own with low to mid-range consumer hardware, but would perform best when provided with ample system resources. +Before [The Merge](https://ethereum.org/en/roadmap/merge/), Lighthouse was able to run on its own with low to mid-range consumer hardware, but would perform best when provided with ample system resources. After [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, it is necessary to run Lighthouse together with an execution client ([Nethermind](https://nethermind.io/), [Besu](https://www.hyperledger.org/use/besu), [Erigon](https://github.com/ledgerwatch/erigon), [Geth](https://geth.ethereum.org/)). The following system requirements listed are therefore for running a Lighthouse beacon node combined with an execution client , and a validator client with a modest number of validator keys (less than 100): +- CPU: Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer +- Memory: 32 GB RAM* +- Storage: 2 TB solid state drive +- Network: 100 Mb/s download, 20 Mb/s upload broadband connection -* CPU: Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer -* Memory: 32 GB RAM* -* Storage: 2 TB solid state drive -* Network: 100 Mb/s download, 20 Mb/s upload broadband connection - -> *Note: 16 GB RAM is becoming rather limited due to the increased resources required. 
16 GB RAM would likely result in out of memory errors in the case of a spike in computing demand (e.g., caused by a bug) or during periods of non-finality of the beacon chain. Users with 16 GB RAM also have a limited choice when it comes to selecting an execution client, which does not help with the [client diversity](https://clientdiversity.org/). We therefore recommend users to have at least 32 GB RAM for long term health of the node, while also giving users the flexibility to change client should the thought arise. +> *Note: 16 GB RAM is becoming rather limited due to the increased resources required. 16 GB RAM would likely result in out of memory errors in the case of a spike in computing demand (e.g., caused by a bug) or during periods of non-finality of the beacon chain. Users with 16 GB RAM also have a limited choice when it comes to selecting an execution client, which does not help with the [client diversity](https://clientdiversity.org/). We therefore recommend users to have at least 32 GB RAM for long term health of the node, while also giving users the flexibility to change client should the thought arise. Last update: April 2023 diff --git a/book/src/intro.md b/book/src/intro.md index ef16913d68..9892a8a49d 100644 --- a/book/src/intro.md +++ b/book/src/intro.md @@ -24,7 +24,6 @@ You may read this book from start to finish, or jump to some of these topics: - Utilize the whole stack by starting a [local testnet](./setup.md#local-testnets). - Query the [RESTful HTTP API](./api.md) using `curl`. - Prospective contributors can read the [Contributing](./contributing.md) section to understand how we develop and test Lighthouse. diff --git a/book/src/key-management.md b/book/src/key-management.md index b2bb7737fd..fa6e99a2aa 100644 --- a/book/src/key-management.md +++ b/book/src/key-management.md @@ -40,29 +40,32 @@ to secure BTC, ETH and many other coins. 
We defined some terms in the context of validator key management: - **Mnemonic**: a string of 24 words that is designed to be easy to write down - and remember. E.g., _"radar fly lottery mirror fat icon bachelor sadness - type exhaust mule six beef arrest you spirit clog mango snap fox citizen - already bird erase"_. - - Defined in BIP-39 + and remember. E.g., _"radar fly lottery mirror fat icon bachelor sadness + type exhaust mule six beef arrest you spirit clog mango snap fox citizen + already bird erase"_. + - Defined in BIP-39 - **Wallet**: a wallet is a JSON file which stores an - encrypted version of a mnemonic. - - Defined in EIP-2386 + encrypted version of a mnemonic. + - Defined in EIP-2386 - **Keystore**: typically created by wallet, it contains a single encrypted BLS - keypair. - - Defined in EIP-2335. + keypair. + - Defined in EIP-2335. - **Voting Keypair**: a BLS public and private keypair which is used for - signing blocks, attestations and other messages on regular intervals in the beacon chain. + signing blocks, attestations and other messages on regular intervals in the beacon chain. - **Withdrawal Keypair**: a BLS public and private keypair which will be - required _after_ Phase 0 to manage ETH once a validator has exited. + required _after_ Phase 0 to manage ETH once a validator has exited. ## Create a validator + There are 2 steps involved to create a validator key using Lighthouse: + 1. [Create a wallet](#step-1-create-a-wallet-and-record-the-mnemonic) 1. [Create a validator](#step-2-create-a-validator) The following example demonstrates how to create a single validator key. ### Step 1: Create a wallet and record the mnemonic + A wallet allows for generating practically unlimited validators from an easy-to-remember 24-word string (a mnemonic). As long as that mnemonic is backed up, all validator keys can be trivially re-generated. @@ -72,40 +75,43 @@ mnemonic is encrypted with a password. 
It is the responsibility of the user to define a strong password. The password is only required for interacting with the wallet, it is not required for recovering keys from a mnemonic. -To create a wallet, use the `lighthouse account wallet` command. For example, if we wish to create a new wallet for the Goerli testnet named `wally` and saves it in `~/.lighthouse/goerli/wallets` with a randomly generated password saved +To create a wallet, use the `lighthouse account wallet` command. For example, if we wish to create a new wallet for the Holesky testnet named `wally` and saves it in `~/.lighthouse/holesky/wallets` with a randomly generated password saved to `./wallet.pass`: ```bash -lighthouse --network goerli account wallet create --name wally --password-file wally.pass +lighthouse --network holesky account wallet create --name wally --password-file wally.pass ``` -Using the above command, a wallet will be created in `~/.lighthouse/goerli/wallets` with the name + +Using the above command, a wallet will be created in `~/.lighthouse/holesky/wallets` with the name `wally`. It is encrypted using the password defined in the -`wally.pass` file. +`wally.pass` file. During the wallet creation process, a 24-word mnemonic will be displayed. Record the mnemonic because it allows you to recreate the files in the case of data loss. > Notes: -> - When navigating to the directory `~/.lighthouse/goerli/wallets`, one will not see the wallet name `wally`, but a hexadecimal folder containing the wallet file. However, when interacting with `lighthouse` in the CLI, the name `wally` will be used. +> +> - When navigating to the directory `~/.lighthouse/holesky/wallets`, one will not see the wallet name `wally`, but a hexadecimal folder containing the wallet file. However, when interacting with `lighthouse` in the CLI, the name `wally` will be used. > - The password is not `wally.pass`, it is the _content_ of the > `wally.pass` file. 
> - If `wally.pass` already exists, the wallet password will be set to the content > of that file. ### Step 2: Create a validator + Validators are fundamentally represented by a BLS keypair. In Lighthouse, we use a wallet to generate these keypairs. Once a wallet exists, the `lighthouse account validator create` command can be used to generate the BLS keypair and all necessary information to submit a validator deposit. With the `wally` wallet created in [Step 1](#step-1-create-a-wallet-and-record-the-mnemonic), we can create a validator with the command: ```bash -lighthouse --network goerli account validator create --wallet-name wally --wallet-password wally.pass --count 1 +lighthouse --network holesky account validator create --wallet-name wally --wallet-password wally.pass --count 1 ``` + This command will: -- Derive a single new BLS keypair from wallet `wally` in `~/.lighthouse/goerli/wallets`, updating it so that it generates a new key next time. -- Create a new directory `~/.lighthouse/goerli/validators` containing: - - An encrypted keystore file `voting-keystore.json` containing the validator's voting keypair. - - An `eth1_deposit_data.rlp` assuming the default deposit amount (`32 ETH`) which can be submitted to the deposit - contract for the Goerli testnet. Other networks can be set via the - `--network` parameter. -- Create a new directory `~/.lighthouse/goerli/secrets` which stores a password to the validator's voting keypair. - +- Derive a single new BLS keypair from wallet `wally` in `~/.lighthouse/holesky/wallets`, updating it so that it generates a new key next time. +- Create a new directory `~/.lighthouse/holesky/validators` containing: + - An encrypted keystore file `voting-keystore.json` containing the validator's voting keypair. + - An `eth1_deposit_data.rlp` assuming the default deposit amount (`32 ETH`) which can be submitted to the deposit + contract for the Holesky testnet. Other networks can be set via the + `--network` parameter. 
+- Create a new directory `~/.lighthouse/holesky/secrets` which stores a password to the validator's voting keypair. If you want to create another validator in the future, repeat [Step 2](#step-2-create-a-validator). The wallet keeps track of how many validators it has generated and ensures that a new validator is generated each time. The important thing is to keep the 24-word mnemonic safe so that it can be used to generate new validator keys if needed. @@ -116,16 +122,16 @@ If you want to create another validator in the future, repeat [Step 2](#step-2-c There are three important directories in Lighthouse validator key management: - `wallets/`: contains encrypted wallets which are used for hierarchical - key derivation. - - Defaults to `~/.lighthouse/{network}/wallets` + key derivation. + - Defaults to `~/.lighthouse/{network}/wallets` - `validators/`: contains a directory for each validator containing - encrypted keystores and other validator-specific data. - - Defaults to `~/.lighthouse/{network}/validators` + encrypted keystores and other validator-specific data. + - Defaults to `~/.lighthouse/{network}/validators` - `secrets/`: since the validator signing keys are "hot", the validator process - needs access to the passwords to decrypt the keystores in the validators - directory. These passwords are stored here. - - Defaults to `~/.lighthouse/{network}/secrets` - + needs access to the passwords to decrypt the keystores in the validators + directory. These passwords are stored here. + - Defaults to `~/.lighthouse/{network}/secrets` + where `{network}` is the name of the network passed in the `--network` parameter. When the validator client boots, it searches the `validators/` for directories diff --git a/book/src/key-recovery.md b/book/src/key-recovery.md index a996e95cbc..a0593ddd94 100644 --- a/book/src/key-recovery.md +++ b/book/src/key-recovery.md @@ -1,6 +1,5 @@ # Key Recovery - Generally, validator keystore files are generated alongside a *mnemonic*. 
If the keystore and/or the keystore password are lost, this mnemonic can regenerate a new, equivalent keystore with a new password. @@ -8,9 +7,9 @@ regenerate a new, equivalent keystore with a new password. There are two ways to recover keys using the `lighthouse` CLI: - `lighthouse account validator recover`: recover one or more EIP-2335 keystores from a mnemonic. - These keys can be used directly in a validator client. + These keys can be used directly in a validator client. - `lighthouse account wallet recover`: recover an EIP-2386 wallet from a - mnemonic. + mnemonic. ## ⚠️ Warning @@ -18,10 +17,10 @@ There are two ways to recover keys using the `lighthouse` CLI: resort.** Key recovery entails significant risks: - Exposing your mnemonic to a computer at any time puts it at risk of being - compromised. Your mnemonic is **not encrypted** and is a target for theft. + compromised. Your mnemonic is **not encrypted** and is a target for theft. - It's completely possible to regenerate a validator keypairs that is already active - on some other validator client. Running the same keypairs on two different - validator clients is very likely to result in slashing. + on some other validator client. Running the same keypairs on two different + validator clients is very likely to result in slashing. ## Recover EIP-2335 validator keystores @@ -32,7 +31,6 @@ index on the same mnemonic always results in the same validator keypair being generated (see [EIP-2334](https://eips.ethereum.org/EIPS/eip-2334) for more detail). - Using the `lighthouse account validator recover` command you can generate the keystores that correspond to one or more indices in the mnemonic: @@ -41,7 +39,6 @@ keystores that correspond to one or more indices in the mnemonic: - `lighthouse account validator recover --first-index 1`: recover only index `1`. - `lighthouse account validator recover --first-index 1 --count 2`: recover indices `1, 2`. 
- For each of the indices recovered in the above commands, a directory will be created in the `--validator-dir` location (default `~/.lighthouse/{network}/validators`) which contains all the information necessary to run a validator using the diff --git a/book/src/lighthouse-ui.md b/book/src/lighthouse-ui.md index 81098715f3..106a5e8947 100644 --- a/book/src/lighthouse-ui.md +++ b/book/src/lighthouse-ui.md @@ -23,7 +23,7 @@ information: - [Installation Guide](./ui-installation.md) - Information to install and run the Lighthouse UI. - [Configuration Guide](./ui-configuration.md) - Explanation of how to setup - and configure Siren. + and configure Siren. - [Authentication Guide](./ui-authentication.md) - Explanation of how Siren authentication works and protects validator actions. - [Usage](./ui-usage.md) - Details various Siren components. - [FAQs](./ui-faqs.md) - Frequently Asked Questions. diff --git a/book/src/mainnet-validator.md b/book/src/mainnet-validator.md index 942ca09b8e..c53be97ccf 100644 --- a/book/src/mainnet-validator.md +++ b/book/src/mainnet-validator.md @@ -1,7 +1,6 @@ # Become an Ethereum Consensus Mainnet Validator [launchpad]: https://launchpad.ethereum.org/ -[lh-book]: https://lighthouse-book.sigmaprime.io/ [advanced-datadir]: ./advanced-datadir.md [license]: https://github.com/sigp/lighthouse/blob/stable/LICENSE [slashing]: ./slashing-protection.md @@ -18,7 +17,6 @@ Being educated is critical to a validator's success. Before submitting your main - Reading through this documentation, especially the [Slashing Protection][slashing] section. - Performing a web search and doing your own research. - > > **Please note**: the Lighthouse team does not take any responsibility for losses or damages > occurred through the use of Lighthouse. We have an experienced internal security team and have @@ -27,7 +25,6 @@ Being educated is critical to a validator's success. 
Before submitting your main > due to the actions of other actors on the consensus layer or software bugs. See the > [software license][license] for more detail on liability. - ## Become a validator There are five primary steps to become a validator: @@ -39,23 +36,24 @@ There are five primary steps to become a validator: 1. [Submit deposit](#step-5-submit-deposit-32eth-per-validator) > **Important note**: The guide below contains both mainnet and testnet instructions. We highly recommend *all* users to **run a testnet validator** prior to staking mainnet ETH. By far, the best technical learning experience is to run a testnet validator. You can get hands-on experience with all the tools and it's a great way to test your staking -hardware. 32 ETH is a significant outlay and joining a testnet is a great way to "try before you buy". +hardware. 32 ETH is a significant outlay and joining a testnet is a great way to "try before you buy". > **Never use real ETH to join a testnet!** Testnet such as the Holesky testnet uses Holesky ETH which is worthless. This allows experimentation without real-world costs. ### Step 1. Create validator keys The Ethereum Foundation provides the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli/releases) for creating validator keys. Download and run the `staking-deposit-cli` with the command: + ```bash ./deposit new-mnemonic ``` + and follow the instructions to generate the keys. When prompted for a network, select `mainnet` if you want to run a mainnet validator, or select `holesky` if you want to run a Holesky testnet validator. A new mnemonic will be generated in the process. > **Important note:** A mnemonic (or seed phrase) is a 24-word string randomly generated in the process. It is highly recommended to write down the mnemonic and keep it safe offline. It is important to ensure that the mnemonic is never stored in any digital form (computers, mobile phones, etc) connected to the internet. 
Please also make one or more backups of the mnemonic to ensure your ETH is not lost in the case of data loss. It is very important to keep your mnemonic private as it represents the ultimate control of your ETH. Upon completing this step, the files `deposit_data-*.json` and `keystore-m_*.json` will be created. The keys that are generated from staking-deposit-cli can be easily loaded into a Lighthouse validator client (`lighthouse vc`) in [Step 3](#step-3-import-validator-keys-to-lighthouse). In fact, both of these programs are designed to work with each other. - > Lighthouse also supports creating validator keys, see [Key management](./key-management.md) for more info. ### Step 2. Start an execution client and Lighthouse beacon node @@ -64,15 +62,17 @@ Start an execution client and Lighthouse beacon node according to the [Run a Nod ### Step 3. Import validator keys to Lighthouse -In [Step 1](#step-1-create-validator-keys), the staking-deposit-cli will generate the validator keys into a `validator_keys` directory. Let's assume that +In [Step 1](#step-1-create-validator-keys), the staking-deposit-cli will generate the validator keys into a `validator_keys` directory. Let's assume that this directory is `$HOME/staking-deposit-cli/validator_keys`. Using the default `validators` directory in Lighthouse (`~/.lighthouse/mainnet/validators`), run the following command to import validator keys: Mainnet: + ```bash lighthouse --network mainnet account validator import --directory $HOME/staking-deposit-cli/validator_keys ``` Holesky testnet: + ```bash lighthouse --network holesky account validator import --directory $HOME/staking-deposit-cli/validator_keys ``` @@ -85,7 +85,6 @@ lighthouse --network holesky account validator import --directory $HOME/staking- > Docker users should use the command from the [Docker](#docker-users) documentation. 
- The user will be prompted for a password for each keystore discovered: ``` @@ -122,11 +121,10 @@ WARNING: DO NOT USE THE ORIGINAL KEYSTORES TO VALIDATE WITH ANOTHER CLIENT, OR Y Once you see the above message, you have successfully imported the validator keys. You can now proceed to the next step to start the validator client. - ### Step 4. Start Lighthouse validator client After the keys are imported, the user can start performing their validator duties -by starting the Lighthouse validator client `lighthouse vc`: +by starting the Lighthouse validator client `lighthouse vc`: Mainnet: @@ -135,11 +133,12 @@ lighthouse vc --network mainnet --suggested-fee-recipient YourFeeRecipientAddres ``` Holesky testnet: + ```bash lighthouse vc --network holesky --suggested-fee-recipient YourFeeRecipientAddress ``` -The `validator client` manages validators using data obtained from the beacon node via a HTTP API. You are highly recommended to enter a fee-recipient by changing `YourFeeRecipientAddress` to an Ethereum address under your control. +The `validator client` manages validators using data obtained from the beacon node via a HTTP API. You are highly recommended to enter a fee-recipient by changing `YourFeeRecipientAddress` to an Ethereum address under your control. When `lighthouse vc` starts, check that the validator public key appears as a `voting_pubkey` as shown below: @@ -156,9 +155,9 @@ by the protocol. After you have successfully run and synced the execution client, beacon node and validator client, you can now proceed to submit the deposit. Go to the mainnet [Staking launchpad](https://launchpad.ethereum.org/en/) (or [Holesky staking launchpad](https://holesky.launchpad.ethereum.org/en/) for testnet validator) and carefully go through the steps to becoming a validator. Once you are ready, you can submit the deposit by sending 32ETH per validator to the deposit contract. 
Upload the `deposit_data-*.json` file generated in [Step 1](#step-1-create-validator-keys) to the Staking launchpad. -> **Important note:** Double check that the deposit contract for mainnet is `0x00000000219ab540356cBB839Cbe05303d7705Fa` before you confirm the transaction. +> **Important note:** Double check that the deposit contract for mainnet is `0x00000000219ab540356cBB839Cbe05303d7705Fa` before you confirm the transaction. -Once the deposit transaction is confirmed, it will take a minimum of ~16 hours to a few days/weeks for the beacon chain to process and activate your validator, depending on the queue. Refer to our [FAQ - Why does it take so long for a validator to be activated](./faq.md#why-does-it-take-so-long-for-a-validator-to-be-activated) for more info. +Once the deposit transaction is confirmed, it will take a minimum of ~16 hours to a few days/weeks for the beacon chain to process and activate your validator, depending on the queue. Refer to our [FAQ - Why does it take so long for a validator to be activated](./faq.md#why-does-it-take-so-long-for-a-validator-to-be-activated) for more info. Once your validator is activated, the validator client will start to publish attestations each epoch: @@ -172,10 +171,11 @@ If you propose a block, the log will look like: Dec 03 08:49:36.225 INFO Successfully published block slot: 98, attestations: 2, deposits: 0, service: block ``` -Congratulations! Your validator is now performing its duties and you will receive rewards for securing the Ethereum network. +Congratulations! Your validator is now performing its duties and you will receive rewards for securing the Ethereum network. ### What is next? -After the validator is running and performing its duties, it is important to keep the validator online to continue accumulating rewards. However, there could be problems with the computer, the internet or other factors that cause the validator to be offline. 
For this, it is best to subscribe to notifications, e.g., via [beaconcha.in](https://beaconcha.in/) which will send notifications about missed attestations and/or proposals. You will be notified about the validator's offline status and will be able to react promptly. + +After the validator is running and performing its duties, it is important to keep the validator online to continue accumulating rewards. However, there could be problems with the computer, the internet or other factors that cause the validator to be offline. For this, it is best to subscribe to notifications, e.g., via [beaconcha.in](https://beaconcha.in/) which will send notifications about missed attestations and/or proposals. You will be notified about the validator's offline status and will be able to react promptly. The next important thing is to stay up to date with updates to Lighthouse and the execution client. Updates are released from time to time, typically once or twice a month. For Lighthouse updates, you can subscribe to notifications on [Github](https://github.com/sigp/lighthouse) by clicking on `Watch`. If you only want to receive notification on new releases, select `Custom`, then `Releases`. You could also join [Lighthouse Discord](https://discord.gg/cyAszAh) where we will make an announcement when there is a new release. @@ -202,9 +202,10 @@ Here we use two `-v` volumes to attach: - `~/.lighthouse` on the host to `/root/.lighthouse` in the Docker container. - The `validator_keys` directory in the present working directory of the host - to the `/root/validator_keys` directory of the Docker container. + to the `/root/validator_keys` directory of the Docker container. 
### Start Lighthouse beacon node and validator client + Those using Docker images can start the processes with: ```bash @@ -222,8 +223,5 @@ $ docker run \ lighthouse --network mainnet vc ``` - If you get stuck you can always reach out on our [Discord][discord] or [create an issue](https://github.com/sigp/lighthouse/issues/new). - - diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index a5769162b0..6de05cff2a 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -16,7 +16,7 @@ the merge: be made to your `lighthouse vc` configuration, and are covered on the [Suggested fee recipient](./suggested-fee-recipient.md) page. -Additionally, you _must_ update Lighthouse to v3.0.0 (or later), and must update your execution +Additionally, you *must* update Lighthouse to v3.0.0 (or later), and must update your execution engine to a merge-ready version. ## When? @@ -27,7 +27,7 @@ All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln | Network | Bellatrix | The Merge | Remark | |---------|-------------------------------|-------------------------------| -----------| -| Ropsten | 2nd June 2022 | 8th June 2022 | Deprecated | +| Ropsten | 2nd June 2022 | 8th June 2022 | Deprecated | | Sepolia | 20th June 2022 | 6th July 2022 | | | Goerli | 4th August 2022 | 10th August 2022 | Previously named `Prater`| | Mainnet | 6th September 2022| 15th September 2022| | @@ -55,7 +55,7 @@ has the authority to control the execution engine. > needing to pass a jwt secret file. The execution engine connection must be **exclusive**, i.e. you must have one execution node -per beacon node. The reason for this is that the beacon node _controls_ the execution node. Please +per beacon node. The reason for this is that the beacon node *controls* the execution node. Please see the [FAQ](#faq) for further information about why many:1 and 1:many configurations are not supported. 
@@ -173,7 +173,7 @@ client to be able to connect to the beacon node. ### Can I use `http://localhost:8545` for the execution endpoint? Most execution nodes use port `8545` for the Ethereum JSON-RPC API. Unless custom configuration is -used, an execution node _will not_ provide the necessary engine API on port `8545`. You should +used, an execution node *will not* provide the necessary engine API on port `8545`. You should not attempt to use `http://localhost:8545` as your engine URL and should instead use `http://localhost:8551`. @@ -209,4 +209,3 @@ guidance for specific setups. - [Ethereum Staking Launchpad: Merge Readiness](https://launchpad.ethereum.org/en/merge-readiness). - [CoinCashew: Ethereum Merge Upgrade Checklist](https://www.coincashew.com/coins/overview-eth/archived-guides/ethereum-merge-upgrade-checklist-for-home-stakers-and-validators) - [EthDocker: Merge Preparation](https://eth-docker.net/About/MergePrep/) -- [Remy Roy: How to join the Goerli/Prater merge testnet](https://github.com/remyroy/ethstaker/blob/main/merge-goerli-prater.md) diff --git a/book/src/partial-withdrawal.md b/book/src/partial-withdrawal.md index e5a0a97c6c..26003e1f2f 100644 --- a/book/src/partial-withdrawal.md +++ b/book/src/partial-withdrawal.md @@ -2,12 +2,13 @@ After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023: - - if a validator has a withdrawal credential type `0x00`, the rewards will continue to accumulate and will be locked in the beacon chain. - - if a validator has a withdrawal credential type `0x01`, any rewards above 32ETH will be periodically withdrawn to the withdrawal address. This is also known as the "validator sweep", i.e., once the "validator sweep" reaches your validator's index, your rewards will be withdrawn to the withdrawal address. At the time of writing, with 560,000+ validators on the Ethereum mainnet, you shall expect to receive the rewards approximately every 5 days. 
+- if a validator has a withdrawal credential type `0x00`, the rewards will continue to accumulate and will be locked in the beacon chain. +- if a validator has a withdrawal credential type `0x01`, any rewards above 32ETH will be periodically withdrawn to the withdrawal address. This is also known as the "validator sweep", i.e., once the "validator sweep" reaches your validator's index, your rewards will be withdrawn to the withdrawal address. At the time of writing, with 560,000+ validators on the Ethereum mainnet, you shall expect to receive the rewards approximately every 5 days. + +## FAQ -### FAQ 1. How to know if I have the withdrawal credentials type `0x00` or `0x01`? - + Refer [here](./voluntary-exit.md#1-how-to-know-if-i-have-the-withdrawal-credentials-type-0x01). 2. My validator has withdrawal credentials type `0x00`, is there a deadline to update my withdrawal credentials? @@ -16,8 +17,8 @@ After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12 3. Do I have to do anything to get my rewards after I update the withdrawal credentials to type `0x01`? - No. The "validator sweep" occurs automatically and you can expect to receive the rewards every *n* days, [more information here](./voluntary-exit.md#4-when-will-i-get-my-staked-fund-after-voluntary-exit-if-my-validator-is-of-type-0x01). + No. The "validator sweep" occurs automatically and you can expect to receive the rewards every *n* days, [more information here](./voluntary-exit.md#4-when-will-i-get-my-staked-fund-after-voluntary-exit-if-my-validator-is-of-type-0x01). Figure below summarizes partial withdrawals. 
- ![partial](./imgs/partial-withdrawal.png) \ No newline at end of file + ![partial](./imgs/partial-withdrawal.png) diff --git a/book/src/pi.md b/book/src/pi.md index 2fea91ad17..b91ecab548 100644 --- a/book/src/pi.md +++ b/book/src/pi.md @@ -4,22 +4,21 @@ Tested on: - - Raspberry Pi 4 Model B (4GB) - - `Ubuntu 20.04 LTS (GNU/Linux 5.4.0-1011-raspi aarch64)` - +- Raspberry Pi 4 Model B (4GB) +- `Ubuntu 20.04 LTS (GNU/Linux 5.4.0-1011-raspi aarch64)` *Note: [Lighthouse supports cross-compiling](./cross-compiling.md) to target a Raspberry Pi (`aarch64`). Compiling on a faster machine (i.e., `x86_64` desktop) may be convenient.* -### 1. Install Ubuntu +## 1. Install Ubuntu Follow the [Ubuntu Raspberry Pi installation instructions](https://ubuntu.com/download/raspberry-pi). **A 64-bit version is required** A graphical environment is not required in order to use Lighthouse. Only the terminal and an Internet connection are necessary. -### 2. Install Packages +## 2. Install Packages Install the Ubuntu dependencies: @@ -32,7 +31,7 @@ sudo apt update && sudo apt install -y git gcc g++ make cmake pkg-config llvm-de > - If there are difficulties, try updating the package manager with `sudo apt > update`. -### 3. Install Rust +## 3. Install Rust Install Rust as per [rustup](https://rustup.rs/): @@ -47,7 +46,7 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh > be found, run `source $HOME/.cargo/env`. After that, running `cargo version` should return the version, for example `cargo 1.68.2`. > - It's generally advisable to append `source $HOME/.cargo/env` to `~/.bashrc`. -### 4. Install Lighthouse +## 4. 
Install Lighthouse ```bash git clone https://github.com/sigp/lighthouse.git diff --git a/book/src/redundancy.md b/book/src/redundancy.md index bd1976f950..ee685a17cf 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -1,7 +1,5 @@ # Redundancy -[subscribe-api]: https://ethereum.github.io/beacon-APIs/#/Validator/prepareBeaconCommitteeSubnet - There are three places in Lighthouse where redundancy is notable: 1. ✅ GOOD: Using a redundant beacon node in `lighthouse vc --beacon-nodes` @@ -38,9 +36,9 @@ duties as long as *at least one* of the beacon nodes is available. There are a few interesting properties about the list of `--beacon-nodes`: - *Ordering matters*: the validator client prefers a beacon node that is - earlier in the list. + earlier in the list. - *Synced is preferred*: the validator client prefers a synced beacon node over - one that is still syncing. + one that is still syncing. - *Failure is sticky*: if a beacon node fails, it will be flagged as offline and won't be retried again for the rest of the slot (12 seconds). This helps prevent the impact of time-outs and other lengthy errors. @@ -49,7 +47,6 @@ There are a few interesting properties about the list of `--beacon-nodes`: > provided (if it is desired). It will only be used as default if no `--beacon-nodes` flag is > provided at all. - ### Configuring a redundant Beacon Node In our previous example, we listed `http://192.168.1.1:5052` as a redundant @@ -58,8 +55,10 @@ following flags: - `--http`: starts the HTTP API server. - `--http-address local_IP`: where `local_IP` is the private IP address of the computer running the beacon node. This is only required if your backup beacon node is on a different host. + > Note: You could also use `--http-address 0.0.0.0`, but this allows *any* external IP address to access the HTTP server. As such, a firewall should be configured to deny unauthorized access to port `5052`. - - `--execution-endpoint`: see [Merge Migration](./merge-migration.md). 
+ +- `--execution-endpoint`: see [Merge Migration](./merge-migration.md). - `--execution-jwt`: see [Merge Migration](./merge-migration.md). For example one could use the following command to provide a backup beacon node: @@ -107,7 +106,7 @@ The default is `--broadcast subscriptions`. To also broadcast blocks for example ## Redundant execution nodes Lighthouse previously supported redundant execution nodes for fetching data from the deposit -contract. On merged networks _this is no longer supported_. Each Lighthouse beacon node must be +contract. On merged networks *this is no longer supported*. Each Lighthouse beacon node must be configured in a 1:1 relationship with an execution node. For more information on the rationale behind this decision please see the [Merge Migration](./merge-migration.md) documentation. diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md index ab42c0c10a..6c1f23d8e8 100644 --- a/book/src/run_a_node.md +++ b/book/src/run_a_node.md @@ -8,9 +8,8 @@ You should be finished with one [Installation](./installation.md) method of your 1. Set up a [beacon node](#step-3-set-up-a-beacon-node-using-lighthouse); 1. [Check logs for sync status](#step-4-check-logs-for-sync-status); - - ## Step 1: Create a JWT secret file + A JWT secret file is used to secure the communication between the execution client and the consensus client. In this step, we will create a JWT secret file which will be used in later steps. ```bash @@ -21,18 +20,15 @@ openssl rand -hex 32 | tr -d "\n" | sudo tee /secrets/jwt.hex ## Step 2: Set up an execution node The Lighthouse beacon node *must* connect to an execution engine in order to validate the transactions present in blocks. The execution engine connection must be *exclusive*, i.e. you must have one execution node -per beacon node. The reason for this is that the beacon node _controls_ the execution node. Select an execution client from the list below and run it: - +per beacon node. 
The reason for this is that the beacon node *controls* the execution node. Select an execution client from the list below and run it: - [Nethermind](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) - [Besu](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/) - [Erigon](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) - [Geth](https://geth.ethereum.org/docs/getting-started/consensus-clients) - > Note: Each execution engine has its own flags for configuring the engine API and JWT secret to connect to a beacon node. Please consult the relevant page of your execution engine as above for the required flags. - Once the execution client is up, just let it continue running. The execution client will start syncing when it connects to a beacon node. Depending on the execution client and computer hardware specifications, syncing can take from a few hours to a few days. You can safely proceed to Step 3 to set up a beacon node while the execution client is still syncing. ## Step 3: Set up a beacon node using Lighthouse @@ -50,9 +46,10 @@ lighthouse bn \ --http ``` -> Note: If you download the binary file, you need to navigate to the directory of the binary file to run the above command. +> Note: If you download the binary file, you need to navigate to the directory of the binary file to run the above command. + +Notable flags: -Notable flags: - `--network` flag, which selects a network: - `lighthouse` (no flag): Mainnet. - `lighthouse --network mainnet`: Mainnet. @@ -71,14 +68,11 @@ provide a `--network` flag instead of relying on the default. - `--checkpoint-sync-url`: Lighthouse supports fast sync from a recent finalized checkpoint. Checkpoint sync is *optional*; however, we **highly recommend** it since it is substantially faster than syncing from genesis while still providing the same functionality. 
The checkpoint sync is done using [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) provided by the Ethereum community. For example, in the above command, we use the URL for Sigma Prime's checkpoint sync server for mainnet `https://mainnet.checkpoint.sigp.io`. - `--http`: to expose an HTTP server of the beacon chain. The default listening address is `http://localhost:5052`. The HTTP API is required for the beacon node to accept connections from the *validator client*, which manages keys. - - If you intend to run the beacon node without running the validator client (e.g., for non-staking purposes such as supporting the network), you can modify the above command so that the beacon node is configured for non-staking purposes: - ### Non-staking -``` +``` lighthouse bn \ --network mainnet \ --execution-endpoint http://localhost:8551 \ @@ -89,16 +83,14 @@ lighthouse bn \ Since we are not staking, we can use the `--disable-deposit-contract-sync` flag to disable syncing of deposit logs from the execution node. - - Once Lighthouse runs, we can monitor the logs to see if it is syncing correctly. - - ## Step 4: Check logs for sync status -Several logs help you identify if Lighthouse is running correctly. + +Several logs help you identify if Lighthouse is running correctly. ### Logs - Checkpoint sync + If you run Lighthouse with the flag `--checkpoint-sync-url`, Lighthouse will print a message to indicate that checkpoint sync is being used: ``` @@ -147,11 +139,11 @@ as `verified` indicating that they have been processed successfully by the execu INFO Synced, slot: 3690668, block: 0x1244…cb92, epoch: 115333, finalized_epoch: 115331, finalized_root: 0x0764…2a3d, exec_hash: 0x929c…1ff6 (verified), peers: 78 ``` -Once you see the above message - congratulations! This means that your node is synced and you have contributed to the decentralization and security of the Ethereum network. +Once you see the above message - congratulations! 
This means that your node is synced and you have contributed to the decentralization and security of the Ethereum network. ## Further readings -Several other resources are the next logical step to explore after running your beacon node: +Several other resources are the next logical step to explore after running your beacon node: - If you intend to run a validator, proceed to [become a validator](./mainnet-validator.md); - Explore how to [manage your keys](./key-management.md); diff --git a/book/src/setup.md b/book/src/setup.md index c678b4387a..d3da68f97c 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -9,6 +9,7 @@ particularly useful for development but still a good way to ensure you have the base dependencies. The additional requirements for developers are: + - [`anvil`](https://github.com/foundry-rs/foundry/tree/master/crates/anvil). This is used to simulate the execution chain during tests. You'll get failures during tests if you don't have `anvil` available on your `PATH`. @@ -17,10 +18,11 @@ The additional requirements for developers are: - [`java 17 runtime`](https://openjdk.java.net/projects/jdk/). 17 is the minimum, used by web3signer_tests. - [`libpq-dev`](https://www.postgresql.org/docs/devel/libpq.html). Also known as - `libpq-devel` on some systems. + `libpq-devel` on some systems. - [`docker`](https://www.docker.com/). Some tests need docker installed and **running**. ## Using `make` + Commands to run the test suite are available via the `Makefile` in the project root for the benefit of CI/CD. We list some of these commands below so you can run them locally and avoid CI failures: @@ -31,7 +33,7 @@ you can run them locally and avoid CI failures: - `$ make test-ef`: (medium) runs the Ethereum Foundation test vectors. - `$ make test-full`: (slow) runs the full test suite (including all previous commands). This is approximately everything - that is required to pass CI. + that is required to pass CI. 
_The lighthouse test suite is quite extensive, running the whole suite may take 30+ minutes._ @@ -80,6 +82,7 @@ test result: ok. 3 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; fini Alternatively, since `lighthouse` is a cargo workspace you can use `-p eth2_ssz` where `eth2_ssz` is the package name as defined `/consensus/ssz/Cargo.toml` + ```bash $ head -2 consensus/ssz/Cargo.toml [package] @@ -120,13 +123,14 @@ test src/lib.rs - (line 10) ... ok test result: ok. 3 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.15s$ cargo test -p eth2_ssz ``` -#### test_logger +### test_logger The test_logger, located in `/common/logging/` can be used to create a `Logger` that by default returns a NullLogger. But if `--features 'logging/test_logger'` is passed while testing the logs are displayed. This can be very helpful while debugging tests. Example: + ``` $ cargo test -p beacon_chain validator_pubkey_cache::test::basic_operation --features 'logging/test_logger' Finished test [unoptimized + debuginfo] target(s) in 0.20s diff --git a/book/src/slasher.md b/book/src/slasher.md index 79a2d1f8eb..5098fe6eda 100644 --- a/book/src/slasher.md +++ b/book/src/slasher.md @@ -8,6 +8,7 @@ extra income for your validators. However it is currently only recommended for e of the immaturity of the slasher UX and the extra resources required. ## Minimum System Requirements + * Quad-core CPU * 16 GB RAM * 256 GB solid state storage (in addition to the space requirement for the beacon node DB) @@ -47,8 +48,8 @@ directory. It is possible to use one of several database backends with the slasher: -- LMDB (default) -- MDBX +* LMDB (default) +* MDBX The advantage of MDBX is that it performs compaction, resulting in less disk usage over time. The disadvantage is that upstream MDBX is unstable, so Lighthouse is pinned to a specific version. 
diff --git a/book/src/slashing-protection.md b/book/src/slashing-protection.md index 38348d2094..88e2bb955c 100644 --- a/book/src/slashing-protection.md +++ b/book/src/slashing-protection.md @@ -65,11 +65,11 @@ interchange file is a record of blocks and attestations signed by a set of valid basically a portable slashing protection database! To import a slashing protection database to Lighthouse, you first need to export your existing client's database. Instructions to export the slashing protection database for other clients are listed below: -- [Lodestar](https://chainsafe.github.io/lodestar/reference/cli/#validator-slashing-protection-export) -- [Nimbus](https://nimbus.guide/migration.html#2-export-slashing-protection-history) -- [Prysm](https://docs.prylabs.network/docs/wallet/slashing-protection#exporting-your-validators-slashing-protection-history) -- [Teku](https://docs.teku.consensys.net/HowTo/Prevent-Slashing#export-a-slashing-protection-file) +* [Lodestar](https://chainsafe.github.io/lodestar/reference/cli/#validator-slashing-protection-export) +* [Nimbus](https://nimbus.guide/migration.html#2-export-slashing-protection-history) +* [Prysm](https://docs.prylabs.network/docs/wallet/slashing-protection#exporting-your-validators-slashing-protection-history) +* [Teku](https://docs.teku.consensys.net/HowTo/Prevent-Slashing#export-a-slashing-protection-file) Once you have the slashing protection database from your existing client, you can now import the database to Lighthouse. With your validator client stopped, you can import a `.json` interchange file from another client using this command: diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md index 44accbd143..4a9be7b963 100644 --- a/book/src/suggested-fee-recipient.md +++ b/book/src/suggested-fee-recipient.md @@ -9,14 +9,14 @@ During post-merge block production, the Beacon Node (BN) will provide a `suggest the execution node. 
This is a 20-byte Ethereum address which the execution node might choose to set as the recipient of other fees or rewards. There is no guarantee that an execution node will use the `suggested_fee_recipient` to collect fees, -it may use any address it chooses. It is assumed that an honest execution node *will* use the -`suggested_fee_recipient`, but users should note this trust assumption. +it may use any address it chooses. It is assumed that an honest execution node _will_ use the +`suggested_fee_recipient`, but users should note this trust assumption. The `suggested_fee_recipient` can be provided to the VC, which will transmit it to the BN. The BN also has a choice regarding the fee recipient it passes to the execution node, creating another noteworthy trust assumption. -To be sure *you* control your fee recipient value, run your own BN and execution node (don't use +To be sure _you_ control your fee recipient value, run your own BN and execution node (don't use third-party services). ## How to configure a suggested fee recipient @@ -68,7 +68,6 @@ Provide a 0x-prefixed address, e.g. lighthouse vc --suggested-fee-recipient 0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b ... ``` - ### 3. Using the "--suggested-fee-recipient" flag on the beacon node The `--suggested-fee-recipient` can be provided to the BN to act as a default value when the @@ -96,7 +95,8 @@ client. | Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 202, 404 | -#### Example Request Body +### Example Request Body + ```json { "ethaddress": "0x1D4E51167DBDC4789a014357f4029ff76381b16c" @@ -120,6 +120,7 @@ curl -X POST \ Note that an authorization header is required to interact with the API. This is specified with the header `-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)"` which read the API token to supply the authentication. Refer to [Authorization Header](./api-vc-auth-header.md) for more information. 
If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. #### Successful Response (202) + ```json null ``` @@ -137,7 +138,7 @@ The same path with a `GET` request can be used to query the fee recipient for a | Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 200, 404 | -Command: +Command: ```bash DATADIR=$HOME/.lighthouse/mainnet @@ -150,6 +151,7 @@ curl -X GET \ ``` #### Successful Response (200) + ```json { "data": { @@ -171,7 +173,7 @@ This is useful if you want the fee recipient to fall back to the validator clien | Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 204, 404 | -Command: +Command: ```bash DATADIR=$HOME/.lighthouse/mainnet @@ -184,6 +186,7 @@ curl -X DELETE \ ``` #### Successful Response (204) + ```json null ``` diff --git a/book/src/ui-authentication.md b/book/src/ui-authentication.md index 0572824d5c..8d457c8f68 100644 --- a/book/src/ui-authentication.md +++ b/book/src/ui-authentication.md @@ -2,9 +2,9 @@ To enhance the security of your account, we offer the option to set a session password. This allows the user to avoid re-entering the api-token when performing critical mutating operations on the validator. Instead a user can simply enter their session password. In the absence of a session password, Siren will revert to the api-token specified in your configuration settings as the default security measure. -> This does not protect your validators from unauthorized device access. +> This does not protect your validators from unauthorized device access. 
-![](imgs/ui-session-auth.png) +![authentication](imgs/ui-session-auth.png) Session passwords must contain at least: @@ -14,20 +14,18 @@ Session passwords must contain at least: - 1 number - 1 special character - ## Protected Actions Prior to executing any sensitive validator action, Siren will request authentication of the session password or api-token. -![](imgs/ui-exit.png) - +![exit](imgs/ui-exit.png) In the event of three consecutive failed attempts, Siren will initiate a security measure by locking all actions and prompting for configuration settings to be renewed to regain access to these features. -![](imgs/ui-fail-auth.png) +![fail-authentication](imgs/ui-fail-auth.png) ## Auto Connect In the event that auto-connect is enabled, refreshing the Siren application will result in a prompt to authenticate the session password or api-token. If three consecutive authentication attempts fail, Siren will activate a security measure by locking the session and prompting for configuration settings to be reset to regain access. -![](imgs/ui-autoconnect-auth.png) \ No newline at end of file +![autoconnect](imgs/ui-autoconnect-auth.png) diff --git a/book/src/ui-configuration.md b/book/src/ui-configuration.md index 31951c3c92..f5e4bed34a 100644 --- a/book/src/ui-configuration.md +++ b/book/src/ui-configuration.md @@ -6,7 +6,6 @@ following configuration screen. ![ui-configuration](./imgs/ui-configuration.png) - ## Connecting to the Clients Both the Beacon node and the Validator client need to have their HTTP APIs enabled. These ports should be accessible from the computer running Siren. This allows you to enter the address and ports of the associated Lighthouse @@ -18,7 +17,7 @@ To enable the HTTP API for the beacon node, utilize the `--gui` CLI flag. This a If you require accessibility from another machine within the network, configure the `--http-address` to match the local LAN IP of the system running the Beacon Node and Validator Client. 
-> To access from another machine on the same network (192.168.0.200) set the Beacon Node and Validator Client `--http-address` as `192.168.0.200`. When this is set, the validator client requires the flag `--beacon-nodes http://192.168.0.200:5052` to connect to the beacon node. +> To access from another machine on the same network (192.168.0.200) set the Beacon Node and Validator Client `--http-address` as `192.168.0.200`. When this is set, the validator client requires the flag `--beacon-nodes http://192.168.0.200:5052` to connect to the beacon node. In a similar manner, the validator client requires activation of the `--http` flag, along with the optional consideration of configuring the `--http-address` flag. If `--http-address` flag is set on the Validator Client, then the `--unencrypted-http-transport` flag is required as well. These settings will ensure compatibility with Siren's connectivity requirements. @@ -27,7 +26,6 @@ If you run Siren in the browser (by entering `localhost` in the browser), you wi A green tick will appear once Siren is able to connect to both clients. You can specify different ports for each client by clicking on the advanced tab. - ## API Token The API Token is a secret key that allows you to connect to the validator diff --git a/book/src/ui-faqs.md b/book/src/ui-faqs.md index 77821788f6..4e4de225af 100644 --- a/book/src/ui-faqs.md +++ b/book/src/ui-faqs.md @@ -1,16 +1,20 @@ # Frequently Asked Questions ## 1. Are there any requirements to run Siren? + Yes, the most current Siren version requires Lighthouse v4.3.0 or higher to function properly. These releases can be found on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository. ## 2. Where can I find my API token? + The required API token may be found in the default data directory of the validator client. For more information please refer to the lighthouse ui configuration [`api token section`](./api-vc-auth-header.md). ## 3.
How do I fix the Node Network Errors? + If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR you can refer to the lighthouse ui configuration and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients). ## 4. How do I connect Siren to Lighthouse from a different computer on the same network? -The most effective approach to enable access for a local network computer to Lighthouse's HTTP API ports is by configuring the `--http-address` to match the local LAN IP of the system running the beacon node and validator client. For instance, if the said node operates at `192.168.0.200`, this IP can be specified using the `--http-address` parameter as `--http-address 192.168.0.200`. When this is set, the validator client requires the flag `--beacon-nodes http://192.168.0.200:5052` to connect to the beacon node. + +The most effective approach to enable access for a local network computer to Lighthouse's HTTP API ports is by configuring the `--http-address` to match the local LAN IP of the system running the beacon node and validator client. For instance, if the said node operates at `192.168.0.200`, this IP can be specified using the `--http-address` parameter as `--http-address 192.168.0.200`. When this is set, the validator client requires the flag `--beacon-nodes http://192.168.0.200:5052` to connect to the beacon node. Subsequently, by designating the host as `192.168.0.200`, you can seamlessly connect Siren to this specific beacon node and validator client pair from any computer situated within the same network. ## 5. How can I use Siren to monitor my validators remotely when I am not at home? @@ -22,6 +26,7 @@ Most contemporary home routers provide options for VPN access in various ways. A In the absence of a VPN, an alternative approach involves utilizing an SSH tunnel. 
To achieve this, you need remote SSH access to the computer hosting the Beacon Node and Validator Client pair (which necessitates a port forward in your router). In this context, while it is not obligatory to set a `--http-address` flag on the Beacon Node and Validator Client, you can configure an SSH tunnel to the local ports on the node and establish a connection through the tunnel. For instructions on setting up an SSH tunnel, refer to [`Connecting Siren via SSH tunnel`](./ui-faqs.md#6-how-do-i-connect-siren-to-lighthouse-via-a-ssh-tunnel) for detailed guidance. ## 6. How do I connect Siren to Lighthouse via a ssh tunnel? + If you would like to access Siren beyond the local network (i.e across the internet), we recommend using an SSH tunnel. This requires a tunnel for 3 ports: `80` (assuming the port is unchanged as per the [installation guide](./ui-installation.md#docker-recommended)), `5052` (for beacon node) and `5062` (for validator client). You can use the command below to perform SSH tunneling: ```bash @@ -30,13 +35,10 @@ ssh -N -L 80:127.0.0.1:80 -L 5052:127.0.0.1:5052 -L 5062:127.0.0.1:5062 username ``` - Where `username` is the username of the server and `local_ip` is the local IP address of the server. Note that with the `-N` option in an SSH session, you will not be able to execute commands in the CLI to avoid confusion with ordinary shell sessions. The connection will appear to be "hung" upon a successful connection, but that is normal. Once you have successfully connected to the server via SSH tunneling, you should be able to access Siren by entering `localhost` in a web browser. - You can also access Siren using the app downloaded in the [Siren release page](https://github.com/sigp/siren/releases). 
To access Siren beyond the local computer, you can use SSH tunneling for ports `5052` and `5062` using the command: - ```bash ssh -N -L 5052:127.0.0.1:5052 -L 5062:127.0.0.1:5062 username@local_ip @@ -44,7 +46,9 @@ ssh -N -L 5052:127.0.0.1:5052 -L 5062:127.0.0.1:5062 username@local_ip ``` ## 7. Does Siren support reverse proxy or DNS named addresses? + Yes, if you need to access your beacon or validator from an address such as `https://merp-server:9909/eth2-vc` you should follow the following steps for configuration: + 1. Toggle `https` as your protocol 2. Add your address as `merp-server/eth2-vc` 3. Add your Beacon and Validator ports as `9909` @@ -53,9 +57,10 @@ If you have configured it correctly you should see a green checkmark indicating If you have separate address setups for your Validator Client and Beacon Node respectively you should access the `Advance Settings` on the configuration and repeat the steps above for each address. - ## 8. How do I change my Beacon or Validator address after logging in? + Once you have successfully arrived to the main dashboard, use the sidebar to access the settings view. In the top right-hand corner there is a `Configuration` action button that will redirect you back to the configuration screen where you can make appropriate changes. ## 9. Why doesn't my validator balance graph show any data? + If your graph is not showing data, it usually means your validator node is still caching data. The application must wait at least 3 epochs before it can render any graphical visualizations. This could take up to 20min. diff --git a/book/src/ui-installation.md b/book/src/ui-installation.md index b8ae788c69..4f7df4e8ff 100644 --- a/book/src/ui-installation.md +++ b/book/src/ui-installation.md @@ -3,6 +3,7 @@ Siren runs on Linux, MacOS and Windows. ## Version Requirement + The Siren app requires Lighthouse v3.5.1 or higher to function properly. 
These versions can be found on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository. ## Pre-Built Electron Packages @@ -26,26 +27,26 @@ The electron app can be built from source by first cloning the repository and entering the directory: ``` -$ git clone https://github.com/sigp/siren.git -$ cd siren +git clone https://github.com/sigp/siren.git +cd siren ``` Once cloned, the electron app can be built and run via the Makefile by: ``` -$ make +make ``` Alternatively, it can be built via: ``` -$ yarn +yarn ``` Once completed successfully the electron app can be run via: ``` -$ yarn dev +yarn dev ``` ### Running In The Browser @@ -59,19 +60,22 @@ production-grade web-server to host the application. `docker` is required to be installed with the service running. The docker image can be built and run via the Makefile by running: + ``` -$ make docker +make docker ``` Alternatively, to run with Docker, the image needs to be built. From the repository directory run: + ``` -$ docker build -t siren . +docker build -t siren . ``` Then to run the image: + ``` -$ docker run --rm -ti --name siren -p 80:80 siren +docker run --rm -ti --name siren -p 80:80 siren ``` This will open port 80 and allow your browser to connect. You can choose @@ -83,20 +87,24 @@ To view Siren, simply go to `http://localhost` in your web browser. #### Development Server A development server can also be built which will expose a local port 3000 via: + ``` -$ yarn start +yarn start ``` Once executed, you can direct your web browser to the following URL to interact with the app: + ``` http://localhost:3000 ``` A production version of the app can be built via + ``` -$ yarn build +yarn build ``` + and then further hosted via a production web server.
### Known Issues diff --git a/book/src/ui-usage.md b/book/src/ui-usage.md index 867a49a91f..eddee311fd 100644 --- a/book/src/ui-usage.md +++ b/book/src/ui-usage.md @@ -1,10 +1,10 @@ # Usage -# Dashboard +## Dashboard Siren's dashboard view provides a summary of all performance and key validator metrics. Sync statuses, uptimes, accumulated rewards, hardware and network metrics are all consolidated on the dashboard for evaluation. -![](imgs/ui-dashboard.png) +![dashboard](imgs/ui-dashboard.png) ## Account Earnings @@ -12,66 +12,62 @@ The account earnings component accumulates reward data from all registered valid Below in the earning section, you can also view your total earnings or click the adjacent buttons to view your estimated earnings given a specific time frame based on current device and network conditions. -![](imgs/ui-account-earnings.png) +![earning](imgs/ui-account-earnings.png) ## Validator Table The validator table component is a list of all registered validators, which includes data such as name, index, total balance, earned rewards and current status. Each validator row also contains a link to a detailed data modal and additional data provided by [Beaconcha.in](https://beaconcha.in). -![](imgs/ui-validator-table.png) +![validator-table](imgs/ui-validator-table.png) ## Validator Balance Chart The validator balance component is a graphical representation of each validator balance over the latest 10 epochs. Take note that only active validators are rendered in the chart visualization. -![](imgs/ui-validator-balance1.png) +![validator-balance](imgs/ui-validator-balance1.png) By clicking on the chart component you can filter selected validators in the render. This will allow for greater resolution in the rendered visualization. - - - - +balance-modal +validator-balance2 ## Hardware Usage and Device Diagnostics The hardware usage component gathers information about the device the Beacon Node is currently running on.
It displays the Disk usage, CPU metrics and memory usage of the Beacon Node device. The device diagnostics component provides the sync status of the execution client and beacon node. - - - +hardware +device ## Log Statistics The log statistics present an hourly combined rate of critical, warning, and error logs from the validator client and beacon node. This analysis enables informed decision-making, troubleshooting, and proactive maintenance for optimal system performance. - +log -# Validator Management +## Validator Management Siren's validator management view provides a detailed overview of all validators with options to deposit to and/or add new validators. Each validator table row displays the validator name, index, balance, rewards, status and all available actions per validator. -![](imgs/ui-validator-management.png) +![validator-management](imgs/ui-validator-management.png) ## Validator Modal Clicking the validator icon activates a detailed validator modal component. This component also allows users to trigger validator actions and as well to view and update validator graffiti. Each modal contains the validator total income with hourly, daily and weekly earnings estimates. - +ui-validator-modal -# Settings +## Settings Siren's settings view provides access to the application theme, version, name, device name and important external links. From the settings page users can also access the configuration screen to adjust any beacon or validator node parameters. -![](imgs/ui-settings.png) +![settings](imgs/ui-settings.png) - -# Validator and Beacon Logs +## Validator and Beacon Logs The logs page provides users with the functionality to access and review recorded logs for both validators and beacons. Users can conveniently observe log severity, messages, timestamps, and any additional data associated with each log entry. 
The interface allows for seamless switching between validator and beacon log outputs, and incorporates useful features such as built-in text search and the ability to pause log feeds. Additionally, users can obtain log statistics, which are also available on the main dashboard, thereby facilitating a comprehensive overview of the system's log data. Please note that Siren is limited to storing and displaying only the previous 1000 log messages. This also means the text search is limited to the logs that are currently stored within Siren's limit. -![](imgs/ui-logs.png) \ No newline at end of file +![logs](imgs/ui-logs.png) diff --git a/book/src/validator-doppelganger.md b/book/src/validator-doppelganger.md index b62086d4bf..a3d60d31b3 100644 --- a/book/src/validator-doppelganger.md +++ b/book/src/validator-doppelganger.md @@ -16,7 +16,7 @@ achieves this by staying silent for 2-3 epochs after a validator is started so i other instances of that validator before starting to sign potentially slashable messages. > Note: Doppelganger Protection is not yet interoperable, so if it is configured on a Lighthouse -> validator client, the client must be connected to a Lighthouse beacon node. +> validator client, the client must be connected to a Lighthouse beacon node. ## Initial Considerations diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index f31d729449..092c813a1e 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -12,10 +12,10 @@ In order to apply these APIs, you need to have historical states information in ## Endpoints -HTTP Path | Description | +| HTTP Path | Description | | --- | -- | -[`/lighthouse/validator_inclusion/{epoch}/global`](#global) | A global vote count for a given epoch. -[`/lighthouse/validator_inclusion/{epoch}/{validator_id}`](#individual) | A per-validator breakdown of votes in a given epoch. 
+| [`/lighthouse/validator_inclusion/{epoch}/global`](#global) | A global vote count for a given epoch. | +| [`/lighthouse/validator_inclusion/{epoch}/{validator_id}`](#individual) | A per-validator breakdown of votes in a given epoch. | ## Global @@ -53,16 +53,17 @@ vote (that is why it is _effective_ `Gwei`). The following fields are returned: - `current_epoch_active_gwei`: the total staked gwei that was active (i.e., - able to vote) during the current epoch. + able to vote) during the current epoch. - `current_epoch_target_attesting_gwei`: the total staked gwei that attested to - the majority-elected Casper FFG target epoch during the current epoch. + the majority-elected Casper FFG target epoch during the current epoch. +- `previous_epoch_active_gwei`: as per `current_epoch_active_gwei`, but during the previous epoch. - `previous_epoch_target_attesting_gwei`: see `current_epoch_target_attesting_gwei`. - `previous_epoch_head_attesting_gwei`: the total staked gwei that attested to a - head beacon block that is in the canonical chain. + head beacon block that is in the canonical chain. From this data you can calculate: -#### Justification/Finalization Rate +### Justification/Finalization Rate `previous_epoch_target_attesting_gwei / current_epoch_active_gwei` @@ -95,7 +96,6 @@ The [Global Votes](#global) endpoint is the summation of all of these individual values, please see it for definitions of terms like "current_epoch", "previous_epoch" and "target_attester". 
- ### HTTP Example ```bash diff --git a/book/src/validator-management.md b/book/src/validator-management.md index bc6aba3c4f..b9610b6967 100644 --- a/book/src/validator-management.md +++ b/book/src/validator-management.md @@ -41,6 +41,7 @@ Here's an example file with two validators: voting_keystore_path: /home/paul/.lighthouse/validators/0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477/voting-keystore.json voting_keystore_password: myStrongpa55word123&$ ``` + In this example we can see two validators: - A validator identified by the `0x87a5...` public key which is enabled. @@ -51,7 +52,7 @@ In this example we can see two validators: Each permitted field of the file is listed below for reference: - `enabled`: A `true`/`false` indicating if the validator client should consider this - validator "enabled". + validator "enabled". - `voting_public_key`: A validator public key. - `type`: How the validator signs messages (this can be `local_keystore` or `web3signer` (see [Web3Signer](./validator-web3signer.md))). - `voting_keystore_path`: The path to a EIP-2335 keystore. @@ -59,9 +60,9 @@ Each permitted field of the file is listed below for reference: - `voting_keystore_password`: The password to the EIP-2335 keystore. > **Note**: Either `voting_keystore_password_path` or `voting_keystore_password` *must* be -> supplied. If both are supplied, `voting_keystore_password_path` is ignored. +> supplied. If both are supplied, `voting_keystore_password_path` is ignored. ->If you do not wish to have `voting_keystore_password` being stored in the `validator_definitions.yml` file, you can add the field `voting_keystore_password_path` and point it to a file containing the password. The file can be, e.g., on a mounted portable drive that contains the password so that no password is stored on the validating node. 
+>If you do not wish to have `voting_keystore_password` being stored in the `validator_definitions.yml` file, you can add the field `voting_keystore_password_path` and point it to a file containing the password. The file can be, e.g., on a mounted portable drive that contains the password so that no password is stored on the validating node. ## Populating the `validator_definitions.yml` file @@ -77,7 +78,6 @@ recap: ### Automatic validator discovery - When the `--disable-auto-discover` flag is **not** provided, the validator client will search the `validator-dir` for validators and add any *new* validators to the `validator_definitions.yml` with `enabled: true`. @@ -148,7 +148,6 @@ ensure their `secrets-dir` is organised as below: └── 0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007 ``` - ### Manual configuration The automatic validator discovery process works out-of-the-box with validators @@ -181,7 +180,7 @@ the active validator, the validator client will: password. 1. Use the keystore password to decrypt the keystore and obtain a BLS keypair. 1. Verify that the decrypted BLS keypair matches the `voting_public_key`. -1. Create a `voting-keystore.json.lock` file adjacent to the +1. Create a `voting-keystore.json.lock` file adjacent to the `voting_keystore_path`, indicating that the voting keystore is in-use and should not be opened by another process. 1. Proceed to act for that validator, creating blocks and attestations if/when required. diff --git a/book/src/validator-manager-create.md b/book/src/validator-manager-create.md index 98202d3b52..d97f953fc1 100644 --- a/book/src/validator-manager-create.md +++ b/book/src/validator-manager-create.md @@ -48,6 +48,7 @@ lighthouse \ --suggested-fee-recipient
\ --output-path ./ ``` + > If the flag `--first-index` is not provided, it will default to using index 0. > The `--suggested-fee-recipient` flag may be omitted to use whatever default > value the VC uses. It does not necessarily need to be identical to @@ -63,6 +64,7 @@ lighthouse \ --validators-file validators.json \ --vc-token ``` + > This is assuming that `validators.json` is in the present working directory. If it is not, insert the directory of the file. > Be sure to remove `./validators.json` after the import is successful since it > contains unencrypted validator keystores. @@ -141,7 +143,6 @@ must be known. The location of the file varies, but it is located in the `~/.lighthouse/mainnet/validators/api-token.txt`. We will use `` to substitute this value. If you are unsure of the `api-token.txt` path, you can run `curl http://localhost:5062/lighthouse/auth` which will show the path. - Once the VC is running, use the `import` command to import the validators to the VC: ```bash @@ -166,16 +167,18 @@ The user should now *securely* delete the `validators.json` file (e.g., `shred - The `validators.json` contains the unencrypted validator keys and must not be shared with anyone. At the same time, `lighthouse vc` will log: + ```bash INFO Importing keystores via standard HTTP API, count: 1 WARN No slashing protection data provided with keystores INFO Enabled validator voting_pubkey: 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f, signing_method: local_keystore INFO Modified key_cache saved successfully ``` -The WARN message means that the `validators.json` file does not contain the slashing protection data. This is normal if you are starting a new validator. The flag `--enable-doppelganger-protection` will also protect users from potential slashing risk. + +The WARN message means that the `validators.json` file does not contain the slashing protection data. This is normal if you are starting a new validator. 
The flag `--enable-doppelganger-protection` will also protect users from potential slashing risk. The validators will now go through 2-3 epochs of [doppelganger protection](./validator-doppelganger.md) and will automatically start performing -their duties when they are deposited and activated. +their duties when they are deposited and activated. If the host VC contains the same public key as the `validators.json` file, an error will be shown and the `import` process will stop: @@ -194,6 +197,7 @@ lighthouse \ --vc-token \ --ignore-duplicates ``` + and the output will be as follows: ```bash diff --git a/book/src/validator-manager-move.md b/book/src/validator-manager-move.md index 5009e6407e..10de1fe87c 100644 --- a/book/src/validator-manager-move.md +++ b/book/src/validator-manager-move.md @@ -100,7 +100,7 @@ lighthouse \ > it is recommended for an additional layer of safety. It will result in 2-3 > epochs of downtime for the validator after it is moved, which is generally an > inconsequential cost in lost rewards or penalties. -> +> > Optionally, users can add the `--http-store-passwords-in-secrets-dir` flag if they'd like to have > the import validator keystore passwords stored in separate files rather than in the > `validator-definitions.yml` file. If you don't know what this means, you can safely omit the flag. @@ -158,7 +158,9 @@ Moved keystore 1 of 2 Moved keystore 2 of 2 Done. 
``` + At the same time, `lighthouse vc` will log: + ```bash INFO Importing keystores via standard HTTP API, count: 1 INFO Enabled validator voting_pubkey: 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f, signing_method: local_keystore @@ -183,12 +185,13 @@ lighthouse \ ``` > Note: If you have the `validator-monitor-auto` turned on, the source beacon node may still be reporting the attestation status of the validators that have been moved: + ``` INFO Previous epoch attestation(s) success validators: ["validator_index"], epoch: 100000, service: val_mon, service: beacon ``` -> This is fine as the validator monitor does not know that the validators have been moved (it *does not* mean that the validators have attested twice for the same slot). A restart of the beacon node will resolve this. +> This is fine as the validator monitor does not know that the validators have been moved (it *does not* mean that the validators have attested twice for the same slot). A restart of the beacon node will resolve this. Any errors encountered during the operation should include information on how to proceed. Assistance is also available on our -[Discord](https://discord.gg/cyAszAh). \ No newline at end of file +[Discord](https://discord.gg/cyAszAh). 
diff --git a/book/src/validator-manager.md b/book/src/validator-manager.md index e3cb74bd66..a71fab1e3a 100644 --- a/book/src/validator-manager.md +++ b/book/src/validator-manager.md @@ -1,7 +1,6 @@ # Validator Manager [Ethereum Staking Launchpad]: https://launchpad.ethereum.org/en/ -[Import Validators]: #import-validators ## Introduction @@ -32,4 +31,4 @@ The `validator-manager` boasts the following features: ## Guides - [Creating and importing validators using the `create` and `import` commands.](./validator-manager-create.md) -- [Moving validators between two VCs using the `move` command.](./validator-manager-move.md) \ No newline at end of file +- [Moving validators between two VCs using the `move` command.](./validator-manager-move.md) diff --git a/book/src/validator-monitoring.md b/book/src/validator-monitoring.md index 532bd50065..6439ea83a3 100644 --- a/book/src/validator-monitoring.md +++ b/book/src/validator-monitoring.md @@ -20,7 +20,6 @@ Lighthouse performs validator monitoring in the Beacon Node (BN) instead of the - Users can use a local BN to observe some validators running in a remote location. - Users can monitor validators that are not their own. - ## How to Enable Monitoring The validator monitor is always enabled in Lighthouse, but it might not have any enrolled @@ -57,7 +56,8 @@ Monitor the mainnet validators at indices `0` and `1`: ``` lighthouse bn --validator-monitor-pubkeys 0x933ad9491b62059dd065b560d256d8957a8c402cc6e8d8ee7290ae11e8f7329267a8811c397529dac52ae1342ba58c95,0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c ``` -> Note: The validator monitoring will stop collecting per-validator Prometheus metrics and issuing per-validator logs when the number of validators reaches 64. To continue collecting metrics and logging, use the flag `--validator-monitor-individual-tracking-threshold N` where `N` is a number greater than the number of validators to monitor. 
+ +> Note: The validator monitoring will stop collecting per-validator Prometheus metrics and issuing per-validator logs when the number of validators reaches 64. To continue collecting metrics and logging, use the flag `--validator-monitor-individual-tracking-threshold N` where `N` is a number greater than the number of validators to monitor. ## Observing Monitoring @@ -102,7 +102,7 @@ dashboard contains most of the metrics exposed via the validator monitor. Lighthouse v4.6.0 introduces a new feature to track the performance of a beacon node. This feature internally simulates an attestation for each slot, and outputs a hit or miss for the head, target and source votes. The attestation simulator is turned on automatically (even when there are no validators) and prints logs in the debug level. -> Note: The simulated attestations are never published to the network, so the simulator does not reflect the attestation performance of a validator. +> Note: The simulated attestations are never published to the network, so the simulator does not reflect the attestation performance of a validator. 
The attestation simulation prints the following logs when simulating an attestation: @@ -118,11 +118,11 @@ DEBG Simulated attestation evaluated, head_hit: true, target_hit: true, source_h ``` An example of a log when the head is missed: + ``` DEBG Simulated attestation evaluated, head_hit: false, target_hit: true, source_hit: true, attestation_slot: Slot(1132623), attestation_head: 0x1c0e53c6ace8d0ff57f4a963e4460fe1c030b37bf1c76f19e40928dc2e214c59, attestation_target: 0xaab25a6d01748cf4528e952666558317b35874074632550c37d935ca2ec63c23, attestation_source: 0x13ccbf8978896c43027013972427ee7ce02b2bb9b898dbb264b870df9288c1e7, service: val_mon, service: beacon, module: beacon_chain::validator_monitor:2051 ``` - With `--metrics` enabled on the beacon node, the following metrics will be recorded: ``` @@ -134,11 +134,12 @@ validator_monitor_attestation_simulator_source_attester_hit_total validator_monitor_attestation_simulator_source_attester_miss_total ``` -A grafana dashboard to view the metrics for attestation simulator is available [here](https://github.com/sigp/lighthouse-metrics/blob/master/dashboards/AttestationSimulator.json). +A grafana dashboard to view the metrics for attestation simulator is available [here](https://github.com/sigp/lighthouse-metrics/blob/master/dashboards/AttestationSimulator.json). -The attestation simulator provides an insight into the attestation performance of a beacon node. It can be used as an indication of how expediently the beacon node has completed importing blocks within the 4s time frame for an attestation to be made. +The attestation simulator provides an insight into the attestation performance of a beacon node. It can be used as an indication of how expediently the beacon node has completed importing blocks within the 4s time frame for an attestation to be made. 
+ +The attestation simulator _does not_ consider: -The attestation simulator *does not* consider: - the latency between the beacon node and the validator client - the potential delays when publishing the attestation to the network @@ -146,10 +147,6 @@ which are critical factors to consider when evaluating the attestation performan Assuming the above factors are ignored (no delays between beacon node and validator client, and in publishing the attestation to the network): -1. If the attestation simulator says that all votes are hit, it means that if the beacon node were to publish the attestation for this slot, the validator should receive the rewards for the head, target and source votes. +1. If the attestation simulator says that all votes are hit, it means that if the beacon node were to publish the attestation for this slot, the validator should receive the rewards for the head, target and source votes. 1. If the attestation simulator says that the one or more votes are missed, it means that there is a delay in importing the block. The delay could be due to slowness in processing the block (e.g., due to a slow CPU) or that the block is arriving late (e.g., the proposer publishes the block late). If the beacon node were to publish the attestation for this slot, the validator will miss one or more votes (e.g., the head vote). - - - - diff --git a/book/src/voluntary-exit.md b/book/src/voluntary-exit.md index 4ec4837fea..6261f2e267 100644 --- a/book/src/voluntary-exit.md +++ b/book/src/voluntary-exit.md @@ -22,20 +22,17 @@ In order to initiate an exit, users can use the `lighthouse account validator ex - The `--password-file` flag is used to specify the path to the file containing the password for the voting keystore. If this flag is not provided, the user will be prompted to enter the password. 
- After validating the password, the user will be prompted to enter a special exit phrase as a final confirmation after which the voluntary exit will be published to the beacon chain. The exit phrase is the following: > Exit my validator - - Below is an example for initiating a voluntary exit on the Holesky testnet. ``` $ lighthouse --network holesky account validator exit --keystore /path/to/keystore --beacon-node http://localhost:5052 -Running account manager for Prater network +Running account manager for Holesky network validator-dir path: ~/.lighthouse/holesky/validators Enter the keystore password for validator in 0xabcd @@ -71,56 +68,52 @@ After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12 There are two types of withdrawal credentials, `0x00` and `0x01`. To check which type your validator has, go to [Staking launchpad](https://launchpad.ethereum.org/en/withdrawals), enter your validator index and click `verify on mainnet`: - - `withdrawals enabled` means your validator is of type `0x01`, and you will automatically receive the full withdrawal to the withdrawal address that you set. -- `withdrawals not enabled` means your validator is of type `0x00`, and will need to update your withdrawal credentials from `0x00` type to `0x01` type (also known as BLS-to-execution-change, or BTEC) to receive the staked funds. The common way to do this is using `Staking deposit CLI` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys). - +- `withdrawals enabled` means your validator is of type `0x01`, and you will automatically receive the full withdrawal to the withdrawal address that you set. +- `withdrawals not enabled` means your validator is of type `0x00`, and you will need to update your withdrawal credentials from `0x00` type to `0x01` type (also known as BLS-to-execution-change, or BTEC) to receive the staked funds. 
The common way to do this is using `Staking deposit CLI` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys). ### 2. What if my validator is of type `0x00` and I do not update my withdrawal credentials after I initiated a voluntary exit? Your staked fund will continue to be locked on the beacon chain. You can update your withdrawal credentials **anytime**, and there is no deadline for that. The catch is that as long as you do not update your withdrawal credentials, your staked funds in the beacon chain will continue to be locked in the beacon chain. Only after you update the withdrawal credentials, will the staked funds be withdrawn to the withdrawal address. -### 3. How many times can I update my withdrawal credentials? - +### 3. How many times can I update my withdrawal credentials? + If your withdrawal credentials is of type `0x00`, you can only update it once to type `0x01`. It is therefore very important to ensure that the withdrawal address you set is an address under your control, preferably an address controlled by a hardware wallet. If your withdrawal credentials is of type `0x01`, it means you have set your withdrawal address previously, and you will not be able to change the withdrawal address. ### 3. When will my BTEC request (update withdrawal credentials to type `0x01`) be processed ? - + Your BTEC request will be included very quickly as soon as a new block is proposed. This should be the case most (if not all) of the time, given that the peak BTEC request time has now past (right after the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023 and lasted for ~ 2 days) . -### 4. When will I get my staked fund after voluntary exit if my validator is of type `0x01`? - +### 4. When will I get my staked fund after voluntary exit if my validator is of type `0x01`? 
+ There are 3 waiting periods until you get the staked funds in your withdrawal address: - - An exit queue: a varying time that takes at a minimum 5 epochs (32 minutes) if there is no queue; or if there are many validators exiting at the same time, it has to go through the exit queue. The exit queue can be from hours to weeks, depending on the number of validators in the exit queue. During this time your validator has to stay online to perform its duties to avoid penalties. - - - A fixed waiting period of 256 epochs (27.3 hours) for the validator's status to become withdrawable. +- An exit queue: a varying time that takes at a minimum 5 epochs (32 minutes) if there is no queue; or if there are many validators exiting at the same time, it has to go through the exit queue. The exit queue can be from hours to weeks, depending on the number of validators in the exit queue. During this time your validator has to stay online to perform its duties to avoid penalties. - - A varying time of "validator sweep" that can take up to *n* days with *n* listed in the table below. The "validator sweep" is the process of skimming through all eligible validators by index number for withdrawals (those with type `0x01` and balance above 32ETH). Once the "validator sweep" reaches your validator's index, your staked fund will be fully withdrawn to the withdrawal address set. +- A fixed waiting period of 256 epochs (27.3 hours) for the validator's status to become withdrawable. + +- A varying time of "validator sweep" that can take up to _n_ days with _n_ listed in the table below. The "validator sweep" is the process of skimming through all eligible validators by index number for withdrawals (those with type `0x01` and balance above 32ETH). Once the "validator sweep" reaches your validator's index, your staked fund will be fully withdrawn to the withdrawal address set.
-| Number of eligible validators | Ideal scenario *n* | Practical scenario *n* | +| Number of eligible validators | Ideal scenario _n_ | Practical scenario _n_ | |:----------------:|:---------------------:|:----:| -| 300000 | 2.60 | 2.63 | -| 400000 | 3.47 | 3.51 | -| 500000 | 4.34 | 4.38 | -| 600000 | 5.21 | 5.26 | -| 700000 | 6.08 | 6.14 | -| 800000 | 6.94 | 7.01 | -| 900000 | 7.81 | 7.89 | -| 1000000 | 8.68 | 8.77 | +| 300000 | 2.60 | 2.63 | +| 400000 | 3.47 | 3.51 | +| 500000 | 4.34 | 4.38 | +| 600000 | 5.21 | 5.26 | +| 700000 | 6.08 | 6.14 | +| 800000 | 6.94 | 7.01 | +| 900000 | 7.81 | 7.89 | +| 1000000 | 8.68 | 8.77 |
> Note: Ideal scenario assumes no block proposals are missed. This means a total of withdrawals of 7200 blocks/day * 16 withdrawals/block = 115200 withdrawals/day. Practical scenario assumes 1% of blocks are missed per day. As an example, if there are 700000 eligible validators, one would expect a waiting time of slightly more than 6 days. - - The total time taken is the summation of the above 3 waiting periods. After these waiting periods, you will receive the staked funds in your withdrawal address. The voluntary exit and full withdrawal process is summarized in the Figure below. ![full](./imgs/full-withdrawal.png) - diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 6cf62e0430..d5c5fe0d64 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -18,9 +18,6 @@ slog-term = { workspace = true } logging = { workspace = true } slog-async = { workspace = true } slog-scope = "4.3.0" -slog-stdlog = "4.0.0" hex = { workspace = true } serde = { workspace = true } -serde_json = { workspace = true } -serde_yaml = { workspace = true } eth2_network_config = { workspace = true } diff --git a/boot_node/src/cli.rs b/boot_node/src/cli.rs index d7ea5ab0b3..440a9d27e2 100644 --- a/boot_node/src/cli.rs +++ b/boot_node/src/cli.rs @@ -1,18 +1,29 @@ //! Simple logic for spawning a Lighthouse BootNode. -use clap::{App, Arg}; +use clap::{Arg, ArgAction, Command}; +use clap_utils::{get_color_style, FLAG_HEADER}; // TODO: Add DOS prevention CLI params -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("boot_node") +pub fn cli_app() -> Command { + Command::new("boot_node") .about("Start a special Lighthouse process that only serves as a discv5 boot-node. This \ process will *not* import blocks or perform most typical beacon node functions. Instead, it \ will simply run the discv5 service and assist nodes on the network to discover each other. 
\ This is the recommended way to provide a network boot-node since it has a reduced attack \ surface compared to a full beacon node.") - .settings(&[clap::AppSettings::ColoredHelp]) + .styles(get_color_style()) + .display_order(0) .arg( - Arg::with_name("enr-address") + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER) + ) + .arg( + Arg::new("enr-address") .long("enr-address") .value_name("ADDRESS") .help("The IP address/ DNS address to broadcast to other peers on how to reach \ @@ -21,31 +32,33 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { discovery. Set this only if you are sure other nodes can connect to your \ local node on this address. This will update the `ip4` or `ip6` ENR fields \ accordingly. To update both, set this flag twice with the different values.") - .multiple(true) - .max_values(2) + .action(ArgAction::Append) + .num_args(1..=2) .required(true) .conflicts_with("network-dir") - .takes_value(true), + .display_order(0) ) .arg( - Arg::with_name("port") + Arg::new("port") .long("port") .value_name("PORT") .help("The UDP port to listen on.") .default_value("9000") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("port6") + Arg::new("port6") .long("port6") .value_name("PORT") .help("The UDP port to listen on over IpV6 when listening over both Ipv4 and \ Ipv6. Defaults to 9090 when required.") .default_value("9090") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("listen-address") + Arg::new("listen-address") .long("listen-address") .value_name("ADDRESS") .help("The address the bootnode will listen for UDP communications. To listen \ @@ -56,53 +69,63 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - --listen-address '0.0.0.0' --listen-address '::' will listen over both \ Ipv4 and Ipv6. The order of the given addresses is not relevant. 
However, \ multiple Ipv4, or multiple Ipv6 addresses will not be accepted.") - .multiple(true) - .max_values(2) + .num_args(1..=2) .default_value("0.0.0.0") - .takes_value(true) + .action(ArgAction::Append) + .display_order(0) ) .arg( - Arg::with_name("boot-nodes") + Arg::new("boot-nodes") .long("boot-nodes") .allow_hyphen_values(true) .value_name("ENR-LIST/Multiaddr") .help("One or more comma-delimited base64-encoded ENR's or multiaddr strings of peers to initially add to the local routing table") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-udp-port") + Arg::new("enr-udp-port") .long("enr-port") .value_name("PORT") .help("The UDP port of the boot node's ENR. This is the port that external peers will dial to reach this boot node. Set this only if the external port differs from the listening port.") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("network-dir") + .display_order(0) ) .arg( - Arg::with_name("enr-udp6-port") + Arg::new("enr-udp6-port") .long("enr-udp6-port") .value_name("PORT") .help("The UDP6 port of the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IpV6.") .conflicts_with("network-dir") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enable-enr-auto-update") - .short("x") + Arg::new("enable-enr-auto-update") + .short('x') + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long("enable-enr-auto-update") .help("Discovery can automatically update the node's local ENR with an external IP address and port as seen by other peers on the network. \ This enables this feature.") + .display_order(0) ) .arg( - Arg::with_name("disable-packet-filter") + Arg::new("disable-packet-filter") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long("disable-packet-filter") .help("Disables discv5 packet filter. 
Useful for testing in smaller networks") + .display_order(0) ) .arg( - Arg::with_name("network-dir") + Arg::new("network-dir") .value_name("NETWORK_DIR") .long("network-dir") .help("The directory which contains the enr and it's associated private key") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) } diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index a9c8950532..a8b0f7aa56 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -25,11 +25,10 @@ pub struct BootNodeConfig { impl BootNodeConfig { pub async fn new( - matches: &ArgMatches<'_>, + matches: &ArgMatches, eth2_network_config: &Eth2NetworkConfig, ) -> Result { let data_dir = get_data_dir(matches); - // Try and obtain bootnodes let boot_nodes = { @@ -39,7 +38,7 @@ impl BootNodeConfig { boot_nodes.extend_from_slice(enr); } - if let Some(nodes) = matches.value_of("boot-nodes") { + if let Some(nodes) = matches.get_one::("boot-nodes") { boot_nodes.extend_from_slice( &nodes .split(',') @@ -81,14 +80,14 @@ impl BootNodeConfig { }; // By default this is enabled. If it is not set, revert to false. - if !matches.is_present("enable-enr-auto-update") { + if !matches.get_flag("enable-enr-auto-update") { network_config.discv5_config.enr_update = false; } let private_key = load_private_key(&network_config, &logger); let local_key = CombinedKey::from_libp2p(private_key)?; - let local_enr = if let Some(dir) = matches.value_of("network-dir") { + let local_enr = if let Some(dir) = matches.get_one::("network-dir") { let network_dir: PathBuf = dir.into(); load_enr_from_disk(&network_dir)? } else { diff --git a/boot_node/src/lib.rs b/boot_node/src/lib.rs index e707dc14f7..669b126bd3 100644 --- a/boot_node/src/lib.rs +++ b/boot_node/src/lib.rs @@ -14,8 +14,8 @@ const LOG_CHANNEL_SIZE: usize = 2048; /// Run the bootnode given the CLI configuration. 
pub fn run( - lh_matches: &ArgMatches<'_>, - bn_matches: &ArgMatches<'_>, + lh_matches: &ArgMatches, + bn_matches: &ArgMatches, eth_spec_id: EthSpecId, eth2_network_config: &Eth2NetworkConfig, debug_level: String, @@ -67,8 +67,8 @@ pub fn run( } fn main( - lh_matches: &ArgMatches<'_>, - bn_matches: &ArgMatches<'_>, + lh_matches: &ArgMatches, + bn_matches: &ArgMatches, eth2_network_config: &Eth2NetworkConfig, log: slog::Logger, ) -> Result<(), String> { diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index b6bdd148f4..286fa9e0f0 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -12,8 +12,8 @@ use slog::info; use types::EthSpec; pub async fn run( - lh_matches: &ArgMatches<'_>, - bn_matches: &ArgMatches<'_>, + lh_matches: &ArgMatches, + bn_matches: &ArgMatches, eth2_network_config: &Eth2NetworkConfig, log: slog::Logger, ) -> Result<(), String> { @@ -28,7 +28,7 @@ pub async fn run( ð2_network_config.chain_spec::()?, )?; - if lh_matches.is_present("immediate-shutdown") { + if lh_matches.get_flag("immediate-shutdown") { return Ok(()); } diff --git a/common/clap_utils/src/lib.rs b/common/clap_utils/src/lib.rs index 1ebd2b1740..ea56e7e672 100644 --- a/common/clap_utils/src/lib.rs +++ b/common/clap_utils/src/lib.rs @@ -1,5 +1,6 @@ //! A helper library for parsing values from `clap::ArgMatches`. +use clap::builder::styling::*; use clap::ArgMatches; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK}; use ethereum_types::U256 as Uint256; @@ -15,12 +16,14 @@ pub const BAD_TESTNET_DIR_MESSAGE: &str = "The hard-coded testnet directory was or when there is no default public network to connect to. \ During these times you must specify a --testnet-dir."; +pub const FLAG_HEADER: &str = "Flags"; + /// Try to parse the eth2 network config from the `network`, `testnet-dir` flags in that order. /// Returns the default hardcoded testnet if neither flags are set. 
pub fn get_eth2_network_config(cli_args: &ArgMatches) -> Result { - let optional_network_config = if cli_args.is_present("network") { + let optional_network_config = if cli_args.contains_id("network") { parse_hardcoded_network(cli_args, "network")? - } else if cli_args.is_present("testnet-dir") { + } else if cli_args.contains_id("testnet-dir") { parse_testnet_dir(cli_args, "testnet-dir")? } else { // if neither is present, assume the default network @@ -92,7 +95,7 @@ pub fn parse_path_with_default_in_home_dir( default: PathBuf, ) -> Result { matches - .value_of(name) + .get_one::(name) .map(|dir| { dir.parse::() .map_err(|e| format!("Unable to parse {}: {}", name, e)) @@ -122,7 +125,8 @@ where ::Err: std::fmt::Display, { matches - .value_of(name) + .try_get_one::(name) + .map_err(|e| format!("Unable to parse {}: {}", name, e))? .map(|val| { val.parse() .map_err(|e| format!("Unable to parse {}: {}", name, e)) @@ -150,7 +154,7 @@ pub fn parse_ssz_optional( name: &'static str, ) -> Result, String> { matches - .value_of(name) + .get_one::(name) .map(|val| { if let Some(stripped) = val.strip_prefix("0x") { let vec = hex::decode(stripped) @@ -190,3 +194,15 @@ where } Ok(()) } + +pub fn get_color_style() -> Styles { + Styles::styled() + .header(AnsiColor::Yellow.on_default()) + .usage(AnsiColor::Green.on_default()) + .literal(AnsiColor::Green.on_default()) + .placeholder(AnsiColor::Green.on_default()) +} + +pub fn parse_flag(matches: &ArgMatches, name: &str) -> bool { + *matches.get_one::(name).unwrap_or(&false) +} diff --git a/common/directory/src/lib.rs b/common/directory/src/lib.rs index e8585c504a..df03b4f9a4 100644 --- a/common/directory/src/lib.rs +++ b/common/directory/src/lib.rs @@ -21,9 +21,9 @@ pub const CUSTOM_TESTNET_DIR: &str = "custom"; /// if not present, then checks the "testnet-dir" flag and returns a custom name /// If neither flags are present, returns the default hardcoded network name. 
pub fn get_network_dir(matches: &ArgMatches) -> String { - if let Some(network_name) = matches.value_of("network") { + if let Some(network_name) = matches.get_one::("network") { network_name.to_string() - } else if matches.value_of("testnet-dir").is_some() { + } else if matches.get_one::("testnet-dir").is_some() { CUSTOM_TESTNET_DIR.to_string() } else { eth2_network_config::DEFAULT_HARDCODED_NETWORK.to_string() diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index dc6860d90f..d23a4068f1 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -10,7 +10,6 @@ edition = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } ssz_types = { workspace = true } -tree_hash = { workspace = true } types = { workspace = true } reqwest = { workspace = true } lighthouse_network = { workspace = true } @@ -29,7 +28,6 @@ futures = { workspace = true } store = { workspace = true } slashing_protection = { workspace = true } mediatype = "0.19.13" -mime = "0.3.16" pretty_reqwest_error = { workspace = true } derivative = { workspace = true } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 838be4beff..2bb749af9f 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1080,6 +1080,9 @@ pub enum EventKind { #[cfg(feature = "lighthouse")] BlockReward(BlockReward), PayloadAttributes(VersionedSsePayloadAttributes), + ProposerSlashing(Box), + AttesterSlashing(Box>), + BlsToExecutionChange(Box), } impl EventKind { @@ -1099,6 +1102,9 @@ impl EventKind { EventKind::LightClientOptimisticUpdate(_) => "light_client_optimistic_update", #[cfg(feature = "lighthouse")] EventKind::BlockReward(_) => "block_reward", + EventKind::ProposerSlashing(_) => "proposer_slashing", + EventKind::AttesterSlashing(_) => "attester_slashing", + EventKind::BlsToExecutionChange(_) => "bls_to_execution_change", } } @@ -1179,6 +1185,21 @@ impl EventKind { "block_reward" => Ok(EventKind::BlockReward(serde_json::from_str(data).map_err( 
|e| ServerError::InvalidServerSentEvent(format!("Block Reward: {:?}", e)), )?)), + "attester_slashing" => Ok(EventKind::AttesterSlashing( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!("Attester Slashing: {:?}", e)) + })?, + )), + "proposer_slashing" => Ok(EventKind::ProposerSlashing( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!("Proposer Slashing: {:?}", e)) + })?, + )), + "bls_to_execution_change" => Ok(EventKind::BlsToExecutionChange( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!("Bls To Execution Change: {:?}", e)) + })?, + )), _ => Err(ServerError::InvalidServerSentEvent( "Could not parse event tag".to_string(), )), @@ -1210,6 +1231,9 @@ pub enum EventTopic { LightClientOptimisticUpdate, #[cfg(feature = "lighthouse")] BlockReward, + AttesterSlashing, + ProposerSlashing, + BlsToExecutionChange, } impl FromStr for EventTopic { @@ -1231,6 +1255,9 @@ impl FromStr for EventTopic { "light_client_optimistic_update" => Ok(EventTopic::LightClientOptimisticUpdate), #[cfg(feature = "lighthouse")] "block_reward" => Ok(EventTopic::BlockReward), + "attester_slashing" => Ok(EventTopic::AttesterSlashing), + "proposer_slashing" => Ok(EventTopic::ProposerSlashing), + "bls_to_execution_change" => Ok(EventTopic::BlsToExecutionChange), _ => Err("event topic cannot be parsed.".to_string()), } } @@ -1253,6 +1280,9 @@ impl fmt::Display for EventTopic { EventTopic::LightClientOptimisticUpdate => write!(f, "light_client_optimistic_update"), #[cfg(feature = "lighthouse")] EventTopic::BlockReward => write!(f, "block_reward"), + EventTopic::AttesterSlashing => write!(f, "attester_slashing"), + EventTopic::ProposerSlashing => write!(f, "proposer_slashing"), + EventTopic::BlsToExecutionChange => write!(f, "bls_to_execution_change"), } } } diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index bf707c4d17..9104db8f67 100644 --- 
a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -192,7 +192,11 @@ macro_rules! define_net { config_dir: ETH2_NET_DIR.config_dir, genesis_state_source: ETH2_NET_DIR.genesis_state_source, config: $this_crate::$include_file!($this_crate, "../", "config.yaml"), - deploy_block: $this_crate::$include_file!($this_crate, "../", "deploy_block.txt"), + deploy_block: $this_crate::$include_file!( + $this_crate, + "../", + "deposit_contract_block.txt" + ), boot_enr: $this_crate::$include_file!($this_crate, "../", "boot_enr.yaml"), genesis_state_bytes: $this_crate::$include_file!($this_crate, "../", "genesis.ssz"), } @@ -284,26 +288,6 @@ define_hardcoded_nets!( // Describes how the genesis state can be obtained. GenesisStateSource::IncludedBytes ), - ( - // Network name (must be unique among all networks). - prater, - // The name of the directory in the `eth2_network_config/built_in_network_configs` - // directory where the configuration files are located for this network. - "prater", - // Describes how the genesis state can be obtained. - GenesisStateSource::IncludedBytes - ), - ( - // Network name (must be unique among all networks). - goerli, - // The name of the directory in the `eth2_network_config/built_in_network_configs` - // directory where the configuration files are located for this network. - // - // The Goerli network is effectively an alias to Prater. - "prater", - // Describes how the genesis state can be obtained. - GenesisStateSource::IncludedBytes - ), ( // Network name (must be unique among all networks). 
gnosis, diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 3807c2e993..4b34405e5b 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -13,12 +13,11 @@ eth2_config = { workspace = true } [dev-dependencies] tempfile = { workspace = true } tokio = { workspace = true } +ethereum_ssz = { workspace = true } [dependencies] serde_yaml = { workspace = true } -serde_json = { workspace = true } types = { workspace = true } -ethereum_ssz = { workspace = true } eth2_config = { workspace = true } discv5 = { workspace = true } reqwest = { workspace = true } diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index c869d9cfc8..07d100b011 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -3,7 +3,7 @@ PRESET_BASE: 'gnosis' # Free-form short name of the network that this configuration applies to - known # canonical network names include: # * 'mainnet' - there can be only one -# * 'prater' - testnet +# * 'holesky' - testnet # Must match the regex: [a-z0-9\-] CONFIG_NAME: 'chiado' diff --git a/common/eth2_network_config/built_in_network_configs/chiado/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/chiado/deposit_contract_block.txt similarity index 100% rename from common/eth2_network_config/built_in_network_configs/chiado/deploy_block.txt rename to common/eth2_network_config/built_in_network_configs/chiado/deposit_contract_block.txt diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/gnosis/deposit_contract_block.txt similarity index 100% rename from common/eth2_network_config/built_in_network_configs/gnosis/deploy_block.txt rename to 
common/eth2_network_config/built_in_network_configs/gnosis/deposit_contract_block.txt diff --git a/common/eth2_network_config/built_in_network_configs/holesky/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/holesky/deposit_contract_block.txt similarity index 100% rename from common/eth2_network_config/built_in_network_configs/holesky/deploy_block.txt rename to common/eth2_network_config/built_in_network_configs/holesky/deposit_contract_block.txt diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index c8695123ab..fc9c002dab 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -6,7 +6,7 @@ PRESET_BASE: 'mainnet' # Free-form short name of the network that this configuration applies to - known # canonical network names include: # * 'mainnet' - there can be only one -# * 'prater' - testnet +# * 'holesky' - testnet # Must match the regex: [a-z0-9\-] CONFIG_NAME: 'mainnet' diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/mainnet/deposit_contract_block.txt similarity index 100% rename from common/eth2_network_config/built_in_network_configs/mainnet/deploy_block.txt rename to common/eth2_network_config/built_in_network_configs/mainnet/deposit_contract_block.txt diff --git a/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml deleted file mode 100644 index 7000ff0bbc..0000000000 --- a/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# q9f bootnode errai (lighthouse) -# /ip4/135.181.181.239/tcp/9000/p2p/16Uiu2HAmPitcpwsGZf1vGiu6hdwZHsVLyFzVZeNqaSmUaSyM7Xvj -- 
enr:-LK4QH1xnjotgXwg25IDPjrqRGFnH1ScgNHA3dv1Z8xHCp4uP3N3Jjl_aYv_WIxQRdwZvSukzbwspXZ7JjpldyeVDzMCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhIe1te-Jc2VjcDI1NmsxoQOkcGXqbCJYbcClZ3z5f6NWhX_1YPFRYRRWQpJjwSHpVIN0Y3CCIyiDdWRwgiMo -# q9f bootnode gudja (teku) -# /ip4/135.181.182.51/tcp/9000/p2p/16Uiu2HAmTttt9ZTmCmwmKiV3QR7iTAfnAckwzhswrNmWkthi6meB -- enr:-KG4QCIzJZTY_fs_2vqWEatJL9RrtnPwDCv-jRBuO5FQ2qBrfJubWOWazri6s9HsyZdu-fRUfEzkebhf1nvO42_FVzwDhGV0aDKQed8EKAAAECD__________4JpZIJ2NIJpcISHtbYziXNlY3AyNTZrMaED4m9AqVs6F32rSCGsjtYcsyfQE2K8nDiGmocUY_iq-TSDdGNwgiMog3VkcIIjKA -# Prysm bootnode #1 -- enr:-Ku4QFmUkNp0g9bsLX2PfVeIyT-9WO-PZlrqZBNtEyofOOfLMScDjaTzGxIb1Ns9Wo5Pm_8nlq-SZwcQfTH2cgO-s88Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDkvpOTAAAQIP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQLV_jMOIxKbjHFKgrkFvwDvpexo6Nd58TK5k7ss4Vt0IoN1ZHCCG1g -# Lighthouse bootnode #1 -- enr:-Ly4QFPk-cTMxZ3jWTafiNblEZkQIXGF2aVzCIGW0uHp6KaEAvBMoctE8S7YU0qZtuS7By0AA4YMfKoN9ls_GJRccVpFh2F0dG5ldHOI__________-EZXRoMpCC9KcrAgAQIIS2AQAAAAAAgmlkgnY0gmlwhKh3joWJc2VjcDI1NmsxoQKrxz8M1IHwJqRIpDqdVW_U1PeixMW5SfnBD-8idYIQrIhzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA -# Lighthouse bootnode #2 -- enr:-L64QJmwSDtaHVgGiqIxJWUtxWg6uLCipsms6j-8BdsOJfTWAs7CLF9HJnVqFE728O-JYUDCxzKvRdeMqBSauHVCMdaCAVWHYXR0bmV0c4j__________4RldGgykIL0pysCABAghLYBAAAAAACCaWSCdjSCaXCEQWxOdolzZWNwMjU2azGhA7Qmod9fK86WidPOzLsn5_8QyzL7ZcJ1Reca7RnD54vuiHN5bmNuZXRzD4N0Y3CCIyiDdWRwgiMo -# Nimbus bootstrap nodes -- enr:-LK4QMzPq4Q7w5R-rnGQDcI8BYky6oPVBGQTbS1JJLVtNi_8PzBLV7Bdzsoame9nJK5bcJYpGHn4SkaDN2CM6tR5G_4Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhAN4yvyJc2VjcDI1NmsxoQKa8Qnp_P2clLIP6VqLKOp_INvEjLszalEnW0LoBZo4YYN0Y3CCI4yDdWRwgiOM -- enr:-LK4QLM_pPHa78R8xlcU_s40Y3XhFjlb3kPddW9lRlY67N5qeFE2Wo7RgzDgRs2KLCXODnacVHMFw1SfpsW3R474RZEBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhANBY-yJc2VjcDI1NmsxoQNsZkFXgKbTzuxF7uwxlGauTGJelE6HD269CcFlZ_R7A4N0Y3CCI4yDdWRwgiOM -# Teku bootnode -- 
enr:-KK4QH0RsNJmIG0EX9LSnVxMvg-CAOr3ZFF92hunU63uE7wcYBjG1cFbUTvEa5G_4nDJkRhUq9q2ck9xY-VX1RtBsruBtIRldGgykIL0pysBABAg__________-CaWSCdjSCaXCEEnXQ0YlzZWNwMjU2azGhA1grTzOdMgBvjNrk-vqWtTZsYQIi0QawrhoZrsn5Hd56g3RjcIIjKIN1ZHCCIyg diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml deleted file mode 100644 index f474b172c5..0000000000 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ /dev/null @@ -1,134 +0,0 @@ -# Prater config - -# Extends the mainnet preset -PRESET_BASE: 'mainnet' - -CONFIG_NAME: 'prater' - -# Transition -# --------------------------------------------------------------- -# Expected August 10, 2022 -TERMINAL_TOTAL_DIFFICULTY: 10790000 -# By default, don't use these params -TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 -TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 - - -# Genesis -# --------------------------------------------------------------- -# `2**14` (= 16,384) -MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 -# Mar-01-2021 08:53:32 AM +UTC -MIN_GENESIS_TIME: 1614588812 -# Prater area code (Vienna) -GENESIS_FORK_VERSION: 0x00001020 -# Customized for Prater: 1919188 seconds (Mar-23-2021 02:00:00 PM +UTC) -GENESIS_DELAY: 1919188 - - -# Forking -# --------------------------------------------------------------- -# Some forks are disabled for now: -# - These may be re-assigned to another fork-version later -# - Temporarily set to max uint64 value: 2**64 - 1 - -# Altair -ALTAIR_FORK_VERSION: 0x01001020 -ALTAIR_FORK_EPOCH: 36660 -# Bellatrix -BELLATRIX_FORK_VERSION: 0x02001020 -BELLATRIX_FORK_EPOCH: 112260 -# Capella -CAPELLA_FORK_VERSION: 0x03001020 -CAPELLA_FORK_EPOCH: 162304 -# DENEB -DENEB_FORK_VERSION: 0x04001020 -DENEB_FORK_EPOCH: 231680 - -# Time parameters -# --------------------------------------------------------------- -# 12 seconds -SECONDS_PER_SLOT: 12 -# 14 
(estimate from Eth1 mainnet) -SECONDS_PER_ETH1_BLOCK: 14 -# 2**8 (= 256) epochs ~27 hours -MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -# 2**8 (= 256) epochs ~27 hours -SHARD_COMMITTEE_PERIOD: 256 -# 2**11 (= 2,048) Eth1 blocks ~8 hours -ETH1_FOLLOW_DISTANCE: 2048 - - -# Validator cycle -# --------------------------------------------------------------- -# 2**2 (= 4) -INACTIVITY_SCORE_BIAS: 4 -# 2**4 (= 16) -INACTIVITY_SCORE_RECOVERY_RATE: 16 -# 2**4 * 10**9 (= 16,000,000,000) Gwei -EJECTION_BALANCE: 16000000000 -# 2**2 (= 4) -MIN_PER_EPOCH_CHURN_LIMIT: 4 -# 2**16 (= 65,536) -CHURN_LIMIT_QUOTIENT: 65536 -# [New in Deneb:EIP7514] 2**3 (= 8) -MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 - -# Fork choice -# --------------------------------------------------------------- -# 40% -PROPOSER_SCORE_BOOST: 40 -# 20% -REORG_HEAD_WEIGHT_THRESHOLD: 20 -# 160% -REORG_PARENT_WEIGHT_THRESHOLD: 160 -# `2` epochs -REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 - -# Deposit contract -# --------------------------------------------------------------- -# Ethereum Goerli testnet -DEPOSIT_CHAIN_ID: 5 -DEPOSIT_NETWORK_ID: 5 -# Prater test deposit contract on Goerli Testnet -DEPOSIT_CONTRACT_ADDRESS: 0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b - -# Networking -# --------------------------------------------------------------- -# `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 -# `2**10` (= 1024) -MAX_REQUEST_BLOCKS: 1024 -# `2**8` (= 256) -EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 -# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) -MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 -# 5s -TTFB_TIMEOUT: 5 -# 10s -RESP_TIMEOUT: 10 -ATTESTATION_PROPAGATION_SLOT_RANGE: 32 -# 500ms -MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 -MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 -MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 -# 2 subnets per node -SUBNETS_PER_NODE: 2 -# 2**8 (= 64) -ATTESTATION_SUBNET_COUNT: 64 -ATTESTATION_SUBNET_EXTRA_BITS: 0 
-# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS -ATTESTATION_SUBNET_PREFIX_BITS: 6 -ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 - -# Deneb -# `2**7` (=128) -MAX_REQUEST_BLOCKS_DENEB: 128 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK -MAX_REQUEST_BLOB_SIDECARS: 768 -# `2**12` (= 4096 epochs, ~18 days) -MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 -# `6` -BLOB_SIDECAR_SUBNET_COUNT: 6 diff --git a/common/eth2_network_config/built_in_network_configs/prater/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/prater/deploy_block.txt deleted file mode 100644 index e8c50058b6..0000000000 --- a/common/eth2_network_config/built_in_network_configs/prater/deploy_block.txt +++ /dev/null @@ -1 +0,0 @@ -4367322 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/prater/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/prater/genesis.ssz.zip deleted file mode 100644 index 36bad7fae6..0000000000 Binary files a/common/eth2_network_config/built_in_network_configs/prater/genesis.ssz.zip and /dev/null differ diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/sepolia/deposit_contract_block.txt similarity index 100% rename from common/eth2_network_config/built_in_network_configs/sepolia/deploy_block.txt rename to common/eth2_network_config/built_in_network_configs/sepolia/deposit_contract_block.txt diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 1ead9a6bde..fb8c6938cd 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -29,14 +29,14 @@ use url::Url; pub use eth2_config::GenesisStateSource; -pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt"; +pub const DEPLOY_BLOCK_FILE: &str = "deposit_contract_block.txt"; pub const BOOT_ENR_FILE: &str = "boot_enr.yaml"; pub const GENESIS_STATE_FILE: &str = 
"genesis.ssz"; pub const BASE_CONFIG_FILE: &str = "config.yaml"; // Creates definitions for: // -// - Each of the `HardcodedNet` values (e.g., `MAINNET`, `PRATER`, etc). +// - Each of the `HardcodedNet` values (e.g., `MAINNET`, `HOLESKY`, etc). // - `HARDCODED_NETS: &[HardcodedNet]` // - `HARDCODED_NET_NAMES: &[&'static str]` instantiate_hardcoded_nets!(eth2_config); @@ -502,13 +502,6 @@ mod tests { .expect("beacon state can decode"); } - #[test] - fn prater_and_goerli_are_equal() { - let goerli = Eth2NetworkConfig::from_hardcoded_net(&GOERLI).unwrap(); - let prater = Eth2NetworkConfig::from_hardcoded_net(&PRATER).unwrap(); - assert_eq!(goerli, prater); - } - #[test] fn hard_coded_nets_work() { for net in HARDCODED_NETS { diff --git a/common/lighthouse_metrics/Cargo.toml b/common/lighthouse_metrics/Cargo.toml index 6d90534401..fe966f4a9c 100644 --- a/common/lighthouse_metrics/Cargo.toml +++ b/common/lighthouse_metrics/Cargo.toml @@ -7,5 +7,4 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -lazy_static = { workspace = true } prometheus = "0.13.0" diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index 1fad56d475..3a03d22f3c 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -15,7 +15,6 @@ parking_lot = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } slog = { workspace = true } -slog-async = { workspace = true } slog-term = { workspace = true } sloggers = { workspace = true } take_mut = "0.2.2" diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 3a5a5209b0..b0e1da00e9 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -256,14 +256,14 @@ pub fn create_tracing_layer(base_tracing_log_path: PathBuf) { return; }; - let (libp2p_non_blocking_writer, libp2p_guard) = NonBlocking::new(libp2p_writer); - let (discv5_non_blocking_writer, discv5_guard) = 
NonBlocking::new(discv5_writer); + let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(libp2p_writer); + let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(discv5_writer); let custom_layer = LoggingLayer { libp2p_non_blocking_writer, - libp2p_guard, + _libp2p_guard, discv5_non_blocking_writer, - discv5_guard, + _discv5_guard, }; if let Err(e) = tracing_subscriber::fmt() diff --git a/common/logging/src/tracing_logging_layer.rs b/common/logging/src/tracing_logging_layer.rs index aabb6ddd0c..a9ddae828a 100644 --- a/common/logging/src/tracing_logging_layer.rs +++ b/common/logging/src/tracing_logging_layer.rs @@ -7,9 +7,9 @@ use tracing_subscriber::Layer; pub struct LoggingLayer { pub libp2p_non_blocking_writer: NonBlocking, - pub libp2p_guard: WorkerGuard, + pub _libp2p_guard: WorkerGuard, pub discv5_non_blocking_writer: NonBlocking, - pub discv5_guard: WorkerGuard, + pub _discv5_guard: WorkerGuard, } impl Layer for LoggingLayer diff --git a/common/lru_cache/src/time.rs b/common/lru_cache/src/time.rs index 0b2fd83568..890bf47eb4 100644 --- a/common/lru_cache/src/time.rs +++ b/common/lru_cache/src/time.rs @@ -166,6 +166,12 @@ where self.map.contains(key) } + /// List known keys + pub fn keys(&mut self) -> impl Iterator { + self.update(); + self.map.iter() + } + /// Shrink the mappings to fit the current size. 
pub fn shrink_to_fit(&mut self) { self.map.shrink_to_fit(); diff --git a/common/system_health/Cargo.toml b/common/system_health/Cargo.toml index 5f0de80d90..be339f2779 100644 --- a/common/system_health/Cargo.toml +++ b/common/system_health/Cargo.toml @@ -8,5 +8,4 @@ lighthouse_network = { workspace = true } types = { workspace = true } sysinfo = { workspace = true } serde = { workspace = true } -serde_json = { workspace = true } parking_lot = { workspace = true } diff --git a/consensus/state_processing/src/metrics.rs b/consensus/state_processing/src/metrics.rs index e163f3b76b..ac5c0f659c 100644 --- a/consensus/state_processing/src/metrics.rs +++ b/consensus/state_processing/src/metrics.rs @@ -17,6 +17,10 @@ lazy_static! { "beacon_participation_prev_epoch_source_attesting_gwei_total", "Total effective balance (gwei) of validators who attested to the source in the previous epoch" ); + pub static ref PARTICIPATION_CURRENT_EPOCH_TOTAL_ACTIVE_GWEI_TOTAL: Result = try_create_int_gauge( + "beacon_participation_current_epoch_active_gwei_total", + "Total effective balance (gwei) of validators who are active in the current epoch" + ); /* * Processing metrics */ diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 2efa121882..98671f82b9 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -508,6 +508,7 @@ pub fn get_expected_withdrawals( let mut withdrawal_index = state.next_withdrawal_index()?; let mut validator_index = state.next_withdrawal_validator_index()?; let mut withdrawals = vec![]; + let fork_name = state.fork_name_unchecked(); let bound = std::cmp::min( state.validators().len() as u64, @@ -518,7 +519,7 @@ pub fn get_expected_withdrawals( let balance = *state.balances().get(validator_index as usize).ok_or( BeaconStateError::BalancesOutOfBounds(validator_index as usize), )?; - if 
validator.is_fully_withdrawable_at(balance, epoch, spec) { + if validator.is_fully_withdrawable_at(balance, epoch, spec, fork_name) { withdrawals.push(Withdrawal { index: withdrawal_index, validator_index, @@ -528,7 +529,7 @@ pub fn get_expected_withdrawals( amount: balance, }); withdrawal_index.safe_add_assign(1)?; - } else if validator.is_partially_withdrawable_validator(balance, spec) { + } else if validator.is_partially_withdrawable_validator(balance, spec, fork_name) { withdrawals.push(Withdrawal { index: withdrawal_index, validator_index, diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index 6f48050e16..952ab3f649 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -100,6 +100,10 @@ impl EpochProcessingSummary { &metrics::PARTICIPATION_PREV_EPOCH_SOURCE_ATTESTING_GWEI_TOTAL, self.previous_epoch_source_attesting_balance()? 
as i64, ); + metrics::set_gauge( + &metrics::PARTICIPATION_CURRENT_EPOCH_TOTAL_ACTIVE_GWEI_TOTAL, + self.current_epoch_total_active_balance() as i64, + ); Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs index 4b2f940e5f..3d02d79736 100644 --- a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs @@ -19,19 +19,20 @@ pub fn process_registry_updates( validator.is_active_at(current_epoch) && validator.effective_balance <= spec.ejection_balance }; + let fork_name = state.fork_name_unchecked(); let indices_to_update: Vec<_> = state .validators() .iter() .enumerate() .filter(|(_, validator)| { - validator.is_eligible_for_activation_queue(spec) || is_ejectable(validator) + validator.is_eligible_for_activation_queue(spec, fork_name) || is_ejectable(validator) }) .map(|(idx, _)| idx) .collect(); for index in indices_to_update { let validator = state.get_validator_mut(index)?; - if validator.is_eligible_for_activation_queue(spec) { + if validator.is_eligible_for_activation_queue(spec, fork_name) { validator.activation_eligibility_epoch = current_epoch.safe_add(1)?; } if is_ejectable(validator) { diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 7a95de3317..a9629e73e4 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -466,7 +466,7 @@ fn process_single_registry_update( ) -> Result<(), Error> { let current_epoch = state_ctxt.current_epoch; - if validator.is_eligible_for_activation_queue(spec) { + if validator.is_eligible_for_activation_queue(spec, state_ctxt.fork_name) { validator.make_mut()?.activation_eligibility_epoch = current_epoch.safe_add(1)?; } diff 
--git a/consensus/state_processing/src/upgrade/electra.rs b/consensus/state_processing/src/upgrade/electra.rs index f64228f050..1e60bf488d 100644 --- a/consensus/state_processing/src/upgrade/electra.rs +++ b/consensus/state_processing/src/upgrade/electra.rs @@ -1,3 +1,4 @@ +use safe_arith::SafeArith; use std::mem; use types::{ BeaconState, BeaconStateElectra, BeaconStateError as Error, ChainSpec, EpochCache, EthSpec, @@ -10,14 +11,28 @@ pub fn upgrade_to_electra( spec: &ChainSpec, ) -> Result<(), Error> { let epoch = pre_state.current_epoch(); - let pre = pre_state.as_deneb_mut()?; + let earliest_exit_epoch = pre_state + .validators() + .iter() + .filter(|v| v.exit_epoch != spec.far_future_epoch) + .map(|v| v.exit_epoch) + .max() + .unwrap_or(epoch) + .safe_add(1)?; + + // The total active balance cache must be built before the consolidation churn limit + // is calculated. + pre_state.build_total_active_balance_cache(spec)?; + let earliest_consolidation_epoch = spec.compute_activation_exit_epoch(epoch)?; + + let pre = pre_state.as_deneb_mut()?; // Where possible, use something like `mem::take` to move fields from behind the &mut // reference. For other fields that don't have a good default value, use `clone`. // // Fixed size vectors get cloned because replacing them would require the same size // allocation as cloning. 
- let post = BeaconState::Electra(BeaconStateElectra { + let mut post = BeaconState::Electra(BeaconStateElectra { // Versioning genesis_time: pre.genesis_time, genesis_validators_root: pre.genesis_validators_root, @@ -62,6 +77,16 @@ pub fn upgrade_to_electra( next_withdrawal_index: pre.next_withdrawal_index, next_withdrawal_validator_index: pre.next_withdrawal_validator_index, historical_summaries: pre.historical_summaries.clone(), + // Electra + deposit_receipts_start_index: spec.unset_deposit_receipts_start_index, + deposit_balance_to_consume: 0, + exit_balance_to_consume: 0, + earliest_exit_epoch, + consolidation_balance_to_consume: 0, + earliest_consolidation_epoch, + pending_balance_deposits: Default::default(), + pending_partial_withdrawals: Default::default(), + pending_consolidations: Default::default(), // Caches total_active_balance: pre.total_active_balance, progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), @@ -71,6 +96,39 @@ pub fn upgrade_to_electra( slashings_cache: mem::take(&mut pre.slashings_cache), epoch_cache: EpochCache::default(), }); + *post.exit_balance_to_consume_mut()? = post.get_activation_exit_churn_limit(spec)?; + *post.consolidation_balance_to_consume_mut()? 
= post.get_consolidation_churn_limit(spec)?; + + // Add validators that are not yet active to pending balance deposits + let validators = post.validators().clone(); + let mut pre_activation = validators + .iter() + .enumerate() + .filter(|(_, validator)| validator.activation_epoch == spec.far_future_epoch) + .collect::>(); + + // Sort the indices by activation_eligibility_epoch and then by index + pre_activation.sort_by(|(index_a, val_a), (index_b, val_b)| { + if val_a.activation_eligibility_epoch == val_b.activation_eligibility_epoch { + index_a.cmp(index_b) + } else { + val_a + .activation_eligibility_epoch + .cmp(&val_b.activation_eligibility_epoch) + } + }); + + // Process validators to queue entire balance and reset them + for (index, _) in pre_activation { + post.queue_entire_balance_and_reset_validator(index, spec)?; + } + + // Ensure early adopters of compounding credentials go through the activation churn + for (index, validator) in validators.iter().enumerate() { + if validator.has_compounding_withdrawal_credential(spec) { + post.queue_excess_active_balance(index, spec)?; + } + } *pre_state = post; diff --git a/consensus/swap_or_not_shuffle/benches/benches.rs b/consensus/swap_or_not_shuffle/benches/benches.rs index d5f64f0b6b..2909ff1ac6 100644 --- a/consensus/swap_or_not_shuffle/benches/benches.rs +++ b/consensus/swap_or_not_shuffle/benches/benches.rs @@ -1,7 +1,4 @@ -#![allow(deprecated)] - -use criterion::Criterion; -use criterion::{black_box, criterion_group, criterion_main, Benchmark}; +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; use swap_or_not_shuffle::{compute_shuffled_index, shuffle_list as fast_shuffle}; const SHUFFLE_ROUND_COUNT: u8 = 90; @@ -25,70 +22,32 @@ fn shuffles(c: &mut Criterion) { b.iter(|| black_box(shuffle_list(&seed, 8))) }); - c.bench( - "whole list shuffle", - Benchmark::new("8 elements", move |b| { - let seed = vec![42; 32]; - b.iter(|| black_box(shuffle_list(&seed, 8))) - }), - ); + for 
size in [8, 16, 512, 16_384] { + c.bench_with_input( + BenchmarkId::new("whole list shuffle", format!("{size} elements")), + &size, + move |b, &n| { + let seed = vec![42; 32]; + b.iter(|| black_box(shuffle_list(&seed, n))) + }, + ); + } - c.bench( - "whole list shuffle", - Benchmark::new("16 elements", move |b| { - let seed = vec![42; 32]; - b.iter(|| black_box(shuffle_list(&seed, 16))) - }), - ); - - c.bench( - "whole list shuffle", - Benchmark::new("512 elements", move |b| { - let seed = vec![42; 32]; - b.iter(|| black_box(shuffle_list(&seed, 512))) - }) - .sample_size(10), - ); - - c.bench( - "_fast_ whole list shuffle", - Benchmark::new("512 elements", move |b| { - let seed = vec![42; 32]; - let list: Vec = (0..512).collect(); - b.iter(|| black_box(fast_shuffle(list.clone(), SHUFFLE_ROUND_COUNT, &seed, true))) - }) - .sample_size(10), - ); - - c.bench( - "whole list shuffle", - Benchmark::new("16384 elements", move |b| { - let seed = vec![42; 32]; - b.iter(|| black_box(shuffle_list(&seed, 16_384))) - }) - .sample_size(10), - ); - - c.bench( - "_fast_ whole list shuffle", - Benchmark::new("16384 elements", move |b| { - let seed = vec![42; 32]; - let list: Vec = (0..16384).collect(); - b.iter(|| black_box(fast_shuffle(list.clone(), SHUFFLE_ROUND_COUNT, &seed, true))) - }) - .sample_size(10), - ); - - c.bench( - "_fast_ whole list shuffle", - Benchmark::new("4m elements", move |b| { - let seed = vec![42; 32]; - let list: Vec = (0..4_000_000).collect(); - b.iter(|| black_box(fast_shuffle(list.clone(), SHUFFLE_ROUND_COUNT, &seed, true))) - }) - .sample_size(10), - ); + let mut group = c.benchmark_group("fast"); + group.sample_size(10); + for size in [512, 16_384, 4_000_000] { + group.bench_with_input( + BenchmarkId::new("whole list shuffle", format!("{size} elements")), + &size, + move |b, &n| { + let seed = vec![42; 32]; + let list: Vec = (0..n).collect(); + b.iter(|| black_box(fast_shuffle(list.clone(), SHUFFLE_ROUND_COUNT, &seed, true))) + }, + ); + } + 
group.finish(); } -criterion_group!(benches, shuffles,); +criterion_group!(benches, shuffles); criterion_main!(benches); diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 4b7d9f2b98..fd1f862a92 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -51,7 +51,6 @@ metastruct = "0.1.0" serde_json = { workspace = true } smallvec = { workspace = true } maplit = { workspace = true } -strum = { workspace = true } milhouse = { workspace = true } rpds = { workspace = true } diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index 5c1036a4c5..c6dda142b2 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -1,7 +1,4 @@ -#![allow(deprecated)] - -use criterion::Criterion; -use criterion::{black_box, criterion_group, criterion_main, Benchmark}; +use criterion::{black_box, criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; use milhouse::List; use rayon::prelude::*; use ssz::Encode; @@ -53,75 +50,82 @@ fn all_benches(c: &mut Criterion) { let validator_count = 16_384; let spec = Arc::new(MainnetEthSpec::default_spec()); + let mut g = c.benchmark_group("types"); + g.sample_size(10); + let mut state = get_state::(validator_count); state.build_caches(&spec).expect("should build caches"); let state_bytes = state.as_ssz_bytes(); let inner_state = state.clone(); - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new("encode/beacon_state", move |b| { + g.bench_with_input( + BenchmarkId::new("encode/beacon_state", validator_count), + &inner_state, + |b, state| { b.iter_batched_ref( - || inner_state.clone(), + || state.clone(), |state| black_box(state.as_ssz_bytes()), - criterion::BatchSize::SmallInput, + BatchSize::SmallInput, ) - }) - .sample_size(10), + }, ); - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new("decode/beacon_state", move |b| { + g.bench_with_input( + BenchmarkId::new("decode/beacon_state", 
validator_count), + &(state_bytes.clone(), spec.clone()), + |b, (bytes, spec)| { b.iter_batched_ref( - || (state_bytes.clone(), spec.clone()), + || (bytes.clone(), spec.clone()), |(bytes, spec)| { let state: BeaconState = BeaconState::from_ssz_bytes(&bytes, &spec).expect("should decode"); black_box(state) }, - criterion::BatchSize::SmallInput, + BatchSize::SmallInput, ) - }) - .sample_size(10), + }, ); let inner_state = state.clone(); - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new("clone/beacon_state", move |b| { + g.bench_with_input( + BenchmarkId::new("clone/beacon_state", validator_count), + &inner_state, + |b, state| { b.iter_batched_ref( - || inner_state.clone(), + || state.clone(), |state| black_box(state.clone()), - criterion::BatchSize::SmallInput, + BatchSize::SmallInput, ) - }) - .sample_size(10), + }, ); let inner_state = state.clone(); - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new( + g.bench_with_input( + BenchmarkId::new( "initialized_cached_tree_hash_without_changes/beacon_state", - move |b| { - b.iter_batched_ref( - || inner_state.clone(), - |state| black_box(state.update_tree_hash_cache()), - criterion::BatchSize::SmallInput, - ) - }, - ) - .sample_size(10), + validator_count, + ), + &inner_state, + |b, state| { + b.iter_batched_ref( + || state.clone(), + |state| black_box(state.update_tree_hash_cache()), + BatchSize::SmallInput, + ) + }, ); let mut inner_state = state.clone(); inner_state.drop_all_caches().unwrap(); - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new("non_initialized_cached_tree_hash/beacon_state", move |b| { + g.bench_with_input( + BenchmarkId::new( + "non_initialized_cached_tree_hash/beacon_state", + validator_count, + ), + &inner_state, + |b, state| { b.iter_batched_ref( - || inner_state.clone(), + || state.clone(), |state| { black_box( state @@ -129,41 +133,40 @@ fn all_benches(c: &mut Criterion) { .expect("should update tree hash"), ) }, - 
criterion::BatchSize::SmallInput, + BatchSize::SmallInput, ) - }) - .sample_size(10), + }, ); let inner_state = state.clone(); - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new( + g.bench_with_input( + BenchmarkId::new( "initialized_cached_tree_hash_with_new_validators/beacon_state", - move |b| { - b.iter_batched_ref( - || { - let mut state = inner_state.clone(); - for _ in 0..16 { - state - .validators_mut() - .push(Validator::default()) - .expect("should push validatorj"); - state - .balances_mut() - .push(32_000_000_000) - .expect("should push balance"); - } + validator_count, + ), + &inner_state, + |b, state| { + b.iter_batched_ref( + || { + let mut state = state.clone(); + for _ in 0..16 { state - }, - |state| black_box(state.update_tree_hash_cache()), - criterion::BatchSize::SmallInput, - ) - }, - ) - .sample_size(10), + .validators_mut() + .push(Validator::default()) + .expect("should push validator"); + state + .balances_mut() + .push(32_000_000_000) + .expect("should push balance"); + } + state + }, + |state| black_box(state.update_tree_hash_cache()), + BatchSize::SmallInput, + ) + }, ); } -criterion_group!(benches, all_benches,); +criterion_group!(benches, all_benches); criterion_main!(benches); diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 5af53b3fa1..ed3d182772 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -4,6 +4,7 @@ use derivative::Derivative; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; +use std::fmt; use std::marker::PhantomData; use superstruct::superstruct; use test_random_derive::TestRandom; @@ -836,6 +837,23 @@ impl> ForkVersionDeserialize )) } } +pub enum BlockImportSource { + Gossip, + Lookup, + RangeSync, + HttpApi, +} + +impl fmt::Display for BlockImportSource { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + BlockImportSource::Gossip => 
write!(f, "gossip"), + BlockImportSource::Lookup => write!(f, "lookup"), + BlockImportSource::RangeSync => write!(f, "range_sync"), + BlockImportSource::HttpApi => write!(f, "http_api"), + } + } +} #[cfg(test)] mod tests { @@ -1074,9 +1092,8 @@ mod tests { .expect("good electra block can be decoded"), good_block ); - // TODO(electra): once the Electra block is changed from Deneb, update this to match - // the other forks. - assert!(BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec).is_ok()); + BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) + .expect_err("bad electra block cannot be decoded"); } } } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index f6bae53505..577f282a55 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -467,6 +467,40 @@ where #[test_random(default)] pub historical_summaries: List, + // Electra + #[superstruct(only(Electra), partial_getter(copy))] + #[metastruct(exclude_from(tree_lists))] + #[serde(with = "serde_utils::quoted_u64")] + pub deposit_receipts_start_index: u64, + #[superstruct(only(Electra), partial_getter(copy))] + #[metastruct(exclude_from(tree_lists))] + #[serde(with = "serde_utils::quoted_u64")] + pub deposit_balance_to_consume: u64, + #[superstruct(only(Electra), partial_getter(copy))] + #[metastruct(exclude_from(tree_lists))] + #[serde(with = "serde_utils::quoted_u64")] + pub exit_balance_to_consume: u64, + #[superstruct(only(Electra), partial_getter(copy))] + #[metastruct(exclude_from(tree_lists))] + pub earliest_exit_epoch: Epoch, + #[superstruct(only(Electra), partial_getter(copy))] + #[metastruct(exclude_from(tree_lists))] + #[serde(with = "serde_utils::quoted_u64")] + pub consolidation_balance_to_consume: u64, + #[superstruct(only(Electra), partial_getter(copy))] + #[metastruct(exclude_from(tree_lists))] + pub earliest_consolidation_epoch: Epoch, + #[test_random(default)] + #[superstruct(only(Electra))] + pub 
pending_balance_deposits: List, + #[test_random(default)] + #[superstruct(only(Electra))] + pub pending_partial_withdrawals: + List, + #[test_random(default)] + #[superstruct(only(Electra))] + pub pending_consolidations: List, + // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] @@ -2031,6 +2065,210 @@ impl BeaconState { self.epoch_cache().get_base_reward(validator_index) } + // ******* Electra accessors ******* + + /// Return the churn limit for the current epoch. + pub fn get_balance_churn_limit(&self, spec: &ChainSpec) -> Result { + let total_active_balance = self.get_total_active_balance()?; + let churn = std::cmp::max( + spec.min_per_epoch_churn_limit_electra, + total_active_balance.safe_div(spec.churn_limit_quotient)?, + ); + + Ok(churn.safe_sub(churn.safe_rem(spec.effective_balance_increment)?)?) + } + + /// Return the churn limit for the current epoch dedicated to activations and exits. + pub fn get_activation_exit_churn_limit(&self, spec: &ChainSpec) -> Result { + Ok(std::cmp::min( + spec.max_per_epoch_activation_exit_churn_limit, + self.get_balance_churn_limit(spec)?, + )) + } + + pub fn get_consolidation_churn_limit(&self, spec: &ChainSpec) -> Result { + self.get_balance_churn_limit(spec)? + .safe_sub(self.get_activation_exit_churn_limit(spec)?) + .map_err(Into::into) + } + + /// Get active balance for the given `validator_index`. 
+ pub fn get_active_balance( + &self, + validator_index: usize, + spec: &ChainSpec, + ) -> Result { + let max_effective_balance = self + .validators() + .get(validator_index) + .map(|validator| validator.get_validator_max_effective_balance(spec)) + .ok_or(Error::UnknownValidator(validator_index))?; + Ok(std::cmp::min( + *self + .balances() + .get(validator_index) + .ok_or(Error::UnknownValidator(validator_index))?, + max_effective_balance, + )) + } + + pub fn get_pending_balance_to_withdraw(&self, validator_index: usize) -> Result { + let mut pending_balance = 0; + for withdrawal in self + .pending_partial_withdrawals()? + .iter() + .filter(|withdrawal| withdrawal.index as usize == validator_index) + { + pending_balance.safe_add_assign(withdrawal.amount)?; + } + Ok(pending_balance) + } + + // ******* Electra mutators ******* + + pub fn queue_excess_active_balance( + &mut self, + validator_index: usize, + spec: &ChainSpec, + ) -> Result<(), Error> { + let balance = self + .balances_mut() + .get_mut(validator_index) + .ok_or(Error::UnknownValidator(validator_index))?; + if *balance > spec.min_activation_balance { + let excess_balance = balance.safe_sub(spec.min_activation_balance)?; + *balance = spec.min_activation_balance; + self.pending_balance_deposits_mut()? + .push(PendingBalanceDeposit { + index: validator_index as u64, + amount: excess_balance, + })?; + } + Ok(()) + } + + pub fn queue_entire_balance_and_reset_validator( + &mut self, + validator_index: usize, + spec: &ChainSpec, + ) -> Result<(), Error> { + let balance = self + .balances_mut() + .get_mut(validator_index) + .ok_or(Error::UnknownValidator(validator_index))?; + let balance_copy = *balance; + *balance = 0_u64; + + let validator = self + .validators_mut() + .get_mut(validator_index) + .ok_or(Error::UnknownValidator(validator_index))?; + validator.effective_balance = 0; + validator.activation_eligibility_epoch = spec.far_future_epoch; + + self.pending_balance_deposits_mut()? 
+ .push(PendingBalanceDeposit { + index: validator_index as u64, + amount: balance_copy, + }) + .map_err(Into::into) + } + + /// Change the withdrawal prefix of the given `validator_index` to the compounding withdrawal validator prefix. + pub fn switch_to_compounding_validator( + &mut self, + validator_index: usize, + spec: &ChainSpec, + ) -> Result<(), Error> { + let validator = self + .validators_mut() + .get_mut(validator_index) + .ok_or(Error::UnknownValidator(validator_index))?; + if validator.has_eth1_withdrawal_credential(spec) { + validator.withdrawal_credentials.as_fixed_bytes_mut()[0] = + spec.compounding_withdrawal_prefix_byte; + self.queue_excess_active_balance(validator_index, spec)?; + } + Ok(()) + } + + pub fn compute_exit_epoch_and_update_churn( + &mut self, + exit_balance: u64, + spec: &ChainSpec, + ) -> Result { + let mut earliest_exit_epoch = std::cmp::max( + self.earliest_exit_epoch()?, + self.compute_activation_exit_epoch(self.current_epoch(), spec)?, + ); + + let per_epoch_churn = self.get_activation_exit_churn_limit(spec)?; + // New epoch for exits + let mut exit_balance_to_consume = if self.earliest_exit_epoch()? < earliest_exit_epoch { + per_epoch_churn + } else { + self.exit_balance_to_consume()? + }; + + // Exit doesn't fit in the current earliest epoch + if exit_balance > exit_balance_to_consume { + let balance_to_process = exit_balance.safe_sub(exit_balance_to_consume)?; + let additional_epochs = balance_to_process + .safe_sub(1)? + .safe_div(per_epoch_churn)? 
+ .safe_add(1)?; + earliest_exit_epoch.safe_add_assign(additional_epochs)?; + exit_balance_to_consume + .safe_add_assign(additional_epochs.safe_mul(per_epoch_churn)?)?; + } + let state = self.as_electra_mut()?; + // Consume the balance and update state variables + state.exit_balance_to_consume = exit_balance_to_consume.safe_sub(exit_balance)?; + state.earliest_exit_epoch = earliest_exit_epoch; + + Ok(state.earliest_exit_epoch) + } + + pub fn compute_consolidation_epoch_and_update_churn( + &mut self, + consolidation_balance: u64, + spec: &ChainSpec, + ) -> Result { + let mut earliest_consolidation_epoch = std::cmp::max( + self.earliest_consolidation_epoch()?, + self.compute_activation_exit_epoch(self.current_epoch(), spec)?, + ); + + let per_epoch_consolidation_churn = self.get_consolidation_churn_limit(spec)?; + + // New epoch for consolidations + let mut consolidation_balance_to_consume = + if self.earliest_consolidation_epoch()? < earliest_consolidation_epoch { + per_epoch_consolidation_churn + } else { + self.consolidation_balance_to_consume()? + }; + // Consolidation doesn't fit in the current earliest epoch + if consolidation_balance > consolidation_balance_to_consume { + let balance_to_process = + consolidation_balance.safe_sub(consolidation_balance_to_consume)?; + let additional_epochs = balance_to_process + .safe_sub(1)? + .safe_div(per_epoch_consolidation_churn)? 
+ .safe_add(1)?; + earliest_consolidation_epoch.safe_add_assign(additional_epochs)?; + consolidation_balance_to_consume + .safe_add_assign(additional_epochs.safe_mul(per_epoch_consolidation_churn)?)?; + } + // Consume the balance and update state variables + let state = self.as_electra_mut()?; + state.consolidation_balance_to_consume = + consolidation_balance_to_consume.safe_sub(consolidation_balance)?; + state.earliest_consolidation_epoch = earliest_consolidation_epoch; + + Ok(state.earliest_consolidation_epoch) + } + #[allow(clippy::arithmetic_side_effects)] pub fn rebase_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> { // Required for macros (which use type-hints internally). @@ -2147,10 +2385,17 @@ impl BeaconState { /// The number of fields of the `BeaconState` rounded up to the nearest power of two. /// /// This is relevant to tree-hashing of the `BeaconState`. - /// - /// We assume this value is stable across forks. This assumption is checked in the - /// `check_num_fields_pow2` test. - pub const NUM_FIELDS_POW2: usize = BeaconStateBellatrix::::NUM_FIELDS.next_power_of_two(); + pub fn num_fields_pow2(&self) -> usize { + let fork_name = self.fork_name_unchecked(); + match fork_name { + ForkName::Base => BeaconStateBase::::NUM_FIELDS.next_power_of_two(), + ForkName::Altair => BeaconStateAltair::::NUM_FIELDS.next_power_of_two(), + ForkName::Bellatrix => BeaconStateBellatrix::::NUM_FIELDS.next_power_of_two(), + ForkName::Capella => BeaconStateCapella::::NUM_FIELDS.next_power_of_two(), + ForkName::Deneb => BeaconStateDeneb::::NUM_FIELDS.next_power_of_two(), + ForkName::Electra => BeaconStateElectra::::NUM_FIELDS.next_power_of_two(), + } + } /// Specialised deserialisation method that uses the `ChainSpec` as context. 
#[allow(clippy::arithmetic_side_effects)] @@ -2211,7 +2456,7 @@ impl BeaconState { // in the `BeaconState`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate generalized_index - .checked_sub(Self::NUM_FIELDS_POW2) + .checked_sub(self.num_fields_pow2()) .ok_or(Error::IndexNotSupported(generalized_index))? } light_client_update::FINALIZED_ROOT_INDEX => { @@ -2221,7 +2466,7 @@ impl BeaconState { // Subtract off the internal nodes. Result should be 105/2 - 32 = 20 which matches // position of `finalized_checkpoint` in `BeaconState`. finalized_checkpoint_generalized_index - .checked_sub(Self::NUM_FIELDS_POW2) + .checked_sub(self.num_fields_pow2()) .ok_or(Error::IndexNotSupported(generalized_index))? } _ => return Err(Error::IndexNotSupported(generalized_index)), diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index 35afe31495..38a76e44c5 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -1,10 +1,10 @@ #![cfg(test)] -use crate::{test_utils::*, ForkName}; +use crate::test_utils::*; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use beacon_chain::types::{ - test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateBellatrix, - BeaconStateCapella, BeaconStateDeneb, BeaconStateElectra, BeaconStateError, ChainSpec, Domain, - Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, RelativeEpoch, Slot, Vector, + test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateError, + ChainSpec, Domain, Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, + RelativeEpoch, Slot, Vector, }; use ssz::Encode; use std::ops::Mul; @@ -403,24 +403,3 @@ fn decode_base_and_altair() { .expect_err("bad altair state cannot be decoded"); } } - -#[test] -fn check_num_fields_pow2() { - use metastruct::NumFields; - pub type E = MainnetEthSpec; - - 
for fork_name in ForkName::list_all() { - let num_fields = match fork_name { - ForkName::Base => BeaconStateBase::::NUM_FIELDS, - ForkName::Altair => BeaconStateAltair::::NUM_FIELDS, - ForkName::Bellatrix => BeaconStateBellatrix::::NUM_FIELDS, - ForkName::Capella => BeaconStateCapella::::NUM_FIELDS, - ForkName::Deneb => BeaconStateDeneb::::NUM_FIELDS, - ForkName::Electra => BeaconStateElectra::::NUM_FIELDS, - }; - assert_eq!( - num_fields.next_power_of_two(), - BeaconState::::NUM_FIELDS_POW2 - ); - } -} diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index a46e7c29ff..0946b9ecff 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -89,6 +89,11 @@ pub struct ExecutionPayload { #[superstruct(only(Deneb, Electra), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, + #[superstruct(only(Electra))] + pub deposit_receipts: VariableList, + #[superstruct(only(Electra))] + pub withdrawal_requests: + VariableList, } impl<'a, E: EthSpec> ExecutionPayloadRef<'a, E> { diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 8515e386c2..324d7b9747 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -88,6 +88,10 @@ pub struct ExecutionPayloadHeader { #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub excess_blob_gas: u64, + #[superstruct(only(Electra), partial_getter(copy))] + pub deposit_receipts_root: Hash256, + #[superstruct(only(Electra), partial_getter(copy))] + pub withdrawal_requests_root: Hash256, } impl ExecutionPayloadHeader { @@ -206,6 +210,8 @@ impl ExecutionPayloadHeaderDeneb { withdrawals_root: self.withdrawals_root, blob_gas_used: self.blob_gas_used, excess_blob_gas: self.excess_blob_gas, + deposit_receipts_root: Hash256::zero(), + withdrawal_requests_root: Hash256::zero(), } } } 
@@ -297,6 +303,8 @@ impl<'a, E: EthSpec> From<&'a ExecutionPayloadElectra> for ExecutionPayloadHe withdrawals_root: payload.withdrawals.tree_hash_root(), blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, + deposit_receipts_root: payload.deposit_receipts.tree_hash_root(), + withdrawal_requests_root: payload.withdrawal_requests.tree_hash_root(), } } } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 5c521d98af..c170b6b70d 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -122,7 +122,7 @@ pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockCapella, BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, - EmptyBlock, + BlockImportSource, EmptyBlock, }; pub use crate::beacon_block_body::{ BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyBellatrix, diff --git a/consensus/types/src/test_utils/test_random/bitfield.rs b/consensus/types/src/test_utils/test_random/bitfield.rs index f73f7c18c5..35176d389d 100644 --- a/consensus/types/src/test_utils/test_random/bitfield.rs +++ b/consensus/types/src/test_utils/test_random/bitfield.rs @@ -26,6 +26,15 @@ impl TestRandom for BitVector { fn random_for_test(rng: &mut impl RngCore) -> Self { let mut raw_bytes = smallvec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)]; rng.fill_bytes(&mut raw_bytes); + // If N isn't divisible by 8 + // zero out bits greater than N + if let Some(last_byte) = raw_bytes.last_mut() { + let mut mask = 0; + for i in 0..N::to_usize() % 8 { + mask |= 1 << i; + } + *last_byte &= mask; + } Self::from_bytes(raw_bytes).expect("we generate a valid BitVector") } } diff --git a/consensus/types/src/test_utils/test_random/secret_key.rs b/consensus/types/src/test_utils/test_random/secret_key.rs index 3f3f6ed518..da1614aa24 100644 --- 
a/consensus/types/src/test_utils/test_random/secret_key.rs +++ b/consensus/types/src/test_utils/test_random/secret_key.rs @@ -2,6 +2,8 @@ use super::*; impl TestRandom for SecretKey { fn random_for_test(_rng: &mut impl RngCore) -> Self { + // TODO: Not deterministic generation. Using `SecretKey::deserialize` results in + // `BlstError(BLST_BAD_ENCODING)`, need to debug with blst source on what encoding expects. SecretKey::random() } } diff --git a/consensus/types/src/test_utils/test_random/signature.rs b/consensus/types/src/test_utils/test_random/signature.rs index 5b952296b6..8bc0d71110 100644 --- a/consensus/types/src/test_utils/test_random/signature.rs +++ b/consensus/types/src/test_utils/test_random/signature.rs @@ -1,11 +1,10 @@ use super::*; impl TestRandom for Signature { - fn random_for_test(rng: &mut impl RngCore) -> Self { - let secret_key = SecretKey::random_for_test(rng); - let mut message = vec![0; 32]; - rng.fill_bytes(&mut message); - - secret_key.sign(Hash256::from_slice(&message)) + fn random_for_test(_rng: &mut impl RngCore) -> Self { + // TODO: `SecretKey::random_for_test` does not return a deterministic signature. Since this + // signature will not pass verification we could just return the generator point or the + // generator point multiplied by a random scalar if we want disctint signatures. 
+ Signature::infinity().expect("infinity signature is valid") } } diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 98567cd1e6..9e26d1eeca 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,5 +1,5 @@ use crate::{ - test_utils::TestRandom, Address, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, + test_utils::TestRandom, Address, BeaconState, ChainSpec, Epoch, EthSpec, ForkName, Hash256, PublicKeyBytes, }; use serde::{Deserialize, Serialize}; @@ -55,14 +55,37 @@ impl Validator { epoch >= self.withdrawable_epoch } + /// Returns `true` if the validator is eligible to join the activation queue. + /// + /// Calls the correct function depending on the provided `fork_name`. + pub fn is_eligible_for_activation_queue( + &self, + spec: &ChainSpec, + current_fork: ForkName, + ) -> bool { + if current_fork >= ForkName::Electra { + self.is_eligible_for_activation_queue_electra(spec) + } else { + self.is_eligible_for_activation_queue_base(spec) + } + } + /// Returns `true` if the validator is eligible to join the activation queue. /// /// Spec v0.12.1 - pub fn is_eligible_for_activation_queue(&self, spec: &ChainSpec) -> bool { + fn is_eligible_for_activation_queue_base(&self, spec: &ChainSpec) -> bool { self.activation_eligibility_epoch == spec.far_future_epoch && self.effective_balance == spec.max_effective_balance } + /// Returns `true` if the validator is eligible to join the activation queue. + /// + /// Modified in electra as part of EIP 7251. + fn is_eligible_for_activation_queue_electra(&self, spec: &ChainSpec) -> bool { + self.activation_eligibility_epoch == spec.far_future_epoch + && self.effective_balance >= spec.min_activation_balance + } + /// Returns `true` if the validator is eligible to be activated. /// /// Spec v0.12.1 @@ -102,6 +125,11 @@ impl Validator { .unwrap_or(false) } + /// Check if ``validator`` has an 0x02 prefixed "compounding" withdrawal credential. 
+ pub fn has_compounding_withdrawal_credential(&self, spec: &ChainSpec) -> bool { + is_compounding_withdrawal_credential(self.withdrawal_credentials, spec) + } + /// Get the eth1 withdrawal address if this validator has one initialized. pub fn get_eth1_withdrawal_address(&self, spec: &ChainSpec) -> Option
{ self.has_eth1_withdrawal_credential(spec) @@ -125,16 +153,99 @@ impl Validator { } /// Returns `true` if the validator is fully withdrawable at some epoch. - pub fn is_fully_withdrawable_at(&self, balance: u64, epoch: Epoch, spec: &ChainSpec) -> bool { + /// + /// Calls the correct function depending on the provided `fork_name`. + pub fn is_fully_withdrawable_at( + &self, + balance: u64, + epoch: Epoch, + spec: &ChainSpec, + current_fork: ForkName, + ) -> bool { + if current_fork >= ForkName::Electra { + self.is_fully_withdrawable_at_electra(balance, epoch, spec) + } else { + self.is_fully_withdrawable_at_capella(balance, epoch, spec) + } + } + + /// Returns `true` if the validator is fully withdrawable at some epoch. + fn is_fully_withdrawable_at_capella( + &self, + balance: u64, + epoch: Epoch, + spec: &ChainSpec, + ) -> bool { self.has_eth1_withdrawal_credential(spec) && self.withdrawable_epoch <= epoch && balance > 0 } + /// Returns `true` if the validator is fully withdrawable at some epoch. + /// + /// Modified in electra as part of EIP 7251. + fn is_fully_withdrawable_at_electra( + &self, + balance: u64, + epoch: Epoch, + spec: &ChainSpec, + ) -> bool { + self.has_execution_withdrawal_credential(spec) + && self.withdrawable_epoch <= epoch + && balance > 0 + } + /// Returns `true` if the validator is partially withdrawable. - pub fn is_partially_withdrawable_validator(&self, balance: u64, spec: &ChainSpec) -> bool { + /// + /// Calls the correct function depending on the provided `fork_name`. + pub fn is_partially_withdrawable_validator( + &self, + balance: u64, + spec: &ChainSpec, + current_fork: ForkName, + ) -> bool { + if current_fork >= ForkName::Electra { + self.is_partially_withdrawable_validator_electra(balance, spec) + } else { + self.is_partially_withdrawable_validator_capella(balance, spec) + } + } + + /// Returns `true` if the validator is partially withdrawable. 
+ fn is_partially_withdrawable_validator_capella(&self, balance: u64, spec: &ChainSpec) -> bool { self.has_eth1_withdrawal_credential(spec) && self.effective_balance == spec.max_effective_balance && balance > spec.max_effective_balance } + + /// Returns `true` if the validator is partially withdrawable. + /// + /// Modified in electra as part of EIP 7251. + pub fn is_partially_withdrawable_validator_electra( + &self, + balance: u64, + spec: &ChainSpec, + ) -> bool { + let max_effective_balance = self.get_validator_max_effective_balance(spec); + let has_max_effective_balance = self.effective_balance == max_effective_balance; + let has_excess_balance = balance > max_effective_balance; + self.has_execution_withdrawal_credential(spec) + && has_max_effective_balance + && has_excess_balance + } + + /// Returns `true` if the validator has a 0x01 or 0x02 prefixed withdrawal credential. + pub fn has_execution_withdrawal_credential(&self, spec: &ChainSpec) -> bool { + self.has_compounding_withdrawal_credential(spec) + || self.has_eth1_withdrawal_credential(spec) + } + + /// Returns the max effective balance for a validator in gwei. 
+ pub fn get_validator_max_effective_balance(&self, spec: &ChainSpec) -> u64 { + if self.has_compounding_withdrawal_credential(spec) { + spec.max_effective_balance_electra + } else { + spec.min_activation_balance + } + } } impl Default for Validator { @@ -153,6 +264,17 @@ impl Default for Validator { } } +pub fn is_compounding_withdrawal_credential( + withdrawal_credentials: Hash256, + spec: &ChainSpec, +) -> bool { + withdrawal_credentials + .as_bytes() + .first() + .map(|prefix_byte| *prefix_byte == spec.compounding_withdrawal_prefix_byte) + .unwrap_or(false) +} + #[cfg(test)] mod tests { use super::*; diff --git a/database_manager/Cargo.toml b/database_manager/Cargo.toml index 07045dd95c..250188e2db 100644 --- a/database_manager/Cargo.toml +++ b/database_manager/Cargo.toml @@ -10,10 +10,7 @@ clap = { workspace = true } clap_utils = { workspace = true } environment = { workspace = true } hex = { workspace = true } -logging = { workspace = true } -sloggers = { workspace = true } store = { workspace = true } -tempfile = { workspace = true } types = { workspace = true } slog = { workspace = true } strum = { workspace = true } diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 617192abfe..fafff0f0f9 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -3,7 +3,8 @@ use beacon_chain::{ slot_clock::SystemTimeSlotClock, }; use beacon_node::{get_data_dir, get_slots_per_restore_point, ClientConfig}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::{get_color_style, FLAG_HEADER}; use environment::{Environment, RuntimeContext}; use slog::{info, warn, Logger}; use std::fs; @@ -20,147 +21,173 @@ use types::{BeaconState, EthSpec, Slot}; pub const CMD: &str = "database_manager"; -pub fn version_cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("version") - .visible_aliases(&["v"]) - .setting(clap::AppSettings::ColoredHelp) +pub fn version_cli_app() -> Command { + 
Command::new("version") + .visible_aliases(["v"]) + .styles(get_color_style()) .about("Display database schema version") } -pub fn migrate_cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("migrate") - .setting(clap::AppSettings::ColoredHelp) +pub fn migrate_cli_app() -> Command { + Command::new("migrate") + .styles(get_color_style()) .about("Migrate the database to a specific schema version") .arg( - Arg::with_name("to") + Arg::new("to") .long("to") .value_name("VERSION") .help("Schema version to migrate to") - .takes_value(true) + .action(ArgAction::Set) .required(true), ) } -pub fn inspect_cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("inspect") - .setting(clap::AppSettings::ColoredHelp) +pub fn inspect_cli_app() -> Command { + Command::new("inspect") + .styles(get_color_style()) .about("Inspect raw database values") .arg( - Arg::with_name("column") + Arg::new("column") .long("column") .value_name("TAG") .help("3-byte column ID (see `DBColumn`)") - .takes_value(true) - .required(true), + .action(ArgAction::Set) + .required(true) + .display_order(0), ) .arg( - Arg::with_name("output") + Arg::new("output") .long("output") .value_name("TARGET") .help("Select the type of output to show") .default_value("sizes") - .possible_values(InspectTarget::VARIANTS), + .value_parser(InspectTarget::VARIANTS.to_vec()) + .display_order(0), ) .arg( - Arg::with_name("skip") + Arg::new("skip") .long("skip") .value_name("N") - .help("Skip over the first N keys"), + .help("Skip over the first N keys") + .display_order(0), ) .arg( - Arg::with_name("limit") + Arg::new("limit") .long("limit") .value_name("N") - .help("Output at most N keys"), + .help("Output at most N keys") + .display_order(0), ) .arg( - Arg::with_name("freezer") + Arg::new("freezer") .long("freezer") .help("Inspect the freezer DB rather than the hot DB") - .takes_value(false) - .conflicts_with("blobs-db"), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .conflicts_with("blobs-db") + .display_order(0), ) .arg( - 
Arg::with_name("blobs-db") + Arg::new("blobs-db") .long("blobs-db") .help("Inspect the blobs DB rather than the hot DB") - .takes_value(false) - .conflicts_with("freezer"), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .conflicts_with("freezer") + .display_order(0), ) .arg( - Arg::with_name("output-dir") + Arg::new("output-dir") .long("output-dir") .value_name("DIR") .help("Base directory for the output files. Defaults to the current directory") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) } -pub fn compact_cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("compact") - .setting(clap::AppSettings::ColoredHelp) +pub fn compact_cli_app() -> Command { + Command::new("compact") + .styles(get_color_style()) .about("Compact database manually") .arg( - Arg::with_name("column") + Arg::new("column") .long("column") .value_name("TAG") .help("3-byte column ID (see `DBColumn`)") - .takes_value(true) - .required(true), + .action(ArgAction::Set) + .required(true) + .display_order(0), ) .arg( - Arg::with_name("freezer") + Arg::new("freezer") .long("freezer") .help("Inspect the freezer DB rather than the hot DB") - .takes_value(false) - .conflicts_with("blobs-db"), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .conflicts_with("blobs-db") + .display_order(0), ) .arg( - Arg::with_name("blobs-db") + Arg::new("blobs-db") .long("blobs-db") .help("Inspect the blobs DB rather than the hot DB") - .takes_value(false) - .conflicts_with("freezer"), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .conflicts_with("freezer") + .display_order(0), ) } -pub fn prune_payloads_app<'a, 'b>() -> App<'a, 'b> { - App::new("prune-payloads") +pub fn prune_payloads_app() -> Command { + Command::new("prune-payloads") .alias("prune_payloads") - .setting(clap::AppSettings::ColoredHelp) + .styles(get_color_style()) .about("Prune finalized execution payloads") } -pub fn prune_blobs_app<'a, 'b>() -> App<'a, 'b> { - App::new("prune-blobs") +pub fn 
prune_blobs_app() -> Command { + Command::new("prune-blobs") .alias("prune_blobs") - .setting(clap::AppSettings::ColoredHelp) + .styles(get_color_style()) .about("Prune blobs older than data availability boundary") } -pub fn prune_states_app<'a, 'b>() -> App<'a, 'b> { - App::new("prune-states") +pub fn prune_states_app() -> Command { + Command::new("prune-states") .alias("prune_states") .arg( - Arg::with_name("confirm") + Arg::new("confirm") .long("confirm") .help( "Commit to pruning states irreversably. Without this flag the command will \ just check that the database is capable of being pruned.", ) - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0), ) - .setting(clap::AppSettings::ColoredHelp) + .styles(get_color_style()) .about("Prune all beacon states from the freezer database") } -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) - .visible_aliases(&["db"]) - .setting(clap::AppSettings::ColoredHelp) +pub fn cli_app() -> Command { + Command::new(CMD) + .display_order(0) + .visible_aliases(["db"]) + .styles(get_color_style()) .about("Manage a beacon node database") .arg( - Arg::with_name("slots-per-restore-point") + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) + .arg( + Arg::new("slots-per-restore-point") .long("slots-per-restore-point") .value_name("SLOT_COUNT") .help( @@ -168,32 +195,36 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Cannot be changed after initialization. 
\ [default: 2048 (mainnet) or 64 (minimal)]", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name("freezer-dir") + Arg::new("freezer-dir") .long("freezer-dir") .value_name("DIR") .help("Data directory for the freezer database.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name("blob-prune-margin-epochs") + Arg::new("blob-prune-margin-epochs") .long("blob-prune-margin-epochs") .value_name("EPOCHS") .help( "The margin for blob pruning in epochs. The oldest blobs are pruned \ up until data_availability_boundary - blob_prune_margin_epochs.", ) - .takes_value(true) - .default_value("0"), + .action(ArgAction::Set) + .default_value("0") + .display_order(0), ) .arg( - Arg::with_name("blobs-dir") + Arg::new("blobs-dir") .long("blobs-dir") .value_name("DIR") .help("Data directory for the blobs database.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .subcommand(migrate_cli_app()) .subcommand(version_cli_app()) @@ -298,8 +329,8 @@ fn parse_inspect_config(cli_args: &ArgMatches) -> Result let target = clap_utils::parse_required(cli_args, "output")?; let skip = clap_utils::parse_optional(cli_args, "skip")?; let limit = clap_utils::parse_optional(cli_args, "limit")?; - let freezer = cli_args.is_present("freezer"); - let blobs_db = cli_args.is_present("blobs-db"); + let freezer = cli_args.get_flag("freezer"); + let blobs_db = cli_args.get_flag("blobs-db"); let output_dir: PathBuf = clap_utils::parse_optional(cli_args, "output-dir")?.unwrap_or_else(PathBuf::new); @@ -421,8 +452,8 @@ pub struct CompactConfig { fn parse_compact_config(cli_args: &ArgMatches) -> Result { let column = clap_utils::parse_required(cli_args, "column")?; - let freezer = cli_args.is_present("freezer"); - let blobs_db = cli_args.is_present("blobs-db"); + let freezer = cli_args.get_flag("freezer"); + let blobs_db = cli_args.get_flag("blobs-db"); Ok(CompactConfig { column, freezer, @@ -566,7 +597,7 
@@ pub struct PruneStatesConfig { } fn parse_prune_states_config(cli_args: &ArgMatches) -> Result { - let confirm = cli_args.is_present("confirm"); + let confirm = cli_args.get_flag("confirm"); Ok(PruneStatesConfig { confirm }) } @@ -645,33 +676,33 @@ pub fn prune_states( } /// Run the database manager, returning an error string if the operation did not succeed. -pub fn run(cli_args: &ArgMatches<'_>, env: Environment) -> Result<(), String> { +pub fn run(cli_args: &ArgMatches, env: Environment) -> Result<(), String> { let client_config = parse_client_config(cli_args, &env)?; let context = env.core_context(); let log = context.log().clone(); let format_err = |e| format!("Fatal error: {:?}", e); match cli_args.subcommand() { - ("version", Some(_)) => { + Some(("version", _)) => { display_db_version(client_config, &context, log).map_err(format_err) } - ("migrate", Some(cli_args)) => { + Some(("migrate", cli_args)) => { let migrate_config = parse_migrate_config(cli_args)?; migrate_db(migrate_config, client_config, &context, log).map_err(format_err) } - ("inspect", Some(cli_args)) => { + Some(("inspect", cli_args)) => { let inspect_config = parse_inspect_config(cli_args)?; inspect_db::(inspect_config, client_config) } - ("compact", Some(cli_args)) => { + Some(("compact", cli_args)) => { let compact_config = parse_compact_config(cli_args)?; compact_db::(compact_config, client_config, log).map_err(format_err) } - ("prune-payloads", Some(_)) => { + Some(("prune-payloads", _)) => { prune_payloads(client_config, &context, log).map_err(format_err) } - ("prune-blobs", Some(_)) => prune_blobs(client_config, &context, log).map_err(format_err), - ("prune-states", Some(cli_args)) => { + Some(("prune-blobs", _)) => prune_blobs(client_config, &context, log).map_err(format_err), + Some(("prune-states", cli_args)) => { let executor = env.core_context().executor; let network_config = context .eth2_network_config diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 2aba106e50..be369364da 
100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -20,23 +20,18 @@ serde_json = { workspace = true } env_logger = { workspace = true } types = { workspace = true } state_processing = { workspace = true } -int_to_bytes = { workspace = true } ethereum_hashing = { workspace = true } ethereum_ssz = { workspace = true } environment = { workspace = true } eth2_network_config = { workspace = true } -genesis = { workspace = true } deposit_contract = { workspace = true } tree_hash = { workspace = true } clap_utils = { workspace = true } lighthouse_network = { workspace = true } -validator_dir = { workspace = true, features = ["insecure_keys"] } +validator_dir = { workspace = true } lighthouse_version = { workspace = true } -directory = { workspace = true } account_utils = { workspace = true } eth2_wallet = { workspace = true } -eth1_test_rig = { workspace = true } -sensitive_url = { workspace = true } eth2 = { workspace = true } snap = { workspace = true } beacon_chain = { workspace = true } diff --git a/lcli/src/change_genesis_time.rs b/lcli/src/change_genesis_time.rs deleted file mode 100644 index f75652c768..0000000000 --- a/lcli/src/change_genesis_time.rs +++ /dev/null @@ -1,45 +0,0 @@ -use clap::ArgMatches; -use eth2_network_config::Eth2NetworkConfig; -use ssz::Encode; -use std::fs::File; -use std::io::{Read, Write}; -use std::path::PathBuf; -use types::{BeaconState, EthSpec}; - -pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { - let path = matches - .value_of("ssz-state") - .ok_or("ssz-state not specified")? - .parse::() - .map_err(|e| format!("Unable to parse ssz-state: {}", e))?; - - let genesis_time = matches - .value_of("genesis-time") - .ok_or("genesis-time not specified")? 
- .parse::() - .map_err(|e| format!("Unable to parse genesis-time: {}", e))?; - - let eth2_network_config = Eth2NetworkConfig::load(testnet_dir)?; - let spec = ð2_network_config.chain_spec::()?; - - let mut state: BeaconState = { - let mut file = File::open(&path).map_err(|e| format!("Unable to open file: {}", e))?; - - let mut ssz = vec![]; - - file.read_to_end(&mut ssz) - .map_err(|e| format!("Unable to read file: {}", e))?; - - BeaconState::from_ssz_bytes(&ssz, spec) - .map_err(|e| format!("Unable to decode SSZ: {:?}", e))? - }; - - *state.genesis_time_mut() = genesis_time; - - let mut file = File::create(path).map_err(|e| format!("Unable to create file: {}", e))?; - - file.write_all(&state.as_ssz_bytes()) - .map_err(|e| format!("Unable to write to file: {}", e))?; - - Ok(()) -} diff --git a/lcli/src/create_payload_header.rs b/lcli/src/create_payload_header.rs deleted file mode 100644 index 974a34591f..0000000000 --- a/lcli/src/create_payload_header.rs +++ /dev/null @@ -1,67 +0,0 @@ -use clap::ArgMatches; -use clap_utils::{parse_optional, parse_required}; -use ssz::Encode; -use std::fs::File; -use std::io::Write; -use std::time::{SystemTime, UNIX_EPOCH}; -use types::{ - EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, - ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, - ForkName, -}; - -pub fn run(matches: &ArgMatches) -> Result<(), String> { - let eth1_block_hash = parse_required(matches, "execution-block-hash")?; - let genesis_time = parse_optional(matches, "genesis-time")?.unwrap_or( - SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("Unable to get time: {:?}", e))? 
- .as_secs(), - ); - let base_fee_per_gas = parse_required(matches, "base-fee-per-gas")?; - let gas_limit = parse_required(matches, "gas-limit")?; - let file_name = matches.value_of("file").ok_or("No file supplied")?; - let fork_name: ForkName = parse_optional(matches, "fork")?.unwrap_or(ForkName::Bellatrix); - - let execution_payload_header: ExecutionPayloadHeader = match fork_name { - ForkName::Base | ForkName::Altair => return Err("invalid fork name".to_string()), - ForkName::Bellatrix => ExecutionPayloadHeader::Bellatrix(ExecutionPayloadHeaderBellatrix { - gas_limit, - base_fee_per_gas, - timestamp: genesis_time, - block_hash: eth1_block_hash, - prev_randao: eth1_block_hash.into_root(), - ..ExecutionPayloadHeaderBellatrix::default() - }), - ForkName::Capella => ExecutionPayloadHeader::Capella(ExecutionPayloadHeaderCapella { - gas_limit, - base_fee_per_gas, - timestamp: genesis_time, - block_hash: eth1_block_hash, - prev_randao: eth1_block_hash.into_root(), - ..ExecutionPayloadHeaderCapella::default() - }), - ForkName::Deneb => ExecutionPayloadHeader::Deneb(ExecutionPayloadHeaderDeneb { - gas_limit, - base_fee_per_gas, - timestamp: genesis_time, - block_hash: eth1_block_hash, - prev_randao: eth1_block_hash.into_root(), - ..ExecutionPayloadHeaderDeneb::default() - }), - ForkName::Electra => ExecutionPayloadHeader::Electra(ExecutionPayloadHeaderElectra { - gas_limit, - base_fee_per_gas, - timestamp: genesis_time, - block_hash: eth1_block_hash, - prev_randao: eth1_block_hash.into_root(), - ..ExecutionPayloadHeaderElectra::default() - }), - }; - - let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?; - let bytes = execution_payload_header.as_ssz_bytes(); - file.write_all(bytes.as_slice()) - .map_err(|_| "Unable to write to file".to_string())?; - Ok(()) -} diff --git a/lcli/src/deploy_deposit_contract.rs b/lcli/src/deploy_deposit_contract.rs deleted file mode 100644 index b920486c84..0000000000 --- 
a/lcli/src/deploy_deposit_contract.rs +++ /dev/null @@ -1,32 +0,0 @@ -use clap::ArgMatches; -use environment::Environment; -use types::EthSpec; - -use eth1_test_rig::{Http, Provider}; - -pub fn run(env: Environment, matches: &ArgMatches<'_>) -> Result<(), String> { - let eth1_http: String = clap_utils::parse_required(matches, "eth1-http")?; - let confirmations: usize = clap_utils::parse_required(matches, "confirmations")?; - let validator_count: Option = clap_utils::parse_optional(matches, "validator-count")?; - - let client = Provider::::try_from(ð1_http) - .map_err(|e| format!("Unable to connect to eth1 HTTP: {:?}", e))?; - - env.runtime().block_on(async { - let contract = eth1_test_rig::DepositContract::deploy(client, confirmations, None) - .await - .map_err(|e| format!("Failed to deploy deposit contract: {:?}", e))?; - - println!("Deposit contract address: {:?}", contract.address()); - - // Deposit insecure validators to the deposit contract created - if let Some(validator_count) = validator_count { - let amount = env.eth2_config.spec.max_effective_balance; - for i in 0..validator_count { - println!("Submitting deposit for validator {}...", i); - contract.deposit_deterministic_async::(i, amount).await?; - } - } - Ok(()) - }) -} diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs deleted file mode 100644 index 635a36ef70..0000000000 --- a/lcli/src/eth1_genesis.rs +++ /dev/null @@ -1,62 +0,0 @@ -use clap::ArgMatches; -use environment::Environment; -use eth2_network_config::Eth2NetworkConfig; -use genesis::{Eth1Config, Eth1Endpoint, Eth1GenesisService}; -use sensitive_url::SensitiveUrl; -use ssz::Encode; -use std::cmp::max; -use std::path::PathBuf; -use std::time::Duration; -use types::EthSpec; - -/// Interval between polling the eth1 node for genesis information. 
-pub const ETH1_GENESIS_UPDATE_INTERVAL: Duration = Duration::from_millis(7_000); - -pub fn run( - env: Environment, - testnet_dir: PathBuf, - matches: &ArgMatches<'_>, -) -> Result<(), String> { - let endpoints = matches - .value_of("eth1-endpoint") - .map(|e| { - warn!("The --eth1-endpoint flag is deprecated. Please use --eth1-endpoints instead"); - String::from(e) - }) - .or_else(|| matches.value_of("eth1-endpoints").map(String::from)); - - let mut eth2_network_config = Eth2NetworkConfig::load(testnet_dir.clone())?; - - let spec = eth2_network_config.chain_spec::()?; - - let mut config = Eth1Config::default(); - if let Some(v) = endpoints.clone() { - let endpoint = SensitiveUrl::parse(&v) - .map_err(|e| format!("Unable to parse eth1 endpoint URL: {:?}", e))?; - config.endpoint = Eth1Endpoint::NoAuth(endpoint); - } - config.deposit_contract_address = format!("{:?}", spec.deposit_contract_address); - config.deposit_contract_deploy_block = eth2_network_config.deposit_contract_deploy_block; - config.lowest_cached_block_number = eth2_network_config.deposit_contract_deploy_block; - config.follow_distance = spec.eth1_follow_distance / 2; - config.node_far_behind_seconds = max(5, config.follow_distance) * spec.seconds_per_eth1_block; - - let genesis_service = - Eth1GenesisService::new(config, env.core_context().log().clone(), spec.clone())?; - - env.runtime().block_on(async { - let _ = genesis_service - .wait_for_genesis_state::(ETH1_GENESIS_UPDATE_INTERVAL, spec) - .await - .map(move |genesis_state| { - eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes().into()); - eth2_network_config.force_write_to_file(testnet_dir) - }) - .map_err(|e| format!("Failed to find genesis: {}", e))?; - - info!("Starting service to produce genesis BeaconState from eth1"); - info!("Connecting to eth1 http endpoints: {:?}", endpoints); - - Ok(()) - }) -} diff --git a/lcli/src/insecure_validators.rs b/lcli/src/insecure_validators.rs deleted file mode 100644 index 
67d04c2cd5..0000000000 --- a/lcli/src/insecure_validators.rs +++ /dev/null @@ -1,64 +0,0 @@ -use clap::ArgMatches; -use std::fs; -use std::path::PathBuf; -use validator_dir::Builder as ValidatorBuilder; - -/// Generates validator directories with INSECURE, deterministic keypairs given the range -/// of indices, validator and secret directories. -pub fn generate_validator_dirs( - indices: &[usize], - validators_dir: PathBuf, - secrets_dir: PathBuf, -) -> Result<(), String> { - if !validators_dir.exists() { - fs::create_dir_all(&validators_dir) - .map_err(|e| format!("Unable to create validators dir: {:?}", e))?; - } - - if !secrets_dir.exists() { - fs::create_dir_all(&secrets_dir) - .map_err(|e| format!("Unable to create secrets dir: {:?}", e))?; - } - - for i in indices { - println!("Validator {}", i + 1); - - ValidatorBuilder::new(validators_dir.clone()) - .password_dir(secrets_dir.clone()) - .store_withdrawal_keystore(false) - .insecure_voting_keypair(*i) - .map_err(|e| format!("Unable to generate keys: {:?}", e))? 
- .build() - .map_err(|e| format!("Unable to build validator: {:?}", e))?; - } - - Ok(()) -} - -pub fn run(matches: &ArgMatches) -> Result<(), String> { - let validator_count: usize = clap_utils::parse_required(matches, "count")?; - let base_dir: PathBuf = clap_utils::parse_required(matches, "base-dir")?; - let node_count: Option = clap_utils::parse_optional(matches, "node-count")?; - if let Some(node_count) = node_count { - let validators_per_node = validator_count / node_count; - let validator_range = (0..validator_count).collect::>(); - let indices_range = validator_range - .chunks(validators_per_node) - .collect::>(); - - for (i, indices) in indices_range.iter().enumerate() { - let validators_dir = base_dir.join(format!("node_{}", i + 1)).join("validators"); - let secrets_dir = base_dir.join(format!("node_{}", i + 1)).join("secrets"); - generate_validator_dirs(indices, validators_dir, secrets_dir)?; - } - } else { - let validators_dir = base_dir.join("validators"); - let secrets_dir = base_dir.join("secrets"); - generate_validator_dirs( - (0..validator_count).collect::>().as_slice(), - validators_dir, - secrets_dir, - )?; - } - Ok(()) -} diff --git a/lcli/src/interop_genesis.rs b/lcli/src/interop_genesis.rs deleted file mode 100644 index f44edffd46..0000000000 --- a/lcli/src/interop_genesis.rs +++ /dev/null @@ -1,49 +0,0 @@ -use clap::ArgMatches; -use clap_utils::parse_ssz_optional; -use eth2_network_config::Eth2NetworkConfig; -use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; -use ssz::Encode; -use std::path::PathBuf; -use std::time::{SystemTime, UNIX_EPOCH}; -use types::{test_utils::generate_deterministic_keypairs, EthSpec, Hash256}; - -pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { - let validator_count = matches - .value_of("validator-count") - .ok_or("validator-count not specified")? 
- .parse::() - .map_err(|e| format!("Unable to parse validator-count: {}", e))?; - - let genesis_time = if let Some(genesis_time) = matches.value_of("genesis-time") { - genesis_time - .parse::() - .map_err(|e| format!("Unable to parse genesis-time: {}", e))? - } else { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("Unable to get time: {:?}", e))? - .as_secs() - }; - - let mut eth2_network_config = Eth2NetworkConfig::load(testnet_dir.clone())?; - - let mut spec = eth2_network_config.chain_spec::()?; - - if let Some(v) = parse_ssz_optional(matches, "genesis-fork-version")? { - spec.genesis_fork_version = v; - } - - let keypairs = generate_deterministic_keypairs(validator_count); - let genesis_state = interop_genesis_state::( - &keypairs, - genesis_time, - Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - None, - &spec, - )?; - - eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes().into()); - eth2_network_config.force_write_to_file(testnet_dir)?; - - Ok(()) -} diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 7b5c1598c9..911e9bdcac 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -1,26 +1,18 @@ #[macro_use] extern crate log; mod block_root; -mod change_genesis_time; mod check_deposit_data; -mod create_payload_header; -mod deploy_deposit_contract; -mod eth1_genesis; mod generate_bootnode_enr; mod indexed_attestations; -mod insecure_validators; -mod interop_genesis; mod mnemonic_validators; mod mock_el; -mod new_testnet; mod parse_ssz; -mod replace_state_pubkeys; mod skip_slots; mod state_root; mod transition_blocks; -use clap::{App, Arg, ArgMatches, SubCommand}; -use clap_utils::parse_optional; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::{parse_optional, FLAG_HEADER}; use environment::{EnvironmentBuilder, LoggerConfig}; use eth2_network_config::Eth2NetworkConfig; use parse_ssz::run_parse_ssz; @@ -32,944 +24,542 @@ use types::{EthSpec, EthSpecId}; fn main() { env_logger::init(); - let 
matches = App::new("Lighthouse CLI Tool") + let matches = Command::new("Lighthouse CLI Tool") .version(lighthouse_version::VERSION) + .display_order(0) .about("Performs various testing-related tasks, including defining testnets.") .arg( - Arg::with_name("spec") - .short("s") + Arg::new("spec") + .short('s') .long("spec") .value_name("STRING") - .takes_value(true) - .possible_values(&["minimal", "mainnet", "gnosis"]) + .action(ArgAction::Set) + .value_parser(["minimal", "mainnet", "gnosis"]) .default_value("mainnet") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("testnet-dir") - .short("d") + Arg::new("testnet-dir") + .short('d') .long("testnet-dir") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .global(true) - .help("The testnet dir."), + .help("The testnet dir.") + .display_order(0) ) .arg( - Arg::with_name("network") + Arg::new("network") .long("network") .value_name("NAME") - .takes_value(true) + .action(ArgAction::Set) .global(true) .help("The network to use. 
Defaults to mainnet.") .conflicts_with("testnet-dir") + .display_order(0) ) .subcommand( - SubCommand::with_name("skip-slots") + Command::new("skip-slots") .about( "Performs a state transition from some state across some number of skip slots", ) .arg( - Arg::with_name("output-path") + Arg::new("output-path") .long("output-path") .value_name("PATH") - .takes_value(true) - .help("Path to output a SSZ file."), + .action(ArgAction::Set) + .help("Path to output a SSZ file.") + .display_order(0) ) .arg( - Arg::with_name("pre-state-path") + Arg::new("pre-state-path") .long("pre-state-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("beacon-url") - .help("Path to a SSZ file of the pre-state."), + .help("Path to a SSZ file of the pre-state.") + .display_order(0) ) .arg( - Arg::with_name("beacon-url") + Arg::new("beacon-url") .long("beacon-url") .value_name("URL") - .takes_value(true) - .help("URL to a beacon-API provider."), + .action(ArgAction::Set) + .help("URL to a beacon-API provider.") + .display_order(0) ) .arg( - Arg::with_name("state-id") + Arg::new("state-id") .long("state-id") .value_name("STATE_ID") - .takes_value(true) + .action(ArgAction::Set) .requires("beacon-url") - .help("Identifier for a state as per beacon-API standards (slot, root, etc.)"), + .help("Identifier for a state as per beacon-API standards (slot, root, etc.)") + .display_order(0) ) .arg( - Arg::with_name("runs") + Arg::new("runs") .long("runs") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .default_value("1") - .help("Number of repeat runs, useful for benchmarking."), + .help("Number of repeat runs, useful for benchmarking.") + .display_order(0) ) .arg( - Arg::with_name("state-root") + Arg::new("state-root") .long("state-root") .value_name("HASH256") - .takes_value(true) - .help("Tree hash root of the provided state, to avoid computing it."), + .action(ArgAction::Set) + .help("Tree hash root of the provided state, to avoid 
computing it.") + .display_order(0) ) .arg( - Arg::with_name("slots") + Arg::new("slots") .long("slots") .value_name("INTEGER") - .takes_value(true) - .help("Number of slots to skip forward."), + .action(ArgAction::Set) + .help("Number of slots to skip forward.") + .display_order(0) ) .arg( - Arg::with_name("partial-state-advance") + Arg::new("partial-state-advance") .long("partial-state-advance") - .takes_value(false) - .help("If present, don't compute state roots when skipping forward."), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .help("If present, don't compute state roots when skipping forward.") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("transition-blocks") + Command::new("transition-blocks") .about("Performs a state transition given a pre-state and block") .arg( - Arg::with_name("pre-state-path") + Arg::new("pre-state-path") .long("pre-state-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("beacon-url") .requires("block-path") - .help("Path to load a BeaconState from as SSZ."), + .help("Path to load a BeaconState from as SSZ.") + .display_order(0) ) .arg( - Arg::with_name("block-path") + Arg::new("block-path") .long("block-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("beacon-url") .requires("pre-state-path") - .help("Path to load a SignedBeaconBlock from as SSZ."), + .help("Path to load a SignedBeaconBlock from as SSZ.") + .display_order(0) ) .arg( - Arg::with_name("post-state-output-path") + Arg::new("post-state-output-path") .long("post-state-output-path") .value_name("PATH") - .takes_value(true) - .help("Path to output the post-state."), + .action(ArgAction::Set) + .help("Path to output the post-state.") + .display_order(0) ) .arg( - Arg::with_name("pre-state-output-path") + Arg::new("pre-state-output-path") .long("pre-state-output-path") .value_name("PATH") - .takes_value(true) - .help("Path to output the pre-state, useful when used 
with --beacon-url."), + .action(ArgAction::Set) + .help("Path to output the pre-state, useful when used with --beacon-url.") + .display_order(0) ) .arg( - Arg::with_name("block-output-path") + Arg::new("block-output-path") .long("block-output-path") .value_name("PATH") - .takes_value(true) - .help("Path to output the block, useful when used with --beacon-url."), + .action(ArgAction::Set) + .help("Path to output the block, useful when used with --beacon-url.") + .display_order(0) ) .arg( - Arg::with_name("beacon-url") + Arg::new("beacon-url") .long("beacon-url") .value_name("URL") - .takes_value(true) - .help("URL to a beacon-API provider."), + .action(ArgAction::Set) + .help("URL to a beacon-API provider.") + .display_order(0) ) .arg( - Arg::with_name("block-id") + Arg::new("block-id") .long("block-id") .value_name("BLOCK_ID") - .takes_value(true) + .action(ArgAction::Set) .requires("beacon-url") - .help("Identifier for a block as per beacon-API standards (slot, root, etc.)"), + .help("Identifier for a block as per beacon-API standards (slot, root, etc.)") + .display_order(0) ) .arg( - Arg::with_name("runs") + Arg::new("runs") .long("runs") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .default_value("1") - .help("Number of repeat runs, useful for benchmarking."), + .help("Number of repeat runs, useful for benchmarking.") + .display_order(0) ) .arg( - Arg::with_name("no-signature-verification") + Arg::new("no-signature-verification") .long("no-signature-verification") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Disable signature verification.") + .display_order(0) ) .arg( - Arg::with_name("exclude-cache-builds") + Arg::new("exclude-cache-builds") .long("exclude-cache-builds") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, pre-build the committee and tree-hash caches without \ - including them in the timings."), + including them in the timings.") 
+ .display_order(0) ) .arg( - Arg::with_name("exclude-post-block-thc") + Arg::new("exclude-post-block-thc") .long("exclude-post-block-thc") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, don't rebuild the tree-hash-cache after applying \ - the block."), + the block.") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("pretty-ssz") + Command::new("pretty-ssz") .about("Parses SSZ-encoded data from a file") .arg( - Arg::with_name("format") - .short("f") + Arg::new("format") + .short('f') .long("format") .value_name("FORMAT") - .takes_value(true) - .required(true) - .default_value("json") - .possible_values(&["json", "yaml"]) - .help("Output format to use") - ) - .arg( - Arg::with_name("type") - .value_name("TYPE") - .takes_value(true) - .required(true) - .help("Type to decode"), - ) - .arg( - Arg::with_name("ssz-file") - .value_name("FILE") - .takes_value(true) - .required(true) - .help("Path to SSZ bytes"), - ) - ) - .subcommand( - SubCommand::with_name("deploy-deposit-contract") - .about( - "Deploy a testing eth1 deposit contract.", - ) - .arg( - Arg::with_name("eth1-http") - .long("eth1-http") - .short("e") - .value_name("ETH1_HTTP_PATH") - .help("Path to an Eth1 JSON-RPC IPC endpoint") - .takes_value(true) - .required(true) - ) - .arg( - Arg::with_name("confirmations") - .value_name("INTEGER") - .long("confirmations") - .takes_value(true) - .default_value("3") - .help("The number of block confirmations before declaring the contract deployed."), - ) - .arg( - Arg::with_name("validator-count") - .value_name("VALIDATOR_COUNT") - .long("validator-count") - .takes_value(true) - .help("If present, makes `validator_count` number of INSECURE deterministic deposits after \ - deploying the deposit contract." 
- ), - ) - ) - .subcommand( - SubCommand::with_name("eth1-genesis") - .about("Listens to the eth1 chain and finds the genesis beacon state") - .arg( - Arg::with_name("eth1-endpoint") - .short("e") - .long("eth1-endpoint") - .value_name("HTTP_SERVER") - .takes_value(true) - .help("Deprecated. Use --eth1-endpoints."), - ) - .arg( - Arg::with_name("eth1-endpoints") - .long("eth1-endpoints") - .value_name("HTTP_SERVER_LIST") - .takes_value(true) - .conflicts_with("eth1-endpoint") - .help( - "One or more comma-delimited URLs to eth1 JSON-RPC http APIs. \ - If multiple endpoints are given the endpoints are used as \ - fallback in the given order.", - ), - ), - ) - .subcommand( - SubCommand::with_name("interop-genesis") - .about("Produces an interop-compatible genesis state using deterministic keypairs") - .arg( - Arg::with_name("validator-count") - .long("validator-count") - .index(1) - .value_name("INTEGER") - .takes_value(true) - .default_value("1024") - .help("The number of validators in the genesis state."), - ) - .arg( - Arg::with_name("genesis-time") - .long("genesis-time") - .short("t") - .value_name("UNIX_EPOCH") - .takes_value(true) - .help("The value for state.genesis_time. Defaults to now."), - ) - .arg( - Arg::with_name("genesis-fork-version") - .long("genesis-fork-version") - .value_name("HEX") - .takes_value(true) - .help( - "Used to avoid reply attacks between testnets. 
Recommended to set to - non-default.", - ), - ), - ) - .subcommand( - SubCommand::with_name("change-genesis-time") - .about( - "Loads a file with an SSZ-encoded BeaconState and modifies the genesis time.", - ) - .arg( - Arg::with_name("ssz-state") - .index(1) - .value_name("PATH") - .takes_value(true) - .required(true) - .help("The path to the SSZ file"), - ) - .arg( - Arg::with_name("genesis-time") - .index(2) - .value_name("UNIX_EPOCH") - .takes_value(true) - .required(true) - .help("The value for state.genesis_time."), - ), - ) - .subcommand( - SubCommand::with_name("replace-state-pubkeys") - .about( - "Loads a file with an SSZ-encoded BeaconState and replaces \ - all the validator pubkeys with ones derived from the mnemonic \ - such that validator indices correspond to EIP-2334 voting keypair \ - derivation paths.", - ) - .arg( - Arg::with_name("ssz-state") - .index(1) - .value_name("PATH") - .takes_value(true) - .required(true) - .help("The path to the SSZ file"), - ) - .arg( - Arg::with_name("mnemonic") - .index(2) - .value_name("BIP39_MNENMONIC") - .takes_value(true) - .default_value( - "replace nephew blur decorate waste convince soup column \ - orient excite play baby", - ) - .help("The mnemonic for key derivation."), - ), - ) - .subcommand( - SubCommand::with_name("create-payload-header") - .about("Generates an SSZ file containing bytes for an `ExecutionPayloadHeader`. \ - Useful as input for `lcli new-testnet --execution-payload-header FILE`. If `--fork` \ - is not provided, a payload header for the `Bellatrix` fork will be created.") - .arg( - Arg::with_name("execution-block-hash") - .long("execution-block-hash") - .value_name("BLOCK_HASH") - .takes_value(true) - .help("The block hash used when generating an execution payload. 
This \ - value is used for `execution_payload_header.block_hash` as well as \ - `execution_payload_header.random`") - .default_value( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ) - .arg( - Arg::with_name("genesis-time") - .long("genesis-time") - .value_name("INTEGER") - .takes_value(true) - .help("The genesis time when generating an execution payload.") - ) - .arg( - Arg::with_name("base-fee-per-gas") - .long("base-fee-per-gas") - .value_name("INTEGER") - .takes_value(true) - .help("The base fee per gas field in the execution payload generated.") - .default_value("1000000000"), - ) - .arg( - Arg::with_name("gas-limit") - .long("gas-limit") - .value_name("INTEGER") - .takes_value(true) - .help("The gas limit field in the execution payload generated.") - .default_value("30000000"), - ) - .arg( - Arg::with_name("file") - .long("file") - .value_name("FILE") - .takes_value(true) - .required(true) - .help("Output file"), - ).arg( - Arg::with_name("fork") - .long("fork") - .value_name("FORK") - .takes_value(true) - .default_value("bellatrix") - .help("The fork for which the execution payload header should be created.") - .possible_values(&["bellatrix", "capella", "deneb", "electra"]) - ) - ) - .subcommand( - SubCommand::with_name("new-testnet") - .about( - "Produce a new testnet directory. 
If any of the optional flags are not - supplied the values will remain the default for the --spec flag", - ) - .arg( - Arg::with_name("force") - .long("force") - .short("f") - .takes_value(false) - .help("Overwrites any previous testnet configurations"), - ) - .arg( - Arg::with_name("interop-genesis-state") - .long("interop-genesis-state") - .takes_value(false) - .help( - "If present, a interop-style genesis.ssz file will be generated.", - ), - ) - .arg( - Arg::with_name("derived-genesis-state") - .long("derived-genesis-state") - .takes_value(false) - .help( - "If present, a genesis.ssz file will be generated with keys generated from a given mnemonic.", - ), - ) - .arg( - Arg::with_name("mnemonic-phrase") - .long("mnemonic-phrase") - .value_name("MNEMONIC_PHRASE") - .takes_value(true) - .requires("derived-genesis-state") - .help("The mnemonic with which we generate the validator keys for a derived genesis state"), - ) - .arg( - Arg::with_name("min-genesis-time") - .long("min-genesis-time") - .value_name("UNIX_SECONDS") - .takes_value(true) - .help( - "The minimum permitted genesis time. For non-eth1 testnets will be - the genesis time. 
Defaults to now.", - ), - ) - .arg( - Arg::with_name("min-genesis-active-validator-count") - .long("min-genesis-active-validator-count") - .value_name("INTEGER") - .takes_value(true) - .help("The number of validators required to trigger eth2 genesis."), - ) - .arg( - Arg::with_name("genesis-delay") - .long("genesis-delay") - .value_name("SECONDS") - .takes_value(true) - .help("The delay between sufficient eth1 deposits and eth2 genesis."), - ) - .arg( - Arg::with_name("min-deposit-amount") - .long("min-deposit-amount") - .value_name("GWEI") - .takes_value(true) - .help("The minimum permitted deposit amount."), - ) - .arg( - Arg::with_name("max-effective-balance") - .long("max-effective-balance") - .value_name("GWEI") - .takes_value(true) - .help("The amount required to become a validator."), - ) - .arg( - Arg::with_name("effective-balance-increment") - .long("effective-balance-increment") - .value_name("GWEI") - .takes_value(true) - .help("The steps in effective balance calculation."), - ) - .arg( - Arg::with_name("ejection-balance") - .long("ejection-balance") - .value_name("GWEI") - .takes_value(true) - .help("The balance at which a validator gets ejected."), - ) - .arg( - Arg::with_name("eth1-follow-distance") - .long("eth1-follow-distance") - .value_name("ETH1_BLOCKS") - .takes_value(true) - .help("The distance to follow behind the eth1 chain head."), - ) - .arg( - Arg::with_name("genesis-fork-version") - .long("genesis-fork-version") - .value_name("HEX") - .takes_value(true) - .help( - "Used to avoid reply attacks between testnets. 
Recommended to set to - non-default.", - ), - ) - .arg( - Arg::with_name("seconds-per-slot") - .long("seconds-per-slot") - .value_name("SECONDS") - .takes_value(true) - .help("Eth2 slot time"), - ) - .arg( - Arg::with_name("seconds-per-eth1-block") - .long("seconds-per-eth1-block") - .value_name("SECONDS") - .takes_value(true) - .help("Eth1 block time"), - ) - .arg( - Arg::with_name("eth1-id") - .long("eth1-id") - .value_name("ETH1_ID") - .takes_value(true) - .help("The chain id and network id for the eth1 testnet."), - ) - .arg( - Arg::with_name("deposit-contract-address") - .long("deposit-contract-address") - .value_name("ETH1_ADDRESS") - .takes_value(true) - .required(true) - .help("The address of the deposit contract."), - ) - .arg( - Arg::with_name("deposit-contract-deploy-block") - .long("deposit-contract-deploy-block") - .value_name("ETH1_BLOCK_NUMBER") - .takes_value(true) - .default_value("0") - .help( - "The block the deposit contract was deployed. Setting this is a huge - optimization for nodes, please do it.", - ), - ) - .arg( - Arg::with_name("altair-fork-epoch") - .long("altair-fork-epoch") - .value_name("EPOCH") - .takes_value(true) - .help( - "The epoch at which to enable the Altair hard fork", - ), - ) - .arg( - Arg::with_name("bellatrix-fork-epoch") - .long("bellatrix-fork-epoch") - .value_name("EPOCH") - .takes_value(true) - .help( - "The epoch at which to enable the Bellatrix hard fork", - ), - ) - .arg( - Arg::with_name("capella-fork-epoch") - .long("capella-fork-epoch") - .value_name("EPOCH") - .takes_value(true) - .help( - "The epoch at which to enable the Capella hard fork", - ), - ) - .arg( - Arg::with_name("deneb-fork-epoch") - .long("deneb-fork-epoch") - .value_name("EPOCH") - .takes_value(true) - .help( - "The epoch at which to enable the Deneb hard fork", - ), - ) - .arg( - Arg::with_name("electra-fork-epoch") - .long("electra-fork-epoch") - .value_name("EPOCH") - .takes_value(true) - .help( - "The epoch at which to enable the Electra 
hard fork", - ), - ) - .arg( - Arg::with_name("ttd") - .long("ttd") - .value_name("TTD") - .takes_value(true) - .help( - "The terminal total difficulty", - ), - ) - .arg( - Arg::with_name("eth1-block-hash") - .long("eth1-block-hash") - .value_name("BLOCK_HASH") - .takes_value(true) - .help("The eth1 block hash used when generating a genesis state."), - ) - .arg( - Arg::with_name("execution-payload-header") - .long("execution-payload-header") - .value_name("FILE") - .takes_value(true) + .action(ArgAction::Set) .required(false) - .help("Path to file containing `ExecutionPayloadHeader` SSZ bytes to be \ - used in the genesis state."), + .default_value("json") + .value_parser(["json", "yaml"]) + .help("Output format to use") + .display_order(0) ) .arg( - Arg::with_name("validator-count") - .long("validator-count") - .value_name("INTEGER") - .takes_value(true) - .help("The number of validators when generating a genesis state."), + Arg::new("type") + .value_name("TYPE") + .action(ArgAction::Set) + .required(true) + .help("Type to decode") + .display_order(0) ) .arg( - Arg::with_name("genesis-time") - .long("genesis-time") - .value_name("INTEGER") - .takes_value(true) - .help("The genesis time when generating a genesis state."), + Arg::new("ssz-file") + .value_name("FILE") + .action(ArgAction::Set) + .required(true) + .help("Path to SSZ bytes") + .display_order(0) ) - .arg( - Arg::with_name("proposer-score-boost") - .long("proposer-score-boost") - .value_name("INTEGER") - .takes_value(true) - .help("The proposer score boost to apply as a percentage, e.g. 
70 = 70%"), - ) - ) .subcommand( - SubCommand::with_name("check-deposit-data") + Command::new("check-deposit-data") .about("Checks the integrity of some deposit data.") .arg( - Arg::with_name("deposit-amount") + Arg::new("deposit-amount") .index(1) .value_name("GWEI") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The amount (in Gwei) that was deposited"), + .help("The amount (in Gwei) that was deposited") + .display_order(0) ) .arg( - Arg::with_name("deposit-data") + Arg::new("deposit-data") .index(2) .value_name("HEX") - .takes_value(true) + .action(ArgAction::Set) .required(true) .help( "A 0x-prefixed hex string of the deposit data. Should include the function signature.", - ), + ) + .display_order(0) ), ) .subcommand( - SubCommand::with_name("generate-bootnode-enr") + Command::new("generate-bootnode-enr") .about("Generates an ENR address to be used as a pre-genesis boot node.") .arg( - Arg::with_name("ip") + Arg::new("ip") .long("ip") .value_name("IP_ADDRESS") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The IP address to be included in the ENR and used for discovery"), + .help("The IP address to be included in the ENR and used for discovery") + .display_order(0) ) .arg( - Arg::with_name("udp-port") + Arg::new("udp-port") .long("udp-port") .value_name("UDP_PORT") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The UDP port to be included in the ENR and used for discovery"), + .help("The UDP port to be included in the ENR and used for discovery") + .display_order(0) ) .arg( - Arg::with_name("tcp-port") + Arg::new("tcp-port") .long("tcp-port") .value_name("TCP_PORT") - .takes_value(true) + .action(ArgAction::Set) .required(true) .help( "The TCP port to be included in the ENR and used for application comms", - ), + ) + .display_order(0) ) .arg( - Arg::with_name("output-dir") + Arg::new("output-dir") .long("output-dir") .value_name("OUTPUT_DIRECTORY") - .takes_value(true) + 
.action(ArgAction::Set) .required(true) - .help("The directory in which to create the network dir"), + .help("The directory in which to create the network dir") + .display_order(0) ) .arg( - Arg::with_name("genesis-fork-version") + Arg::new("genesis-fork-version") .long("genesis-fork-version") .value_name("HEX") - .takes_value(true) + .action(ArgAction::Set) .required(true) .help( "Used to avoid reply attacks between testnets. Recommended to set to non-default.", - ), + ) + .display_order(0) ), ) .subcommand( - SubCommand::with_name("insecure-validators") - .about("Produces validator directories with INSECURE, deterministic keypairs.") - .arg( - Arg::with_name("count") - .long("count") - .value_name("COUNT") - .takes_value(true) - .required(true) - .help("Produces validators in the range of 0..count."), - ) - .arg( - Arg::with_name("base-dir") - .long("base-dir") - .value_name("BASE_DIR") - .takes_value(true) - .required(true) - .help("The base directory where validator keypairs and secrets are stored"), - ) - .arg( - Arg::with_name("node-count") - .long("node-count") - .value_name("NODE_COUNT") - .takes_value(true) - .help("The number of nodes to divide the validator keys to"), - ) - ) - .subcommand( - SubCommand::with_name("mnemonic-validators") + Command::new("mnemonic-validators") .about("Produces validator directories by deriving the keys from \ a mnemonic. 
For testing purposes only, DO NOT USE IN \ PRODUCTION!") .arg( - Arg::with_name("count") + Arg::new("count") .long("count") .value_name("COUNT") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("Produces validators in the range of 0..count."), + .help("Produces validators in the range of 0..count.") + .display_order(0) ) .arg( - Arg::with_name("base-dir") + Arg::new("base-dir") .long("base-dir") .value_name("BASE_DIR") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The base directory where validator keypairs and secrets are stored"), + .help("The base directory where validator keypairs and secrets are stored") + .display_order(0) ) .arg( - Arg::with_name("node-count") + Arg::new("node-count") .long("node-count") .value_name("NODE_COUNT") - .takes_value(true) - .help("The number of nodes to divide the validator keys to"), + .action(ArgAction::Set) + .help("The number of nodes to divide the validator keys to") + .display_order(0) ) .arg( - Arg::with_name("mnemonic-phrase") + Arg::new("mnemonic-phrase") .long("mnemonic-phrase") .value_name("MNEMONIC_PHRASE") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The mnemonic with which we generate the validator keys"), + .help("The mnemonic with which we generate the validator keys") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("indexed-attestations") + Command::new("indexed-attestations") .about("Convert attestations to indexed form, using the committees from a state.") .arg( - Arg::with_name("state") + Arg::new("state") .long("state") .value_name("SSZ_STATE") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("BeaconState to generate committees from (SSZ)"), + .help("BeaconState to generate committees from (SSZ)") + .display_order(0) ) .arg( - Arg::with_name("attestations") + Arg::new("attestations") .long("attestations") .value_name("JSON_ATTESTATIONS") - .takes_value(true) + .action(ArgAction::Set) .required(true) - 
.help("List of Attestations to convert to indexed form (JSON)"), + .help("List of Attestations to convert to indexed form (JSON)") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("block-root") + Command::new("block-root") .about("Computes the block root of some block.") .arg( - Arg::with_name("block-path") + Arg::new("block-path") .long("block-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("beacon-url") - .help("Path to load a SignedBeaconBlock from as SSZ."), + .help("Path to load a SignedBeaconBlock from as SSZ.") + .display_order(0) ) .arg( - Arg::with_name("beacon-url") + Arg::new("beacon-url") .long("beacon-url") .value_name("URL") - .takes_value(true) - .help("URL to a beacon-API provider."), + .action(ArgAction::Set) + .help("URL to a beacon-API provider.") + .display_order(0) ) .arg( - Arg::with_name("block-id") + Arg::new("block-id") .long("block-id") .value_name("BLOCK_ID") - .takes_value(true) + .action(ArgAction::Set) .requires("beacon-url") - .help("Identifier for a block as per beacon-API standards (slot, root, etc.)"), + .help("Identifier for a block as per beacon-API standards (slot, root, etc.)") + .display_order(0) ) .arg( - Arg::with_name("runs") + Arg::new("runs") .long("runs") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .default_value("1") - .help("Number of repeat runs, useful for benchmarking."), + .help("Number of repeat runs, useful for benchmarking.") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("state-root") + Command::new("state-root") .about("Computes the state root of some state.") .arg( - Arg::with_name("state-path") + Arg::new("state-path") .long("state-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("beacon-url") - .help("Path to load a BeaconState from as SSZ."), + .help("Path to load a BeaconState from as SSZ.") + .display_order(0) ) .arg( - Arg::with_name("beacon-url") + 
Arg::new("beacon-url") .long("beacon-url") .value_name("URL") - .takes_value(true) - .help("URL to a beacon-API provider."), + .action(ArgAction::Set) + .help("URL to a beacon-API provider.") + .display_order(0) ) .arg( - Arg::with_name("state-id") + Arg::new("state-id") .long("state-id") .value_name("BLOCK_ID") - .takes_value(true) + .action(ArgAction::Set) .requires("beacon-url") - .help("Identifier for a state as per beacon-API standards (slot, root, etc.)"), + .help("Identifier for a state as per beacon-API standards (slot, root, etc.)") + .display_order(0) ) .arg( - Arg::with_name("runs") + Arg::new("runs") .long("runs") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .default_value("1") - .help("Number of repeat runs, useful for benchmarking."), + .help("Number of repeat runs, useful for benchmarking.") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("mock-el") + Command::new("mock-el") .about("Creates a mock execution layer server. This is NOT SAFE and should only \ be used for testing and development on testnets. Do not use in production. Do not \ use on mainnet. 
It cannot perform validator duties.") .arg( - Arg::with_name("jwt-output-path") + Arg::new("jwt-output-path") .long("jwt-output-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("Path to write the JWT secret."), + .help("Path to write the JWT secret.") + .display_order(0) ) .arg( - Arg::with_name("listen-address") + Arg::new("listen-address") .long("listen-address") .value_name("IP_ADDRESS") - .takes_value(true) + .action(ArgAction::Set) .help("The server will listen on this address.") .default_value("127.0.0.1") + .display_order(0) ) .arg( - Arg::with_name("listen-port") + Arg::new("listen-port") .long("listen-port") .value_name("PORT") - .takes_value(true) + .action(ArgAction::Set) .help("The server will listen on this port.") .default_value("8551") + .display_order(0) ) .arg( - Arg::with_name("all-payloads-valid") + Arg::new("all-payloads-valid") .long("all-payloads-valid") - .takes_value(true) + .action(ArgAction::Set) .help("Controls the response to newPayload and forkchoiceUpdated. \ Set to 'true' to return VALID. Set to 'false' to return SYNCING.") .default_value("false") - .hidden(true) + .hide(true) + .display_order(0) ) .arg( - Arg::with_name("shanghai-time") + Arg::new("shanghai-time") .long("shanghai-time") .value_name("UNIX_TIMESTAMP") - .takes_value(true) + .action(ArgAction::Set) .help("The payload timestamp that enables Shanghai. Defaults to the mainnet value.") .default_value("1681338479") + .display_order(0) ) .arg( - Arg::with_name("cancun-time") + Arg::new("cancun-time") .long("cancun-time") .value_name("UNIX_TIMESTAMP") - .takes_value(true) + .action(ArgAction::Set) .help("The payload timestamp that enables Cancun. 
No default is provided \ until Cancun is triggered on mainnet.") + .display_order(0) ) .arg( - Arg::with_name("prague-time") + Arg::new("prague-time") .long("prague-time") .value_name("UNIX_TIMESTAMP") - .takes_value(true) + .action(ArgAction::Set) .help("The payload timestamp that enables Prague. No default is provided \ until Prague is triggered on mainnet.") + .display_order(0) ) ) .get_matches(); let result = matches - .value_of("spec") + .get_one::("spec") .ok_or_else(|| "Missing --spec flag".to_string()) - .and_then(FromStr::from_str) + .and_then(|s| FromStr::from_str(s)) .and_then(|eth_spec_id| match eth_spec_id { EthSpecId::Minimal => run(EnvironmentBuilder::minimal(), &matches), EthSpecId::Mainnet => run(EnvironmentBuilder::mainnet(), &matches), @@ -985,10 +575,7 @@ fn main() { } } -fn run( - env_builder: EnvironmentBuilder, - matches: &ArgMatches<'_>, -) -> Result<(), String> { +fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) -> Result<(), String> { let env = env_builder .multi_threaded_tokio_runtime() .map_err(|e| format!("should start tokio runtime: {:?}", e))? @@ -1020,9 +607,6 @@ fn run( (None, Some(network_name)) }; - // Lazily load either the testnet dir or the network config, as required. - // Some subcommands like new-testnet need the testnet dir but not the network config. 
- let get_testnet_dir = || testnet_dir.clone().ok_or("testnet-dir is required"); let get_network_config = || { if let Some(testnet_dir) = &testnet_dir { Eth2NetworkConfig::load(testnet_dir.clone()).map_err(|e| { @@ -1039,74 +623,42 @@ fn run( }; match matches.subcommand() { - ("transition-blocks", Some(matches)) => { + Some(("transition-blocks", matches)) => { let network_config = get_network_config()?; transition_blocks::run::(env, network_config, matches) .map_err(|e| format!("Failed to transition blocks: {}", e)) } - ("skip-slots", Some(matches)) => { + Some(("skip-slots", matches)) => { let network_config = get_network_config()?; skip_slots::run::(env, network_config, matches) .map_err(|e| format!("Failed to skip slots: {}", e)) } - ("pretty-ssz", Some(matches)) => { + Some(("pretty-ssz", matches)) => { let network_config = get_network_config()?; run_parse_ssz::(network_config, matches) .map_err(|e| format!("Failed to pretty print hex: {}", e)) } - ("deploy-deposit-contract", Some(matches)) => { - deploy_deposit_contract::run::(env, matches) - .map_err(|e| format!("Failed to run deploy-deposit-contract command: {}", e)) - } - ("eth1-genesis", Some(matches)) => { - let testnet_dir = get_testnet_dir()?; - eth1_genesis::run::(env, testnet_dir, matches) - .map_err(|e| format!("Failed to run eth1-genesis command: {}", e)) - } - ("interop-genesis", Some(matches)) => { - let testnet_dir = get_testnet_dir()?; - interop_genesis::run::(testnet_dir, matches) - .map_err(|e| format!("Failed to run interop-genesis command: {}", e)) - } - ("change-genesis-time", Some(matches)) => { - let testnet_dir = get_testnet_dir()?; - change_genesis_time::run::(testnet_dir, matches) - .map_err(|e| format!("Failed to run change-genesis-time command: {}", e)) - } - ("create-payload-header", Some(matches)) => create_payload_header::run::(matches) - .map_err(|e| format!("Failed to run create-payload-header command: {}", e)), - ("replace-state-pubkeys", Some(matches)) => { - let testnet_dir = 
get_testnet_dir()?; - replace_state_pubkeys::run::(testnet_dir, matches) - .map_err(|e| format!("Failed to run replace-state-pubkeys command: {}", e)) - } - ("new-testnet", Some(matches)) => { - let testnet_dir = get_testnet_dir()?; - new_testnet::run::(testnet_dir, matches) - .map_err(|e| format!("Failed to run new_testnet command: {}", e)) - } - ("check-deposit-data", Some(matches)) => check_deposit_data::run(matches) + Some(("check-deposit-data", matches)) => check_deposit_data::run(matches) .map_err(|e| format!("Failed to run check-deposit-data command: {}", e)), - ("generate-bootnode-enr", Some(matches)) => generate_bootnode_enr::run::(matches) + Some(("generate-bootnode-enr", matches)) => generate_bootnode_enr::run::(matches) .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)), - ("insecure-validators", Some(matches)) => insecure_validators::run(matches) - .map_err(|e| format!("Failed to run insecure-validators command: {}", e)), - ("mnemonic-validators", Some(matches)) => mnemonic_validators::run(matches) + Some(("mnemonic-validators", matches)) => mnemonic_validators::run(matches) .map_err(|e| format!("Failed to run mnemonic-validators command: {}", e)), - ("indexed-attestations", Some(matches)) => indexed_attestations::run::(matches) + Some(("indexed-attestations", matches)) => indexed_attestations::run::(matches) .map_err(|e| format!("Failed to run indexed-attestations command: {}", e)), - ("block-root", Some(matches)) => { + Some(("block-root", matches)) => { let network_config = get_network_config()?; block_root::run::(env, network_config, matches) .map_err(|e| format!("Failed to run block-root command: {}", e)) } - ("state-root", Some(matches)) => { + Some(("state-root", matches)) => { let network_config = get_network_config()?; state_root::run::(env, network_config, matches) .map_err(|e| format!("Failed to run state-root command: {}", e)) } - ("mock-el", Some(matches)) => mock_el::run::(env, matches) + Some(("mock-el", 
matches)) => mock_el::run::(env, matches) .map_err(|e| format!("Failed to run mock-el command: {}", e)), - (other, _) => Err(format!("Unknown subcommand {}. See --help.", other)), + Some((other, _)) => Err(format!("Unknown subcommand {}. See --help.", other)), + _ => Err("No subcommand provided. See --help.".to_string()), } } diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs deleted file mode 100644 index f6bfb2ac01..0000000000 --- a/lcli/src/new_testnet.rs +++ /dev/null @@ -1,393 +0,0 @@ -use account_utils::eth2_keystore::keypair_from_secret; -use clap::ArgMatches; -use clap_utils::{parse_optional, parse_required, parse_ssz_optional}; -use eth2_network_config::{Eth2NetworkConfig, GenesisStateSource, TRUSTED_SETUP_BYTES}; -use eth2_wallet::bip39::Seed; -use eth2_wallet::bip39::{Language, Mnemonic}; -use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType}; -use ethereum_hashing::hash; -use ssz::Decode; -use ssz::Encode; -use state_processing::process_activations; -use state_processing::upgrade::{ - upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, - upgrade_to_electra, -}; -use std::fs::File; -use std::io::Read; -use std::path::PathBuf; -use std::str::FromStr; -use std::time::{SystemTime, UNIX_EPOCH}; -use types::ExecutionBlockHash; -use types::{ - test_utils::generate_deterministic_keypairs, Address, BeaconState, ChainSpec, Config, Epoch, - Eth1Data, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, - ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, - ForkName, Hash256, Keypair, PublicKey, Validator, -}; - -pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { - let deposit_contract_address: Address = parse_required(matches, "deposit-contract-address")?; - let deposit_contract_deploy_block = parse_required(matches, "deposit-contract-deploy-block")?; - - let overwrite_files = matches.is_present("force"); - - if 
testnet_dir_path.exists() && !overwrite_files { - return Err(format!( - "{:?} already exists, will not overwrite. Use --force to overwrite", - testnet_dir_path - )); - } - - let mut spec = E::default_spec(); - - // Update the spec value if the flag was defined. Otherwise, leave it as the default. - macro_rules! maybe_update { - ($flag: tt, $var: ident) => { - if let Some(val) = parse_optional(matches, $flag)? { - spec.$var = val - } - }; - } - - spec.deposit_contract_address = deposit_contract_address; - - maybe_update!("min-genesis-time", min_genesis_time); - maybe_update!("min-deposit-amount", min_deposit_amount); - maybe_update!( - "min-genesis-active-validator-count", - min_genesis_active_validator_count - ); - maybe_update!("max-effective-balance", max_effective_balance); - maybe_update!("effective-balance-increment", effective_balance_increment); - maybe_update!("ejection-balance", ejection_balance); - maybe_update!("eth1-follow-distance", eth1_follow_distance); - maybe_update!("genesis-delay", genesis_delay); - maybe_update!("eth1-id", deposit_chain_id); - maybe_update!("eth1-id", deposit_network_id); - maybe_update!("seconds-per-slot", seconds_per_slot); - maybe_update!("seconds-per-eth1-block", seconds_per_eth1_block); - - if let Some(v) = parse_ssz_optional(matches, "genesis-fork-version")? { - spec.genesis_fork_version = v; - } - - if let Some(proposer_score_boost) = parse_optional(matches, "proposer-score-boost")? { - spec.proposer_score_boost = Some(proposer_score_boost); - } - - if let Some(fork_epoch) = parse_optional(matches, "altair-fork-epoch")? { - spec.altair_fork_epoch = Some(fork_epoch); - } - - if let Some(fork_epoch) = parse_optional(matches, "bellatrix-fork-epoch")? { - spec.bellatrix_fork_epoch = Some(fork_epoch); - } - - if let Some(fork_epoch) = parse_optional(matches, "capella-fork-epoch")? { - spec.capella_fork_epoch = Some(fork_epoch); - } - - if let Some(fork_epoch) = parse_optional(matches, "deneb-fork-epoch")? 
{ - spec.deneb_fork_epoch = Some(fork_epoch); - } - - if let Some(fork_epoch) = parse_optional(matches, "electra-fork-epoch")? { - spec.electra_fork_epoch = Some(fork_epoch); - } - - if let Some(ttd) = parse_optional(matches, "ttd")? { - spec.terminal_total_difficulty = ttd; - } - - let validator_count = parse_required(matches, "validator-count")?; - let execution_payload_header: Option> = - parse_optional(matches, "execution-payload-header")? - .map(|filename: String| { - let mut bytes = vec![]; - let mut file = File::open(filename.as_str()) - .map_err(|e| format!("Unable to open {}: {}", filename, e))?; - file.read_to_end(&mut bytes) - .map_err(|e| format!("Unable to read {}: {}", filename, e))?; - let fork_name = spec.fork_name_at_epoch(Epoch::new(0)); - match fork_name { - ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid( - "genesis fork must be post-merge".to_string(), - )), - ForkName::Bellatrix => { - ExecutionPayloadHeaderBellatrix::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Bellatrix) - } - ForkName::Capella => { - ExecutionPayloadHeaderCapella::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Capella) - } - ForkName::Deneb => { - ExecutionPayloadHeaderDeneb::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Deneb) - } - ForkName::Electra => { - ExecutionPayloadHeaderElectra::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Electra) - } - } - .map_err(|e| format!("SSZ decode failed: {:?}", e)) - }) - .transpose()?; - - let (eth1_block_hash, genesis_time) = if let Some(payload) = execution_payload_header.as_ref() { - let eth1_block_hash = - parse_optional(matches, "eth1-block-hash")?.unwrap_or_else(|| payload.block_hash()); - let genesis_time = - parse_optional(matches, "genesis-time")?.unwrap_or_else(|| payload.timestamp()); - (eth1_block_hash, genesis_time) - } else { - let eth1_block_hash = parse_required(matches, "eth1-block-hash").map_err(|_| { - "One of 
`--execution-payload-header` or `--eth1-block-hash` must be set".to_string() - })?; - let genesis_time = parse_optional(matches, "genesis-time")?.unwrap_or( - SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("Unable to get time: {:?}", e))? - .as_secs(), - ); - (eth1_block_hash, genesis_time) - }; - - let genesis_state_bytes = if matches.is_present("interop-genesis-state") { - let keypairs = generate_deterministic_keypairs(validator_count); - let keypairs: Vec<_> = keypairs.into_iter().map(|kp| (kp.clone(), kp)).collect(); - - let genesis_state = initialize_state_with_validators::( - &keypairs, - genesis_time, - eth1_block_hash.into_root(), - execution_payload_header, - &spec, - )?; - - Some(genesis_state.as_ssz_bytes()) - } else if matches.is_present("derived-genesis-state") { - let mnemonic_phrase: String = clap_utils::parse_required(matches, "mnemonic-phrase")?; - let mnemonic = Mnemonic::from_phrase(&mnemonic_phrase, Language::English).map_err(|e| { - format!( - "Unable to derive mnemonic from string {:?}: {:?}", - mnemonic_phrase, e - ) - })?; - let seed = Seed::new(&mnemonic, ""); - let keypairs = (0..validator_count as u32) - .map(|index| { - let (secret, _) = - recover_validator_secret_from_mnemonic(seed.as_bytes(), index, KeyType::Voting) - .unwrap(); - - let voting_keypair = keypair_from_secret(secret.as_bytes()).unwrap(); - - let (secret, _) = recover_validator_secret_from_mnemonic( - seed.as_bytes(), - index, - KeyType::Withdrawal, - ) - .unwrap(); - let withdrawal_keypair = keypair_from_secret(secret.as_bytes()).unwrap(); - (voting_keypair, withdrawal_keypair) - }) - .collect::>(); - let genesis_state = initialize_state_with_validators::( - &keypairs, - genesis_time, - eth1_block_hash.into_root(), - execution_payload_header, - &spec, - )?; - Some(genesis_state.as_ssz_bytes()) - } else { - None - }; - - let kzg_trusted_setup = if let Some(epoch) = spec.deneb_fork_epoch { - // Only load the trusted setup if the deneb fork epoch is 
set - if epoch != Epoch::max_value() { - Some(TRUSTED_SETUP_BYTES.to_vec()) - } else { - None - } - } else { - None - }; - let testnet = Eth2NetworkConfig { - deposit_contract_deploy_block, - boot_enr: Some(vec![]), - genesis_state_bytes: genesis_state_bytes.map(Into::into), - genesis_state_source: GenesisStateSource::IncludedBytes, - config: Config::from_chain_spec::(&spec), - kzg_trusted_setup, - }; - - testnet.write_to_file(testnet_dir_path, overwrite_files) -} - -/// Returns a `BeaconState` with the given validator keypairs embedded into the -/// genesis state. This allows us to start testnets without having to deposit validators -/// manually. -/// -/// The optional `execution_payload_header` allows us to start a network from the bellatrix -/// fork without the need to transition to altair and bellatrix. -/// -/// We need to ensure that `eth1_block_hash` is equal to the genesis block hash that is -/// generated from the execution side `genesis.json`. -fn initialize_state_with_validators( - keypairs: &[(Keypair, Keypair)], // Voting and Withdrawal keypairs - genesis_time: u64, - eth1_block_hash: Hash256, - execution_payload_header: Option>, - spec: &ChainSpec, -) -> Result, String> { - // If no header is provided, then start from a Bellatrix state by default - let default_header: ExecutionPayloadHeader = - ExecutionPayloadHeader::Bellatrix(ExecutionPayloadHeaderBellatrix { - block_hash: ExecutionBlockHash::from_root(eth1_block_hash), - parent_hash: ExecutionBlockHash::zero(), - ..ExecutionPayloadHeaderBellatrix::default() - }); - let execution_payload_header = execution_payload_header.unwrap_or(default_header); - // Empty eth1 data - let eth1_data = Eth1Data { - block_hash: eth1_block_hash, - deposit_count: 0, - deposit_root: Hash256::from_str( - "0xd70a234731285c6804c2a4f56711ddb8c82c99740f207854891028af34e27e5e", - ) - .unwrap(), // empty deposit tree root - }; - let mut state = BeaconState::new(genesis_time, eth1_data, spec); - - // Seed RANDAO with Eth1 
entropy - state.fill_randao_mixes_with(eth1_block_hash).unwrap(); - - for keypair in keypairs.iter() { - let withdrawal_credentials = |pubkey: &PublicKey| { - let mut credentials = hash(&pubkey.as_ssz_bytes()); - credentials[0] = spec.bls_withdrawal_prefix_byte; - Hash256::from_slice(&credentials) - }; - let amount = spec.max_effective_balance; - // Create a new validator. - let validator = Validator { - pubkey: keypair.0.pk.clone().into(), - withdrawal_credentials: withdrawal_credentials(&keypair.1.pk), - activation_eligibility_epoch: spec.far_future_epoch, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - effective_balance: std::cmp::min( - amount - amount % (spec.effective_balance_increment), - spec.max_effective_balance, - ), - slashed: false, - }; - state.validators_mut().push(validator).unwrap(); - state.balances_mut().push(amount).unwrap(); - } - - process_activations(&mut state, spec).unwrap(); - - if spec - .altair_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) - { - upgrade_to_altair(&mut state, spec).unwrap(); - - state.fork_mut().previous_version = spec.altair_fork_version; - } - - // Similarly, perform an upgrade to Bellatrix if configured from genesis. - if spec - .bellatrix_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) - { - upgrade_to_bellatrix(&mut state, spec).unwrap(); - - // Remove intermediate Altair fork from `state.fork`. - state.fork_mut().previous_version = spec.bellatrix_fork_version; - - // Override latest execution payload header. - // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/bellatrix/beacon-chain.md#testing - if let ExecutionPayloadHeader::Bellatrix(ref header) = execution_payload_header { - *state - .latest_execution_payload_header_bellatrix_mut() - .or(Err("mismatched fork".to_string()))? 
= header.clone(); - } - } - - // Similarly, perform an upgrade to Capella if configured from genesis. - if spec - .capella_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) - { - upgrade_to_capella(&mut state, spec).unwrap(); - - // Remove intermediate Bellatrix fork from `state.fork`. - state.fork_mut().previous_version = spec.capella_fork_version; - - // Override latest execution payload header. - // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/bellatrix/beacon-chain.md#testing - if let ExecutionPayloadHeader::Capella(ref header) = execution_payload_header { - *state - .latest_execution_payload_header_capella_mut() - .or(Err("mismatched fork".to_string()))? = header.clone(); - } - } - - // Similarly, perform an upgrade to Deneb if configured from genesis. - if spec - .deneb_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) - { - upgrade_to_deneb(&mut state, spec).unwrap(); - - // Remove intermediate Capella fork from `state.fork`. - state.fork_mut().previous_version = spec.deneb_fork_version; - - // Override latest execution payload header. - // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/bellatrix/beacon-chain.md#testing - if let ExecutionPayloadHeader::Deneb(ref header) = execution_payload_header { - *state - .latest_execution_payload_header_deneb_mut() - .or(Err("mismatched fork".to_string()))? = header.clone(); - } - } - - // Similarly, perform an upgrade to Electra if configured from genesis. - if spec - .electra_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) - { - upgrade_to_electra(&mut state, spec).unwrap(); - - // Remove intermediate Deneb fork from `state.fork`. - state.fork_mut().previous_version = spec.electra_fork_version; - - // Override latest execution payload header. 
- // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/bellatrix/beacon-chain.md#testing - if let ExecutionPayloadHeader::Electra(ref header) = execution_payload_header { - *state - .latest_execution_payload_header_electra_mut() - .or(Err("mismatched fork".to_string()))? = header.clone(); - } - } - - // Now that we have our validators, initialize the caches (including the committees) - state.build_caches(spec).unwrap(); - - // Set genesis validators root for domain separation and chain versioning - *state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache().unwrap(); - - // Sanity check for state fork matching config fork. - state - .fork_name(spec) - .map_err(|e| format!("state fork mismatch: {e:?}"))?; - - Ok(state) -} diff --git a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs index e86ffb73dc..3aa77e5700 100644 --- a/lcli/src/parse_ssz.rs +++ b/lcli/src/parse_ssz.rs @@ -31,8 +31,12 @@ pub fn run_parse_ssz( network_config: Eth2NetworkConfig, matches: &ArgMatches, ) -> Result<(), String> { - let type_str = matches.value_of("type").ok_or("No type supplied")?; - let filename = matches.value_of("ssz-file").ok_or("No file supplied")?; + let type_str = matches + .get_one::("type") + .ok_or("No type supplied")?; + let filename = matches + .get_one::("ssz-file") + .ok_or("No file supplied")?; let format = parse_required(matches, "format")?; let bytes = if filename.ends_with("ssz_snappy") { @@ -58,7 +62,7 @@ pub fn run_parse_ssz( // More fork-specific decoders may need to be added in future, but shouldn't be 100% necessary, // as the fork-generic decoder will always be available (requires correct --network flag). 
- match type_str { + match type_str.as_str() { "SignedBeaconBlock" => decode_and_print::>( &bytes, |bytes| SignedBeaconBlock::from_ssz_bytes(bytes, spec), diff --git a/lcli/src/replace_state_pubkeys.rs b/lcli/src/replace_state_pubkeys.rs deleted file mode 100644 index e8d012b16e..0000000000 --- a/lcli/src/replace_state_pubkeys.rs +++ /dev/null @@ -1,86 +0,0 @@ -use account_utils::{eth2_keystore::keypair_from_secret, mnemonic_from_phrase}; -use clap::ArgMatches; -use eth2_network_config::Eth2NetworkConfig; -use eth2_wallet::bip39::Seed; -use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType}; -use ssz::Encode; -use state_processing::common::DepositDataTree; -use std::fs::File; -use std::io::{Read, Write}; -use std::path::PathBuf; -use tree_hash::TreeHash; -use types::{BeaconState, DepositData, EthSpec, Hash256, SignatureBytes, DEPOSIT_TREE_DEPTH}; - -pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { - let path = matches - .value_of("ssz-state") - .ok_or("ssz-state not specified")? - .parse::() - .map_err(|e| format!("Unable to parse ssz-state: {}", e))?; - - let mnemonic_phrase = matches - .value_of("mnemonic") - .ok_or("mnemonic not specified")?; - - let eth2_network_config = Eth2NetworkConfig::load(testnet_dir)?; - let spec = ð2_network_config.chain_spec::()?; - - let mut state: BeaconState = { - let mut file = File::open(&path).map_err(|e| format!("Unable to open file: {}", e))?; - - let mut ssz = vec![]; - - file.read_to_end(&mut ssz) - .map_err(|e| format!("Unable to read file: {}", e))?; - - BeaconState::from_ssz_bytes(&ssz, spec) - .map_err(|e| format!("Unable to decode SSZ: {:?}", e))? 
- }; - - let mnemonic = mnemonic_from_phrase(mnemonic_phrase)?; - let seed = Seed::new(&mnemonic, ""); - - let mut deposit_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); - let mut deposit_root = Hash256::zero(); - let validators = state.validators_mut(); - for index in 0..validators.len() { - let (secret, _) = - recover_validator_secret_from_mnemonic(seed.as_bytes(), index as u32, KeyType::Voting) - .map_err(|e| format!("Unable to generate validator key: {:?}", e))?; - - let keypair = keypair_from_secret(secret.as_bytes()) - .map_err(|e| format!("Unable build keystore: {:?}", e))?; - - eprintln!("{}: {}", index, keypair.pk); - - validators.get_mut(index).unwrap().pubkey = keypair.pk.into(); - - // Update the deposit tree. - let mut deposit_data = DepositData { - pubkey: validators.get(index).unwrap().pubkey, - // Set this to a junk value since it's very time consuming to generate the withdrawal - // keys and it's not useful for the time being. - withdrawal_credentials: Hash256::zero(), - amount: spec.min_deposit_amount, - signature: SignatureBytes::empty(), - }; - deposit_data.signature = deposit_data.create_signature(&keypair.sk, spec); - deposit_tree - .push_leaf(deposit_data.tree_hash_root()) - .map_err(|e| format!("failed to create deposit tree: {:?}", e))?; - deposit_root = deposit_tree.root(); - } - - // Update the genesis validators root since we changed the validators. - *state.genesis_validators_root_mut() = state.validators().tree_hash_root(); - - // Update the deposit root with our simulated deposits. 
- state.eth1_data_mut().deposit_root = deposit_root; - - let mut file = File::create(path).map_err(|e| format!("Unable to create file: {}", e))?; - - file.write_all(&state.as_ssz_bytes()) - .map_err(|e| format!("Unable to write to file: {}", e))?; - - Ok(()) -} diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index d421c077d8..a2173f10df 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -75,7 +75,7 @@ pub fn run( let runs: usize = parse_required(matches, "runs")?; let slots: u64 = parse_required(matches, "slots")?; let cli_state_root: Option = parse_optional(matches, "state-root")?; - let partial: bool = matches.is_present("partial-state-advance"); + let partial: bool = matches.get_flag("partial-state-advance"); info!("Using {} spec", E::spec_name()); info!("Advancing {} slots", slots); diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 77fd352829..ba0c2efa51 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -117,9 +117,9 @@ pub fn run( let beacon_url: Option = parse_optional(matches, "beacon-url")?; let runs: usize = parse_required(matches, "runs")?; let config = Config { - no_signature_verification: matches.is_present("no-signature-verification"), - exclude_cache_builds: matches.is_present("exclude-cache-builds"), - exclude_post_block_thc: matches.is_present("exclude-post-block-thc"), + no_signature_verification: matches.get_flag("no-signature-verification"), + exclude_cache_builds: matches.get_flag("exclude-cache-builds"), + exclude_post_block_thc: matches.get_flag("exclude-post-block-thc"), }; info!("Using {} spec", E::spec_name()); diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 54faa03a31..b6d4166b6a 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -28,7 +28,6 @@ jemalloc = ["malloc_utils/jemalloc"] [dependencies] beacon_node = { workspace = true } slog = { workspace = true } -sloggers = { workspace = true } types = { workspace = true 
} bls = { workspace = true } ethereum_hashing = { workspace = true } @@ -54,7 +53,6 @@ unused_port = { workspace = true } database_manager = { path = "../database_manager" } slasher = { workspace = true } validator_manager = { path = "../validator_manager" } -tracing-subscriber = { workspace = true } logging = { workspace = true } [dev-dependencies] diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index e59b1d455a..a83a7a9157 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -36,7 +36,7 @@ use {futures::channel::oneshot, std::cell::RefCell}; pub use task_executor::test_utils::null_logger; -const LOG_CHANNEL_SIZE: usize = 2048; +const LOG_CHANNEL_SIZE: usize = 16384; const SSE_LOG_CHANNEL_SIZE: usize = 2048; /// The maximum time in seconds the client will wait for all internal tasks to shutdown. const MAXIMUM_SHUTDOWN_TIME: u64 = 15; diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 932b125dc6..5743bedfd7 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -1,13 +1,16 @@ mod metrics; use beacon_node::ProductionBeaconNode; -use clap::{App, Arg, ArgMatches}; -use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, get_eth2_network_config}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::{ + flags::DISABLE_MALLOC_TUNING_FLAG, get_color_style, get_eth2_network_config, FLAG_HEADER, +}; use directory::{parse_path_or_default, DEFAULT_BEACON_NODE_DIR, DEFAULT_VALIDATOR_DIR}; use environment::{EnvironmentBuilder, LoggerConfig}; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK, HARDCODED_NET_NAMES}; use ethereum_hashing::have_sha_extensions; use futures::TryFutureExt; +use lazy_static::lazy_static; use lighthouse_version::VERSION; use malloc_utils::configure_memory_allocator; use slog::{crit, info}; @@ -18,6 +21,25 @@ use task_executor::ShutdownReason; use types::{EthSpec, EthSpecId}; use validator_client::ProductionValidatorClient; 
+lazy_static! { + pub static ref SHORT_VERSION: String = VERSION.replace("Lighthouse/", ""); + pub static ref LONG_VERSION: String = format!( + "{}\n\ + BLS library: {}\n\ + SHA256 hardware acceleration: {}\n\ + Allocator: {}\n\ + Profile: {}\n\ + Specs: mainnet (true), minimal ({}), gnosis ({})", + SHORT_VERSION.as_str(), + bls_library_name(), + have_sha_extensions(), + allocator_name(), + build_profile_name(), + cfg!(feature = "spec-minimal"), + cfg!(feature = "gnosis"), + ); +} + fn bls_library_name() -> &'static str { if cfg!(feature = "portable") { "blst-portable" @@ -54,41 +76,31 @@ fn main() { } // Parse the CLI parameters. - let matches = App::new("Lighthouse") - .version(VERSION.replace("Lighthouse/", "").as_str()) + let matches = Command::new("Lighthouse") + .version(SHORT_VERSION.as_str()) .author("Sigma Prime ") - .setting(clap::AppSettings::ColoredHelp) + .styles(get_color_style()) + .next_line_help(true) + .term_width(80) + .disable_help_flag(true) .about( "Ethereum 2.0 client by Sigma Prime. 
Provides a full-featured beacon \ node, a validator client and utilities for managing validator accounts.", ) - .long_version( - format!( - "{}\n\ - BLS library: {}\n\ - SHA256 hardware acceleration: {}\n\ - Allocator: {}\n\ - Profile: {}\n\ - Specs: mainnet (true), minimal ({}), gnosis ({})", - VERSION.replace("Lighthouse/", ""), - bls_library_name(), - have_sha_extensions(), - allocator_name(), - build_profile_name(), - cfg!(feature = "spec-minimal"), - cfg!(feature = "gnosis"), - ).as_str() - ) + .long_version(LONG_VERSION.as_str()) + .display_order(0) .arg( - Arg::with_name("env_log") - .short("l") + Arg::new("env_log") + .short('l') .help( "DEPRECATED Enables environment logging giving access to sub-protocol logs such as discv5 and libp2p", ) - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("logfile") + Arg::new("logfile") .long("logfile") .value_name("FILE") .help( @@ -97,115 +109,135 @@ fn main() { future logs are stored. 
\ Once the number of log files exceeds the value specified in \ `--logfile-max-number` the oldest log file will be overwritten.") - .takes_value(true) - .global(true), + .action(ArgAction::Set) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-debug-level") + Arg::new("logfile-debug-level") .long("logfile-debug-level") .value_name("LEVEL") .help("The verbosity level used when emitting logs to the log file.") - .takes_value(true) - .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) + .action(ArgAction::Set) + .value_parser(["info", "debug", "trace", "warn", "error", "crit"]) .default_value("debug") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-format") + Arg::new("logfile-format") .long("logfile-format") .value_name("FORMAT") .help("Specifies the log format used when emitting logs to the logfile.") - .possible_values(&["DEFAULT", "JSON"]) - .takes_value(true) + .value_parser(["DEFAULT", "JSON"]) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-max-size") + Arg::new("logfile-max-size") .long("logfile-max-size") .value_name("SIZE") .help( "The maximum size (in MB) each log file can grow to before rotating. If set \ to 0, background file logging is disabled.") - .takes_value(true) + .action(ArgAction::Set) .default_value("200") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-max-number") + Arg::new("logfile-max-number") .long("logfile-max-number") .value_name("COUNT") .help( "The maximum number of log files that will be stored. If set to 0, \ background file logging is disabled.") - .takes_value(true) + .action(ArgAction::Set) .default_value("5") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-compress") + Arg::new("logfile-compress") .long("logfile-compress") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "If present, compress old log files. 
This can help reduce the space needed \ to store old logs.") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-no-restricted-perms") + Arg::new("logfile-no-restricted-perms") .long("logfile-no-restricted-perms") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "If present, log files will be generated as world-readable meaning they can be read by \ any user on the machine. Note that logs can often contain sensitive information \ about your validator and so this flag should be used with caution. For Windows users, \ the log file permissions will be inherited from the parent folder.") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("log-format") + Arg::new("log-format") .long("log-format") .value_name("FORMAT") .help("Specifies the log format used when emitting logs to the terminal.") - .possible_values(&["JSON"]) - .takes_value(true) - .global(true), + .value_parser(["JSON"]) + .action(ArgAction::Set) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("log-color") + Arg::new("log-color") .long("log-color") .alias("log-colour") .help("Force outputting colors when emitting logs to the terminal.") - .global(true), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("disable-log-timestamp") + Arg::new("disable-log-timestamp") .long("disable-log-timestamp") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, do not include timestamps in logging output.") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("debug-level") + Arg::new("debug-level") .long("debug-level") .value_name("LEVEL") .help("Specifies the verbosity level used when emitting logs to the terminal.") - .takes_value(true) - .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) + .action(ArgAction::Set) + .value_parser(["info", "debug", "trace", "warn", "error", "crit"]) 
.global(true) - .default_value("info"), + .default_value("info") + .display_order(0) ) .arg( - Arg::with_name("datadir") + Arg::new("datadir") .long("datadir") - .short("d") + .short('d') .value_name("DIR") .global(true) .help( "Used to specify a custom root data directory for lighthouse keys and databases. \ Defaults to $HOME/.lighthouse/{network} where network is the value of the `network` flag \ Note: Users should specify separate custom datadirs for different networks.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("testnet-dir") - .short("t") + Arg::new("testnet-dir") + .short('t') .long("testnet-dir") .value_name("DIR") .help( @@ -213,57 +245,66 @@ fn main() { a hard-coded Lighthouse testnet. Only effective if there is no \ existing database.", ) - .takes_value(true) - .global(true), + .action(ArgAction::Set) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("network") + Arg::new("network") .long("network") .value_name("network") .help("Name of the Eth2 chain Lighthouse will sync and follow.") - .possible_values(HARDCODED_NET_NAMES) + .value_parser(HARDCODED_NET_NAMES.to_vec()) .conflicts_with("testnet-dir") - .takes_value(true) + .action(ArgAction::Set) .global(true) - + .display_order(0) ) .arg( - Arg::with_name("dump-config") + Arg::new("dump-config") .long("dump-config") - .hidden(true) + .hide(true) .help("Dumps the config to a desired location. Used for testing only.") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("dump-chain-config") + Arg::new("dump-chain-config") .long("dump-chain-config") - .hidden(true) + .hide(true) .help("Dumps the chain config to a desired location. 
Used for testing only.") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("immediate-shutdown") + Arg::new("immediate-shutdown") .long("immediate-shutdown") - .hidden(true) + .hide(true) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "Shuts down immediately after the Beacon Node or Validator has successfully launched. \ Used for testing only, DO NOT USE IN PRODUCTION.") .global(true) + .display_order(0) ) .arg( - Arg::with_name(DISABLE_MALLOC_TUNING_FLAG) + Arg::new(DISABLE_MALLOC_TUNING_FLAG) .long(DISABLE_MALLOC_TUNING_FLAG) .help( "If present, do not configure the system allocator. Providing this flag will \ generally increase memory usage, it should only be provided when debugging \ specific memory allocation issues." ) - .global(true), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("terminal-total-difficulty-override") + Arg::new("terminal-total-difficulty-override") .long("terminal-total-difficulty-override") .value_name("INTEGER") .help("Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. \ @@ -272,11 +313,12 @@ fn main() { the broad Ethereum community has elected to override the terminal difficulty. \ Incorrect use of this flag will cause your node to experience a consensus \ failure. Be extremely careful with this flag.") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("terminal-block-hash-override") + Arg::new("terminal-block-hash-override") .long("terminal-block-hash-override") .value_name("TERMINAL_BLOCK_HASH") .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. \ @@ -285,11 +327,12 @@ fn main() { Incorrect use of this flag will cause your node to experience a consensus \ failure. 
Be extremely careful with this flag.") .requires("terminal-block-hash-epoch-override") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("terminal-block-hash-epoch-override") + Arg::new("terminal-block-hash-epoch-override") .long("terminal-block-hash-epoch-override") .value_name("EPOCH") .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH \ @@ -298,11 +341,12 @@ fn main() { Incorrect use of this flag will cause your node to experience a consensus \ failure. Be extremely careful with this flag.") .requires("terminal-block-hash-override") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("safe-slots-to-import-optimistically") + Arg::new("safe-slots-to-import-optimistically") .long("safe-slots-to-import-optimistically") .value_name("INTEGER") .help("Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY \ @@ -311,11 +355,12 @@ fn main() { of an attack at the PoS transition block. Incorrect use of this flag can cause your \ node to possibly accept an invalid chain or sync more slowly. Be extremely careful with \ this flag.") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("genesis-state-url") + Arg::new("genesis-state-url") .long("genesis-state-url") .value_name("URL") .help( @@ -324,19 +369,30 @@ fn main() { If not supplied, a default URL or the --checkpoint-sync-url may be used. 
\ If the genesis state is already included in this binary then this value will be ignored.", ) - .takes_value(true) - .global(true), + .action(ArgAction::Set) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("genesis-state-url-timeout") + Arg::new("genesis-state-url-timeout") .long("genesis-state-url-timeout") .value_name("SECONDS") .help( "The timeout in seconds for the request to --genesis-state-url.", ) - .takes_value(true) + .action(ArgAction::Set) .default_value("180") - .global(true), + .global(true) + .display_order(0) + ) + .arg( + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER) ) .subcommand(beacon_node::cli_app()) .subcommand(boot_node::cli_app()) @@ -352,7 +408,7 @@ fn main() { // Only apply this optimization for the beacon node. It's the only process with a substantial // memory footprint. let is_beacon_node = matches.subcommand_name() == Some("beacon_node"); - if is_beacon_node && !matches.is_present(DISABLE_MALLOC_TUNING_FLAG) { + if is_beacon_node && !matches.get_flag(DISABLE_MALLOC_TUNING_FLAG) { if let Err(e) = configure_memory_allocator() { eprintln!( "Unable to configure the memory allocator: {} \n\ @@ -370,7 +426,7 @@ fn main() { if let Some(bootnode_matches) = matches.subcommand_matches("boot_node") { // The bootnode uses the main debug-level flag let debug_info = matches - .value_of("debug-level") + .get_one::("debug-level") .expect("Debug-level must be present") .into(); @@ -430,53 +486,53 @@ fn run( } let debug_level = matches - .value_of("debug-level") + .get_one::("debug-level") .ok_or("Expected --debug-level flag")?; - let log_format = matches.value_of("log-format"); + let log_format = matches.get_one::("log-format"); - let log_color = matches.is_present("log-color"); + let log_color = matches.get_flag("log-color"); - let disable_log_timestamp = matches.is_present("disable-log-timestamp"); + let 
disable_log_timestamp = matches.get_flag("disable-log-timestamp"); let logfile_debug_level = matches - .value_of("logfile-debug-level") + .get_one::("logfile-debug-level") .ok_or("Expected --logfile-debug-level flag")?; let logfile_format = matches - .value_of("logfile-format") + .get_one::("logfile-format") // Ensure that `logfile-format` defaults to the value of `log-format`. - .or_else(|| matches.value_of("log-format")); + .or_else(|| matches.get_one::("log-format")); let logfile_max_size: u64 = matches - .value_of("logfile-max-size") + .get_one::("logfile-max-size") .ok_or("Expected --logfile-max-size flag")? .parse() .map_err(|e| format!("Failed to parse `logfile-max-size`: {:?}", e))?; let logfile_max_number: usize = matches - .value_of("logfile-max-number") + .get_one::("logfile-max-number") .ok_or("Expected --logfile-max-number flag")? .parse() .map_err(|e| format!("Failed to parse `logfile-max-number`: {:?}", e))?; - let logfile_compress = matches.is_present("logfile-compress"); + let logfile_compress = matches.get_flag("logfile-compress"); - let logfile_restricted = !matches.is_present("logfile-no-restricted-perms"); + let logfile_restricted = !matches.get_flag("logfile-no-restricted-perms"); // Construct the path to the log file. let mut log_path: Option = clap_utils::parse_optional(matches, "logfile")?; if log_path.is_none() { log_path = match matches.subcommand() { - ("beacon_node", _) => Some( + Some(("beacon_node", _)) => Some( parse_path_or_default(matches, "datadir")? .join(DEFAULT_BEACON_NODE_DIR) .join("logs") .join("beacon") .with_extension("log"), ), - ("validator_client", Some(vc_matches)) => { - let base_path = if vc_matches.is_present("validators-dir") { + Some(("validator_client", vc_matches)) => { + let base_path = if vc_matches.contains_id("validators-dir") { parse_path_or_default(vc_matches, "validators-dir")? 
} else { parse_path_or_default(matches, "datadir")?.join(DEFAULT_VALIDATOR_DIR) @@ -495,9 +551,9 @@ fn run( let sse_logging = { if let Some(bn_matches) = matches.subcommand_matches("beacon_node") { - bn_matches.is_present("gui") + bn_matches.get_flag("gui") } else if let Some(vc_matches) = matches.subcommand_matches("validator_client") { - vc_matches.is_present("http") + vc_matches.get_flag("http") } else { false } @@ -626,13 +682,13 @@ fn run( ); match matches.subcommand() { - ("beacon_node", Some(matches)) => { + Some(("beacon_node", matches)) => { let context = environment.core_context(); let log = context.log().clone(); let executor = context.executor.clone(); let mut config = beacon_node::get_config::(matches, &context)?; config.logger_config = logger_config; - let shutdown_flag = matches.is_present("immediate-shutdown"); + let shutdown_flag = matches.get_flag("immediate-shutdown"); // Dump configs if `dump-config` or `dump-chain-config` flags are set clap_utils::check_dump_configs::<_, E>(matches, &config, &context.eth2_config.spec)?; executor.clone().spawn( @@ -653,13 +709,13 @@ fn run( "beacon_node", ); } - ("validator_client", Some(matches)) => { + Some(("validator_client", matches)) => { let context = environment.core_context(); let log = context.log().clone(); let executor = context.executor.clone(); let config = validator_client::Config::from_cli(matches, context.log()) .map_err(|e| format!("Unable to initialize validator config: {}", e))?; - let shutdown_flag = matches.is_present("immediate-shutdown"); + let shutdown_flag = matches.get_flag("immediate-shutdown"); // Dump configs if `dump-config` or `dump-chain-config` flags are set clap_utils::check_dump_configs::<_, E>(matches, &config, &context.eth2_config.spec)?; if !shutdown_flag { diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 7dfde69d3a..f8e1182e89 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -636,6 +636,26 @@ fn 
builder_fallback_flags() { ); } +#[test] +fn builder_get_header_timeout() { + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-header-timeout"), + Some("1500"), + |config| { + assert_eq!( + config + .execution_layer + .as_ref() + .unwrap() + .builder_header_timeout, + Some(Duration::from_millis(1500)) + ); + }, + ); +} + #[test] fn builder_user_agent() { run_payload_builder_flag_test_with_config( @@ -1578,7 +1598,7 @@ fn empty_inbound_rate_limiter_flag() { #[test] fn disable_inbound_rate_limiter_flag() { CommandLineTest::new() - .flag("inbound-rate-limiter", Some("disabled")) + .flag("disable-inbound-rate-limiter", None) .run_with_zero_port() .with_config(|config| assert_eq!(config.network.inbound_rate_limiter_config, None)); } @@ -1846,6 +1866,19 @@ fn block_cache_size_flag() { .with_config(|config| assert_eq!(config.store.block_cache_size, new_non_zero_usize(4))); } #[test] +fn state_cache_size_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.store.state_cache_size, new_non_zero_usize(128))); +} +#[test] +fn state_cache_size_flag() { + CommandLineTest::new() + .flag("state-cache-size", Some("64")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.store.state_cache_size, new_non_zero_usize(64))); +} +#[test] fn historic_state_cache_size_flag() { CommandLineTest::new() .flag("historic-state-cache-size", Some("4")) @@ -2121,7 +2154,6 @@ fn slasher_broadcast_flag_no_args() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-max-db-size", Some("1")) - .flag("slasher-broadcast", None) .run_with_zero_port() .with_config(|config| { let slasher_config = config @@ -2300,7 +2332,7 @@ fn proposer_re_org_disallowed_offsets_default() { #[test] fn proposer_re_org_disallowed_offsets_override() { CommandLineTest::new() - .flag("--proposer-reorg-disallowed-offsets", Some("1,2,3")) + .flag("proposer-reorg-disallowed-offsets", Some("1,2,3")) 
.run_with_zero_port() .with_config(|config| { assert_eq!( @@ -2314,7 +2346,7 @@ fn proposer_re_org_disallowed_offsets_override() { #[should_panic] fn proposer_re_org_disallowed_offsets_invalid() { CommandLineTest::new() - .flag("--proposer-reorg-disallowed-offsets", Some("32,33,34")) + .flag("proposer-reorg-disallowed-offsets", Some("32,33,34")) .run_with_zero_port(); } diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index b85af5d2a8..5fe95bea3f 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -622,23 +622,8 @@ fn wrong_broadcast_flag() { #[test] fn latency_measurement_service() { - CommandLineTest::new().run().with_config(|config| { - assert!(config.enable_latency_measurement_service); - }); CommandLineTest::new() - .flag("latency-measurement-service", None) - .run() - .with_config(|config| { - assert!(config.enable_latency_measurement_service); - }); - CommandLineTest::new() - .flag("latency-measurement-service", Some("true")) - .run() - .with_config(|config| { - assert!(config.enable_latency_measurement_service); - }); - CommandLineTest::new() - .flag("latency-measurement-service", Some("false")) + .flag("disable-latency-measurement-service", None) .run() .with_config(|config| { assert!(!config.enable_latency_measurement_service); diff --git a/lighthouse/tests/validator_manager.rs b/lighthouse/tests/validator_manager.rs index fab1cfebf4..bca6a18ab5 100644 --- a/lighthouse/tests/validator_manager.rs +++ b/lighthouse/tests/validator_manager.rs @@ -55,7 +55,12 @@ impl CommandLineTest { } fn run(mut cmd: Command, should_succeed: bool) { - let output = cmd.output().expect("process should complete"); + let output = cmd + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .output() + .expect("process should complete"); if output.status.success() != should_succeed { let stdout = String::from_utf8(output.stdout).unwrap(); let stderr = 
String::from_utf8(output.stderr).unwrap(); diff --git a/scripts/cli.sh b/scripts/cli.sh index 2767ed73c8..6ca019b39e 100755 --- a/scripts/cli.sh +++ b/scripts/cli.sh @@ -12,14 +12,11 @@ write_to_file() { local file="$2" local program="$3" - # Remove first line of cmd to get rid of commit specific numbers. - cmd=${cmd#*$'\n'} - # We need to add the header and the backticks to create the code block. printf "# %s\n\n\`\`\`\n%s\n\`\`\`" "$program" "$cmd" > "$file" # Adjust the width of the help text and append to the end of file - sed -i -e '$a\'$'\n''' "$file" + sed -i -e '$a\'$'\n''\n''' "$file" } CMD=./target/release/lighthouse diff --git a/scripts/local_testnet/.gitignore b/scripts/local_testnet/.gitignore new file mode 100644 index 0000000000..98d8a5a630 --- /dev/null +++ b/scripts/local_testnet/.gitignore @@ -0,0 +1 @@ +logs diff --git a/scripts/local_testnet/anvil_test_node.sh b/scripts/local_testnet/anvil_test_node.sh deleted file mode 100755 index 41be917560..0000000000 --- a/scripts/local_testnet/anvil_test_node.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -set -Eeuo pipefail - -source ./vars.env - -exec anvil \ - --balance 1000000000 \ - --gas-limit 1000000000 \ - --accounts 10 \ - --mnemonic "$ETH1_NETWORK_MNEMONIC" \ - --block-time $SECONDS_PER_ETH1_BLOCK \ - --port 8545 \ - --chain-id "$CHAIN_ID" diff --git a/scripts/local_testnet/beacon_node.sh b/scripts/local_testnet/beacon_node.sh deleted file mode 100755 index 940fe2b858..0000000000 --- a/scripts/local_testnet/beacon_node.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env bash - -# -# Starts a beacon node based upon a genesis state created by `./setup.sh`. 
-# - -set -Eeuo pipefail - -source ./vars.env - -SUBSCRIBE_ALL_SUBNETS= -DEBUG_LEVEL=${DEBUG_LEVEL:-info} - -# Get options -while getopts "d:sh" flag; do - case "${flag}" in - d) DEBUG_LEVEL=${OPTARG};; - s) SUBSCRIBE_ALL_SUBNETS="--subscribe-all-subnets";; - h) - echo "Start a beacon node" - echo - echo "usage: $0 " - echo - echo "Options:" - echo " -s: pass --subscribe-all-subnets to 'lighthouse bn ...', default is not passed" - echo " -d: DEBUG_LEVEL, default info" - echo " -h: this help" - echo - echo "Positional arguments:" - echo " DATADIR Value for --datadir parameter" - echo " NETWORK-PORT Value for --enr-udp-port, --enr-tcp-port and --port" - echo " HTTP-PORT Value for --http-port" - echo " EXECUTION-ENDPOINT Value for --execution-endpoint" - echo " EXECUTION-JWT Value for --execution-jwt" - exit - ;; - esac -done - -# Get positional arguments -data_dir=${@:$OPTIND+0:1} -tcp_port=${@:$OPTIND+1:1} -quic_port=${@:$OPTIND+2:1} -http_port=${@:$OPTIND+3:1} -execution_endpoint=${@:$OPTIND+4:1} -execution_jwt=${@:$OPTIND+5:1} - -lighthouse_binary=lighthouse - -exec $lighthouse_binary \ - --debug-level $DEBUG_LEVEL \ - bn \ - $SUBSCRIBE_ALL_SUBNETS \ - --datadir $data_dir \ - --testnet-dir $TESTNET_DIR \ - --enable-private-discovery \ - --disable-peer-scoring \ - --staking \ - --enr-address 127.0.0.1 \ - --enr-udp-port $tcp_port \ - --enr-tcp-port $tcp_port \ - --enr-quic-port $quic_port \ - --port $tcp_port \ - --quic-port $quic_port \ - --http-port $http_port \ - --disable-packet-filter \ - --target-peers $((BN_COUNT - 1)) \ - --execution-endpoint $execution_endpoint \ - --execution-jwt $execution_jwt \ - $BN_ARGS diff --git a/scripts/local_testnet/bootnode.sh b/scripts/local_testnet/bootnode.sh deleted file mode 100755 index ca02a24140..0000000000 --- a/scripts/local_testnet/bootnode.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash - -# -# Generates a bootnode enr and saves it in $TESTNET/boot_enr.yaml -# Starts a bootnode from the generated enr. 
-# - -set -Eeuo pipefail - -source ./vars.env - -echo "Generating bootnode enr" - -lcli \ - generate-bootnode-enr \ - --ip 127.0.0.1 \ - --udp-port $BOOTNODE_PORT \ - --tcp-port $BOOTNODE_PORT \ - --genesis-fork-version $GENESIS_FORK_VERSION \ - --output-dir $DATADIR/bootnode - -bootnode_enr=`cat $DATADIR/bootnode/enr.dat` -echo "- $bootnode_enr" > $TESTNET_DIR/boot_enr.yaml - -echo "Generated bootnode enr and written to $TESTNET_DIR/boot_enr.yaml" - -DEBUG_LEVEL=${1:-info} - -echo "Starting bootnode" - -exec lighthouse boot_node \ - --testnet-dir $TESTNET_DIR \ - --port $BOOTNODE_PORT \ - --listen-address 127.0.0.1 \ - --disable-packet-filter \ - --network-dir $DATADIR/bootnode \ diff --git a/scripts/local_testnet/clean.sh b/scripts/local_testnet/clean.sh deleted file mode 100755 index cd915e470d..0000000000 --- a/scripts/local_testnet/clean.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -# -# Deletes all files associated with the local testnet. -# - -set -Eeuo pipefail - -source ./vars.env - -if [ -d $DATADIR ]; then - rm -rf $DATADIR -fi diff --git a/scripts/local_testnet/dump_logs.sh b/scripts/local_testnet/dump_logs.sh deleted file mode 100755 index 64b7942fb6..0000000000 --- a/scripts/local_testnet/dump_logs.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -# Print all the logs output from local testnet - -set -Eeuo pipefail - -source ./vars.env - -for f in "$TESTNET_DIR"/*.log -do - [[ -e "$f" ]] || break # handle the case of no *.log files - echo "=============================================================================" - echo "$f" - echo "=============================================================================" - cat "$f" - echo "" -done diff --git a/scripts/local_testnet/el_bootnode.sh b/scripts/local_testnet/el_bootnode.sh deleted file mode 100755 index ee437a491c..0000000000 --- a/scripts/local_testnet/el_bootnode.sh +++ /dev/null @@ -1,3 +0,0 @@ -priv_key="02fd74636e96a8ffac8e7b01b0de8dea94d6bcf4989513b38cf59eb32163ff91" 
-source ./vars.env -exec $EL_BOOTNODE_BINARY --nodekeyhex $priv_key \ No newline at end of file diff --git a/scripts/local_testnet/genesis.json b/scripts/local_testnet/genesis.json deleted file mode 100644 index 26003bed5d..0000000000 --- a/scripts/local_testnet/genesis.json +++ /dev/null @@ -1,863 +0,0 @@ -{ - "config": { - "chainId": 4242, - "homesteadBlock": 0, - "eip150Block": 0, - "eip155Block": 0, - "eip158Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0, - "istanbulBlock": 0, - "berlinBlock": 0, - "londonBlock": 0, - "mergeNetsplitBlock": 0, - "shanghaiTime": 0, - "cancunTime": 0, - "pragueTime": 0, - "terminalTotalDifficulty": 0, - "terminalTotalDifficultyPassed": true - }, - "alloc": { - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x6d6172697573766477000000" - }, - "0x7b8C3a386C0eea54693fFB0DA17373ffC9228139": { - "balance": "10000000000000000000000000" - }, - "0xdA2DD7560DB7e212B945fC72cEB54B7D8C886D77": { - "balance": "10000000000000000000000000" - }, - "0x0000000000000000000000000000000000000000": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000001": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000002": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000003": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000004": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000005": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000006": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000007": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000008": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000009": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000c": { - "balance": "1" - }, - 
"0x000000000000000000000000000000000000000d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000010": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000011": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000012": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000013": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000014": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000015": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000016": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000017": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000018": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000019": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000020": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000021": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000022": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000023": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000024": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000025": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000026": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000027": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000028": { - 
"balance": "1" - }, - "0x0000000000000000000000000000000000000029": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000030": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000031": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000032": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000033": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000034": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000035": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000036": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000037": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000038": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000039": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000040": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000041": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000042": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000043": { - "balance": "1" - }, - 
"0x0000000000000000000000000000000000000044": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000045": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000046": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000047": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000048": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000049": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000050": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000051": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000052": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000053": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000054": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000055": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000056": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000057": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000058": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000059": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005f": { - 
"balance": "1" - }, - "0x0000000000000000000000000000000000000060": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000061": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000062": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000063": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000064": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000065": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000066": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000067": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000068": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000069": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000070": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000071": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000072": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000073": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000074": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000075": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000076": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000077": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000078": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000079": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007a": { - "balance": "1" - }, - 
"0x000000000000000000000000000000000000007b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000080": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000081": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000082": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000083": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000084": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000085": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000086": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000087": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000088": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000089": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000090": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000091": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000092": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000093": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000094": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000095": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000096": { - 
"balance": "1" - }, - "0x0000000000000000000000000000000000000097": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000098": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000099": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009f": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000aa": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ab": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ac": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ad": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ae": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000af": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b1": { - "balance": "1" - }, - 
"0x00000000000000000000000000000000000000b2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ba": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000be": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bf": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ca": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cd": { - 
"balance": "1" - }, - "0x00000000000000000000000000000000000000ce": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cf": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000da": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000db": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000dc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000dd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000de": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000df": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e8": { - "balance": "1" - }, - 
"0x00000000000000000000000000000000000000e9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ea": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000eb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ec": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ed": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ee": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ef": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fa": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fe": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ff": { - "balance": "1" - }, - "0x4242424242424242424242424242424242424242": { - "balance": "0", - "code": 
"0x60806040526004361061003f5760003560e01c806301ffc9a71461004457806322895118146100a4578063621fd130146101ba578063c5f2892f14610244575b600080fd5b34801561005057600080fd5b506100906004803603602081101561006757600080fd5b50357fffffffff000000000000000000000000000000000000000000000000000000001661026b565b604080519115158252519081900360200190f35b6101b8600480360360808110156100ba57600080fd5b8101906020810181356401000000008111156100d557600080fd5b8201836020820111156100e757600080fd5b8035906020019184600183028401116401000000008311171561010957600080fd5b91939092909160208101903564010000000081111561012757600080fd5b82018360208201111561013957600080fd5b8035906020019184600183028401116401000000008311171561015b57600080fd5b91939092909160208101903564010000000081111561017957600080fd5b82018360208201111561018b57600080fd5b803590602001918460018302840111640100000000831117156101ad57600080fd5b919350915035610304565b005b3480156101c657600080fd5b506101cf6110b5565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102095781810151838201526020016101f1565b50505050905090810190601f1680156102365780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025057600080fd5b506102596110c7565b60408051918252519081900360200190f35b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a70000000000000000000000000000000000000000000000000000000014806102fe57507fffffffff0000000000000000000000000000000000000000000000000000000082167f8564090700000000000000000000000000000000000000000000000000000000145b92915050565b6030861461035d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118056026913960400191505060405180910390fd5b602084146103b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252603681526020018061179c6036913960400191505060405180910390fd5b6060821461040f576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600
4018080602001828103825260298152602001806118786029913960400191505060405180910390fd5b670de0b6b3a7640000341015610470576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118526026913960400191505060405180910390fd5b633b9aca003406156104cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260338152602001806117d26033913960400191505060405180910390fd5b633b9aca00340467ffffffffffffffff811115610535576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602781526020018061182b6027913960400191505060405180910390fd5b6060610540826114ba565b90507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c589898989858a8a6105756020546114ba565b6040805160a0808252810189905290819060208201908201606083016080840160c085018e8e80828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690910187810386528c815260200190508c8c808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690920188810386528c5181528c51602091820193918e019250908190849084905b83811015610648578181015183820152602001610630565b50505050905090810190601f1680156106755780820380516001836020036101000a031916815260200191505b5086810383528881526020018989808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018881038452895181528951602091820193918b019250908190849084905b838110156106ef5781810151838201526020016106d7565b50505050905090810190601f16801561071c5780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a1600060028a8a600060801b604051602001808484808284377fffffffffffffffffffffffffffffffff0000000000000000000000000000000090941691909301908152604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0818403018152601090920190819052815191955093508392506020850191508083835b602083106107fc57805182527ffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016107bf565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610859573d6000803e3d6000fd5b5050506040513d602081101561086e57600080fd5b5051905060006002806108846040848a8c6116fe565b6040516020018083838082843780830192505050925050506040516020818303038152906040526040518082805190602001908083835b602083106108f857805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016108bb565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610955573d6000803e3d6000fd5b5050506040513d602081101561096a57600080fd5b5051600261097b896040818d6116fe565b60405160009060200180848480828437919091019283525050604080518083038152602092830191829052805190945090925082918401908083835b602083106109f457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016109b7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610a51573d6000803e3d6000fd5b5050506040513d6020811015610a6657600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610ada57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610a9d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610b37573d6000803e3d6000fd5b5050506040513d6020811015610b4c57600080fd5b50516040805160208101858152929350600092600292839287928f928f92018383808284378083019250505093505050506040516020818303038152906040526040518082805190602001908083835b60208310610bd957805182527ffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffe09092019160209182019101610b9c565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610c36573d6000803e3d6000fd5b5050506040513d6020811015610c4b57600080fd5b50516040518651600291889160009188916020918201918291908601908083835b60208310610ca957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610c6c565b6001836020036101000a0380198251168184511680821785525050505050509050018367ffffffffffffffff191667ffffffffffffffff1916815260180182815260200193505050506040516020818303038152906040526040518082805190602001908083835b60208310610d4e57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610d11565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610dab573d6000803e3d6000fd5b5050506040513d6020811015610dc057600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610e3457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610df7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610e91573d6000803e3d6000fd5b5050506040513d6020811015610ea657600080fd5b50519050858114610f02576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260548152602001806117486054913960600191505060405180910390fd5b60205463ffffffff11610f60576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806117276021913960400191505060405180910390fd5b602080546001019081905560005b60208110156110a9578160011660011415610fa0578260008260208110610f9157fe5b0155506110ac9550505050505
0565b600260008260208110610faf57fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061102557805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610fe8565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015611082573d6000803e3d6000fd5b5050506040513d602081101561109757600080fd5b50519250600282049150600101610f6e565b50fe5b50505050505050565b60606110c26020546114ba565b905090565b6020546000908190815b60208110156112f05781600116600114156111e6576002600082602081106110f557fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061116b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161112e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156111c8573d6000803e3d6000fd5b5050506040513d60208110156111dd57600080fd5b505192506112e2565b600283602183602081106111f657fe5b015460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061126b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161122e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156112c8573d6000803e3d6000fd5b5050506040513d60208110156112dd57600080fd5b505192505b6002820491506001016110d1565b506002826112ff6020546114ba565b600060401b6040516020018084815260200183805190602001908083835b6020831061135a57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161131d565b51815160209384036101000a7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fff01801990921691161790527fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000095909516920191825250604080518083037ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8018152601890920190819052815191955093508392850191508083835b6020831061143f57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101611402565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa15801561149c573d6000803e3d6000fd5b5050506040513d60208110156114b157600080fd5b50519250505090565b60408051600880825281830190925260609160208201818036833701905050905060c082901b8060071a60f81b826000815181106114f457fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060061a60f81b8260018151811061153757fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060051a60f81b8260028151811061157a57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060041a60f81b826003815181106115bd57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060031a60f81b8260048151811061160057fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060021a60f81b8260058151811061164357fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060011a60f81b8260068151811061168657fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060001a60f81b826007815181106116c957fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050919050565b6000808585111561170d578182fd5b83861115611719578182fd5b505082019391909203915056fe4465706f736974436f6e74726163743a206d65726b6c6520747265652066756c6c4465706f736974436f6e74726163743a207265636f6e7374727563746564204465706f7369744
461746120646f6573206e6f74206d6174636820737570706c696564206465706f7369745f646174615f726f6f744465706f736974436f6e74726163743a20696e76616c6964207769746864726177616c5f63726564656e7469616c73206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c7565206e6f74206d756c7469706c65206f6620677765694465706f736974436f6e74726163743a20696e76616c6964207075626b6579206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f20686967684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f206c6f774465706f736974436f6e74726163743a20696e76616c6964207369676e6174757265206c656e677468a26469706673582212201dd26f37a621703009abf16e77e69c93dc50c79db7f6cc37543e3e0e3decdc9764736f6c634300060b0033", - "storage": { - "0x0000000000000000000000000000000000000000000000000000000000000022": "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b", - "0x0000000000000000000000000000000000000000000000000000000000000023": "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", - "0x0000000000000000000000000000000000000000000000000000000000000024": "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c", - "0x0000000000000000000000000000000000000000000000000000000000000025": "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c", - "0x0000000000000000000000000000000000000000000000000000000000000026": "0x9efde052aa15429fae05bad4d0b1d7c64da64d03d7a1854a588c2cb8430c0d30", - "0x0000000000000000000000000000000000000000000000000000000000000027": "0xd88ddfeed400a8755596b21942c1497e114c302e6118290f91e6772976041fa1", - "0x0000000000000000000000000000000000000000000000000000000000000028": "0x87eb0ddba57e35f6d286673802a4af5975e22506c7cf4c64bb6be5ee11527f2c", - "0x0000000000000000000000000000000000000000000000000000000000000029": "0x26846476fd5fc54a5d43385167c95144f2643f533cc85bb9d16b782f8d7db193", - "0x000000000000000000000000000000000000000000000000000000000000002a": 
"0x506d86582d252405b840018792cad2bf1259f1ef5aa5f887e13cb2f0094f51e1", - "0x000000000000000000000000000000000000000000000000000000000000002b": "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", - "0x000000000000000000000000000000000000000000000000000000000000002c": "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", - "0x000000000000000000000000000000000000000000000000000000000000002d": "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f", - "0x000000000000000000000000000000000000000000000000000000000000002e": "0xdf6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e", - "0x000000000000000000000000000000000000000000000000000000000000002f": "0xb58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da7293784", - "0x0000000000000000000000000000000000000000000000000000000000000030": "0xd49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bb", - "0x0000000000000000000000000000000000000000000000000000000000000031": "0x8fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb", - "0x0000000000000000000000000000000000000000000000000000000000000032": "0x8d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab", - "0x0000000000000000000000000000000000000000000000000000000000000033": "0x95eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4", - "0x0000000000000000000000000000000000000000000000000000000000000034": "0xf893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f", - "0x0000000000000000000000000000000000000000000000000000000000000035": "0xcddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa", - "0x0000000000000000000000000000000000000000000000000000000000000036": "0x8a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c", - "0x0000000000000000000000000000000000000000000000000000000000000037": "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", - "0x0000000000000000000000000000000000000000000000000000000000000038": 
"0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", - "0x0000000000000000000000000000000000000000000000000000000000000039": "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", - "0x000000000000000000000000000000000000000000000000000000000000003a": "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", - "0x000000000000000000000000000000000000000000000000000000000000003b": "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", - "0x000000000000000000000000000000000000000000000000000000000000003c": "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", - "0x000000000000000000000000000000000000000000000000000000000000003d": "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", - "0x000000000000000000000000000000000000000000000000000000000000003e": "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", - "0x000000000000000000000000000000000000000000000000000000000000003f": "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", - "0x0000000000000000000000000000000000000000000000000000000000000040": "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7" - } - }, - "0x9a4aa7d9C2F6386e5F24d790eB2FFB9fd543A170": { - "balance": "1000000000000000000000000000" - }, - "0x5E3141B900ac5f5608b0d057D10d45a0e4927cD9": { - "balance": "1000000000000000000000000000" - }, - "0x7cF5Dbc49F0904065664b5B6C0d69CaB55F33988": { - "balance": "1000000000000000000000000000" - }, - "0x8D12b071A6F3823A535D38C4a583a2FA1859e822": { - "balance": "1000000000000000000000000000" - }, - "0x3B575D3cda6b30736A38B031E0d245E646A21135": { - "balance": "1000000000000000000000000000" - }, - "0x53bDe6CF93461674F590E532006b4022dA57A724": { - "balance": "1000000000000000000000000000" - } - }, - "coinbase": "0x0000000000000000000000000000000000000000", - "difficulty": "0x01", - "extraData": "", - "gasLimit": "0x400000", - "nonce": "0x1234", - "mixhash": 
"0x0000000000000000000000000000000000000000000000000000000000000000", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "1662465600" -} diff --git a/scripts/local_testnet/geth.sh b/scripts/local_testnet/geth.sh deleted file mode 100755 index 5dc4575cf0..0000000000 --- a/scripts/local_testnet/geth.sh +++ /dev/null @@ -1,53 +0,0 @@ -set -Eeuo pipefail - -source ./vars.env - -# Get options -while getopts "d:sh" flag; do - case "${flag}" in - d) DEBUG_LEVEL=${OPTARG};; - s) SUBSCRIBE_ALL_SUBNETS="--subscribe-all-subnets";; - h) - echo "Start a geth node" - echo - echo "usage: $0 " - echo - echo "Options:" - echo " -h: this help" - echo - echo "Positional arguments:" - echo " DATADIR Value for --datadir parameter" - echo " NETWORK-PORT Value for --port" - echo " HTTP-PORT Value for --http.port" - echo " AUTH-PORT Value for --authrpc.port" - echo " GENESIS_FILE Value for geth init" - exit - ;; - esac -done - -# Get positional arguments -data_dir=${@:$OPTIND+0:1} -network_port=${@:$OPTIND+1:1} -http_port=${@:$OPTIND+2:1} -auth_port=${@:$OPTIND+3:1} -genesis_file=${@:$OPTIND+4:1} - -# Init -$GETH_BINARY init \ - --datadir $data_dir \ - $genesis_file - -echo "Completed init" - -exec $GETH_BINARY \ - --datadir $data_dir \ - --ipcdisable \ - --http \ - --http.api="engine,eth,web3,net,debug" \ - --networkid=$CHAIN_ID \ - --syncmode=full \ - --bootnodes $EL_BOOTNODE_ENODE \ - --port $network_port \ - --http.port $http_port \ - --authrpc.port $auth_port diff --git a/scripts/local_testnet/kill_processes.sh b/scripts/local_testnet/kill_processes.sh deleted file mode 100755 index 83a0027337..0000000000 --- a/scripts/local_testnet/kill_processes.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash -# Kill processes - -set -Euo pipefail - -# First parameter is the file with -# one pid per line. 
-if [ -f "$1" ]; then - while read pid - do - # handle the case of blank lines - [[ -n "$pid" ]] || continue - - echo killing $pid - kill $pid || true - done < $1 -fi - - diff --git a/scripts/local_testnet/network_params.yaml b/scripts/local_testnet/network_params.yaml new file mode 100644 index 0000000000..f54fce354a --- /dev/null +++ b/scripts/local_testnet/network_params.yaml @@ -0,0 +1,14 @@ +# Full configuration reference [here](https://github.com/kurtosis-tech/ethereum-package?tab=readme-ov-file#configuration). +participants: + - el_type: geth + el_image: ethereum/client-go:latest + cl_type: lighthouse + cl_image: lighthouse:local + cl_extra_params: + - --target-peers=3 + count: 4 +network_params: + deneb_fork_epoch: 0 + seconds_per_slot: 3 +global_log_level: debug +snooper_enabled: false diff --git a/scripts/local_testnet/reset_genesis_time.sh b/scripts/local_testnet/reset_genesis_time.sh deleted file mode 100755 index 68c8fb6b4c..0000000000 --- a/scripts/local_testnet/reset_genesis_time.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -# -# Resets the beacon state genesis time to now. -# - -set -Eeuo pipefail - -source ./vars.env - -NOW=$(date +%s) - -lcli \ - change-genesis-time \ - $TESTNET_DIR/genesis.ssz \ - $(date +%s) - -echo "Reset genesis time to now ($NOW)" diff --git a/scripts/local_testnet/setup.sh b/scripts/local_testnet/setup.sh deleted file mode 100755 index 419cba19ed..0000000000 --- a/scripts/local_testnet/setup.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash - -# -# Produces a testnet specification and a genesis state where the genesis time -# is now + $GENESIS_DELAY. -# -# Generates datadirs for multiple validator keys according to the -# $VALIDATOR_COUNT and $BN_COUNT variables. 
-# - -set -o nounset -o errexit -o pipefail - -source ./vars.env - - -NOW=`date +%s` -GENESIS_TIME=`expr $NOW + $GENESIS_DELAY` - -lcli \ - new-testnet \ - --spec $SPEC_PRESET \ - --deposit-contract-address $DEPOSIT_CONTRACT_ADDRESS \ - --testnet-dir $TESTNET_DIR \ - --min-genesis-active-validator-count $GENESIS_VALIDATOR_COUNT \ - --min-genesis-time $GENESIS_TIME \ - --genesis-delay $GENESIS_DELAY \ - --genesis-fork-version $GENESIS_FORK_VERSION \ - --altair-fork-epoch $ALTAIR_FORK_EPOCH \ - --bellatrix-fork-epoch $BELLATRIX_FORK_EPOCH \ - --capella-fork-epoch $CAPELLA_FORK_EPOCH \ - --deneb-fork-epoch $DENEB_FORK_EPOCH \ - --electra-fork-epoch $ELECTRA_FORK_EPOCH \ - --ttd $TTD \ - --eth1-block-hash $ETH1_BLOCK_HASH \ - --eth1-id $CHAIN_ID \ - --eth1-follow-distance 128 \ - --seconds-per-slot $SECONDS_PER_SLOT \ - --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \ - --proposer-score-boost "$PROPOSER_SCORE_BOOST" \ - --validator-count $GENESIS_VALIDATOR_COUNT \ - --interop-genesis-state \ - --force - -echo Specification and genesis.ssz generated at $TESTNET_DIR. -echo "Generating $VALIDATOR_COUNT validators concurrently... (this may take a while)" - -lcli \ - insecure-validators \ - --count $VALIDATOR_COUNT \ - --base-dir $DATADIR \ - --node-count $VC_COUNT - -echo Validators generated with keystore passwords at $DATADIR. 
diff --git a/scripts/local_testnet/setup_time.sh b/scripts/local_testnet/setup_time.sh deleted file mode 100755 index 36f7fc4e99..0000000000 --- a/scripts/local_testnet/setup_time.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash - -set -Eeuo pipefail - -source ./vars.env - -# Function to output SLOT_PER_EPOCH for mainnet or minimal -get_spec_preset_value() { - case "$SPEC_PRESET" in - mainnet) echo 32 ;; - minimal) echo 8 ;; - gnosis) echo 16 ;; - *) echo "Unsupported preset: $SPEC_PRESET" >&2; exit 1 ;; - esac -} - -SLOT_PER_EPOCH=$(get_spec_preset_value $SPEC_PRESET) -echo "slot_per_epoch=$SLOT_PER_EPOCH" - -genesis_file=$1 - -# Update future hardforks time in the EL genesis file based on the CL genesis time -GENESIS_TIME=$(lcli pretty-ssz --spec $SPEC_PRESET --testnet-dir $TESTNET_DIR BeaconState $TESTNET_DIR/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d') -echo $GENESIS_TIME -CAPELLA_TIME=$((GENESIS_TIME + (CAPELLA_FORK_EPOCH * $SLOT_PER_EPOCH * SECONDS_PER_SLOT))) -echo $CAPELLA_TIME -sed -i 's/"shanghaiTime".*$/"shanghaiTime": '"$CAPELLA_TIME"',/g' $genesis_file -CANCUN_TIME=$((GENESIS_TIME + (DENEB_FORK_EPOCH * $SLOT_PER_EPOCH * SECONDS_PER_SLOT))) -echo $CANCUN_TIME -sed -i 's/"cancunTime".*$/"cancunTime": '"$CANCUN_TIME"',/g' $genesis_file -PRAGUE_TIME=$((GENESIS_TIME + (ELECTRA_FORK_EPOCH * $SLOT_PER_EPOCH * SECONDS_PER_SLOT))) -echo $PRAGUE_TIME -sed -i 's/"pragueTime".*$/"pragueTime": '"$PRAGUE_TIME"',/g' $genesis_file -cat $genesis_file - diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index be91d06998..e0172e6b28 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -1,147 +1,83 @@ #!/usr/bin/env bash -# Start all processes necessary to create a local testnet + +# Requires `docker`, `kurtosis`, `yq` set -Eeuo pipefail -source ./vars.env +SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 
+ENCLAVE_NAME=local-testnet +NETWORK_PARAMS_FILE=$SCRIPT_DIR/network_params.yaml -# Set a higher ulimit in case we want to import 1000s of validators. -ulimit -n 65536 - -# VC_COUNT is defaulted in vars.env -DEBUG_LEVEL=${DEBUG_LEVEL:-info} -BUILDER_PROPOSALS= +BUILD_IMAGE=true +BUILDER_PROPOSALS=false +CI=false # Get options -while getopts "v:d:ph" flag; do +while getopts "e:b:n:phc" flag; do case "${flag}" in - v) VC_COUNT=${OPTARG};; - d) DEBUG_LEVEL=${OPTARG};; - p) BUILDER_PROPOSALS="-p";; + e) ENCLAVE_NAME=${OPTARG};; + b) BUILD_IMAGE=${OPTARG};; + n) NETWORK_PARAMS_FILE=${OPTARG};; + p) BUILDER_PROPOSALS=true;; + c) CI=true;; h) - validators=$(( $VALIDATOR_COUNT / $BN_COUNT )) - echo "Start local testnet, defaults: 1 eth1 node, $BN_COUNT beacon nodes," - echo "and $VC_COUNT validator clients with each vc having $validators validators." + echo "Start a local testnet with kurtosis." echo echo "usage: $0 " echo echo "Options:" - echo " -v: VC_COUNT default: $VC_COUNT" - echo " -d: DEBUG_LEVEL default: info" - echo " -p: enable builder proposals" - echo " -h: this help" + echo " -e: enclave name default: $ENCLAVE_NAME" + echo " -b: whether to build Lighthouse docker image default: $BUILD_IMAGE" + echo " -n: kurtosis network params file path default: $NETWORK_PARAMS_FILE" + echo " -p: enable builder proposals" + echo " -c: CI mode, run without other additional services like Grafana and Dora explorer" + echo " -h: this help" exit ;; esac done -if (( $VC_COUNT > $BN_COUNT )); then - echo "Error $VC_COUNT is too large, must be <= BN_COUNT=$BN_COUNT" +LH_IMAGE_NAME=$(yq eval ".participants[0].cl_image" $NETWORK_PARAMS_FILE) + +if ! command -v docker &> /dev/null; then + echo "Docker is not installed. Please install Docker and try again." + exit 1 +fi + +if ! command -v kurtosis &> /dev/null; then + echo "kurtosis command not found. Please install kurtosis and try again." exit fi -genesis_file=${@:$OPTIND+0:1} +if ! 
command -v yq &> /dev/null; then + echo "yq not found. Please install yq and try again." +fi -# Init some constants -PID_FILE=$TESTNET_DIR/PIDS.pid -LOG_DIR=$TESTNET_DIR +if [ "$BUILDER_PROPOSALS" = true ]; then + yq eval '.participants[0].vc_extra_params = ["--builder-proposals"]' -i $NETWORK_PARAMS_FILE + echo "--builder-proposals VC flag added to network_params.yaml" +fi -# Stop local testnet and remove $PID_FILE -./stop_local_testnet.sh +if [ "$CI" = true ]; then + # TODO: run assertoor tests + yq eval '.additional_services = []' -i $NETWORK_PARAMS_FILE + echo "Running without additional services (CI mode)." +else + yq eval '.additional_services = ["dora", "prometheus_grafana"]' -i $NETWORK_PARAMS_FILE + echo "Additional services dora and prometheus_grafana added to network_params.yaml" +fi -# Clean $DATADIR and create empty log files so the -# user can "tail -f" right after starting this script -# even before its done. -./clean.sh -mkdir -p $LOG_DIR -for (( bn=1; bn<=$BN_COUNT; bn++ )); do - touch $LOG_DIR/beacon_node_$bn.log -done -for (( el=1; el<=$BN_COUNT; el++ )); do - touch $LOG_DIR/geth_$el.log -done -for (( vc=1; vc<=$VC_COUNT; vc++ )); do - touch $LOG_DIR/validator_node_$vc.log -done +if [ "$BUILD_IMAGE" = true ]; then + echo "Building Lighthouse Docker image." + ROOT_DIR="$SCRIPT_DIR/../.." + docker build --build-arg FEATURES=portable -f $ROOT_DIR/Dockerfile -t $LH_IMAGE_NAME $ROOT_DIR +else + echo "Not rebuilding Lighthouse Docker image." +fi -# Sleep with a message -sleeping() { - echo sleeping $1 - sleep $1 -} +# Stop local testnet +kurtosis enclave rm -f $ENCLAVE_NAME 2>/dev/null || true -# Execute the command with logs saved to a file. 
-# -# First parameter is log file name -# Second parameter is executable name -# Remaining parameters are passed to executable -execute_command() { - LOG_NAME=$1 - EX_NAME=$2 - shift - shift - CMD="$EX_NAME $@ >> $LOG_DIR/$LOG_NAME 2>&1" - echo "executing: $CMD" - echo "$CMD" > "$LOG_DIR/$LOG_NAME" - eval "$CMD &" -} - -# Execute the command with logs saved to a file -# and is PID is saved to $PID_FILE. -# -# First parameter is log file name -# Second parameter is executable name -# Remaining parameters are passed to executable -execute_command_add_PID() { - execute_command $@ - echo "$!" >> $PID_FILE -} - - -# Setup data -echo "executing: ./setup.sh >> $LOG_DIR/setup.log" -./setup.sh >> $LOG_DIR/setup.log 2>&1 - -# Call setup_time.sh to update future hardforks time in the EL genesis file based on the CL genesis time -./setup_time.sh $genesis_file - -# Delay to let boot_enr.yaml to be created -execute_command_add_PID bootnode.log ./bootnode.sh -sleeping 3 - -execute_command_add_PID el_bootnode.log ./el_bootnode.sh -sleeping 3 - -# Start beacon nodes -BN_udp_tcp_base=9000 -BN_http_port_base=8000 - -EL_base_network=7000 -EL_base_http=6000 -EL_base_auth_http=5000 - -(( $VC_COUNT < $BN_COUNT )) && SAS=-s || SAS= - -for (( el=1; el<=$BN_COUNT; el++ )); do - execute_command_add_PID geth_$el.log ./geth.sh $DATADIR/geth_datadir$el $((EL_base_network + $el)) $((EL_base_http + $el)) $((EL_base_auth_http + $el)) $genesis_file -done - -sleeping 20 - -# Reset the `genesis.json` config file fork times. 
-sed -i 's/"shanghaiTime".*$/"shanghaiTime": 0,/g' $genesis_file -sed -i 's/"cancunTime".*$/"cancunTime": 0,/g' $genesis_file -sed -i 's/"pragueTime".*$/"pragueTime": 0,/g' $genesis_file - -for (( bn=1; bn<=$BN_COUNT; bn++ )); do - secret=$DATADIR/geth_datadir$bn/geth/jwtsecret - echo $secret - execute_command_add_PID beacon_node_$bn.log ./beacon_node.sh $SAS -d $DEBUG_LEVEL $DATADIR/node_$bn $((BN_udp_tcp_base + $bn)) $((BN_udp_tcp_base + $bn + 100)) $((BN_http_port_base + $bn)) http://localhost:$((EL_base_auth_http + $bn)) $secret -done - -# Start requested number of validator clients -for (( vc=1; vc<=$VC_COUNT; vc++ )); do - execute_command_add_PID validator_node_$vc.log ./validator_client.sh $BUILDER_PROPOSALS -d $DEBUG_LEVEL $DATADIR/node_$vc http://localhost:$((BN_http_port_base + $vc)) -done +kurtosis run --enclave $ENCLAVE_NAME github.com/kurtosis-tech/ethereum-package --args-file $NETWORK_PARAMS_FILE echo "Started!" diff --git a/scripts/local_testnet/stop_local_testnet.sh b/scripts/local_testnet/stop_local_testnet.sh index b1c3188ee3..5500f8d5a0 100755 --- a/scripts/local_testnet/stop_local_testnet.sh +++ b/scripts/local_testnet/stop_local_testnet.sh @@ -1,10 +1,15 @@ #!/usr/bin/env bash -# Stop all processes that were started with start_local_testnet.sh - set -Eeuo pipefail -source ./vars.env +SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +ENCLAVE_NAME=${1:-local-testnet} +LOGS_PATH=$SCRIPT_DIR/logs +LOGS_SUBDIR=$LOGS_PATH/$ENCLAVE_NAME -PID_FILE=$TESTNET_DIR/PIDS.pid -./kill_processes.sh $PID_FILE -rm -f $PID_FILE +# Delete existing logs directory and make sure parent directory exists. +rm -rf $LOGS_SUBDIR && mkdir -p $LOGS_PATH +kurtosis enclave dump $ENCLAVE_NAME $LOGS_SUBDIR +echo "Local testnet logs stored to $LOGS_SUBDIR." + +kurtosis enclave rm -f $ENCLAVE_NAME +echo "Local testnet stopped." 
diff --git a/scripts/local_testnet/validator_client.sh b/scripts/local_testnet/validator_client.sh deleted file mode 100755 index d88a1833cb..0000000000 --- a/scripts/local_testnet/validator_client.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env bash - -# -# Starts a validator client based upon a genesis state created by -# `./setup.sh`. -# -# Usage: ./validator_client.sh - -set -Eeuo pipefail - -source ./vars.env - -DEBUG_LEVEL=info - -BUILDER_PROPOSALS= - -# Get options -while getopts "pd:" flag; do - case "${flag}" in - p) BUILDER_PROPOSALS="--builder-proposals";; - d) DEBUG_LEVEL=${OPTARG};; - esac -done - -exec lighthouse \ - --debug-level $DEBUG_LEVEL \ - vc \ - $BUILDER_PROPOSALS \ - --datadir ${@:$OPTIND:1} \ - --testnet-dir $TESTNET_DIR \ - --init-slashing-protection \ - --beacon-nodes ${@:$OPTIND+1:1} \ - --suggested-fee-recipient 0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990 \ - $VC_ARGS diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env deleted file mode 100644 index 9bdec71ff7..0000000000 --- a/scripts/local_testnet/vars.env +++ /dev/null @@ -1,69 +0,0 @@ -# Path to the geth binary -GETH_BINARY=geth -EL_BOOTNODE_BINARY=bootnode - -# Base directories for the validator keys and secrets -DATADIR=~/.lighthouse/local-testnet - -# Directory for the eth2 config -TESTNET_DIR=$DATADIR/testnet - -# Mnemonic for generating validator keys -MNEMONIC_PHRASE="vast thought differ pull jewel broom cook wrist tribe word before omit" - -EL_BOOTNODE_ENODE="enode://51ea9bb34d31efc3491a842ed13b8cab70e753af108526b57916d716978b380ed713f4336a80cdb85ec2a115d5a8c0ae9f3247bed3c84d3cb025c6bab311062c@127.0.0.1:0?discport=30301" - -# Hardcoded deposit contract -DEPOSIT_CONTRACT_ADDRESS=4242424242424242424242424242424242424242 - -GENESIS_FORK_VERSION=0x42424242 - -# Block hash generated from genesis.json in directory -ETH1_BLOCK_HASH=4b0e17cf5c04616d64526d292b80a1f2720cf2195d990006e4ea6950c5bbcb9f - -VALIDATOR_COUNT=80 -GENESIS_VALIDATOR_COUNT=80 - -# Number 
of beacon_node instances that you intend to run -BN_COUNT=4 - -# Number of validator clients -VC_COUNT=$BN_COUNT - -# Number of seconds to delay to start genesis block. -# If started by a script this can be 0, if starting by hand -# use something like 180. -GENESIS_DELAY=0 - -# Port for P2P communication with bootnode -BOOTNODE_PORT=4242 - -# Network ID and Chain ID of local eth1 test network -CHAIN_ID=4242 - -# Hard fork configuration -ALTAIR_FORK_EPOCH=0 -BELLATRIX_FORK_EPOCH=0 -CAPELLA_FORK_EPOCH=0 -DENEB_FORK_EPOCH=1 -ELECTRA_FORK_EPOCH=9999999 - -TTD=0 - -# Spec version (mainnet or minimal) -SPEC_PRESET=mainnet - -# Seconds per Eth2 slot -SECONDS_PER_SLOT=3 - -# Seconds per Eth1 block -SECONDS_PER_ETH1_BLOCK=3 - -# Proposer score boost percentage -PROPOSER_SCORE_BOOST=40 - -# Command line arguments for beacon node client -BN_ARGS="" - -# Command line arguments for validator client -VC_ARGS="" diff --git a/scripts/mdlint.sh b/scripts/mdlint.sh new file mode 100755 index 0000000000..5274f108d2 --- /dev/null +++ b/scripts/mdlint.sh @@ -0,0 +1,23 @@ +#! /usr/bin/env bash + +# IMPORTANT +# This script should NOT be run directly. +# Run `make mdlint` from the root of the repository instead. + +# use markdownlint-cli to check for markdown files +docker run --rm -v ./book:/workdir ghcr.io/igorshubovych/markdownlint-cli:latest '**/*.md' --ignore node_modules + +# exit code +exit_code=$(echo $?) + +if [[ $exit_code == 0 ]]; then + echo "All markdown files are properly formatted." + exit 0 +elif [[ $exit_code == 1 ]]; then + echo "Exiting with errors. Run 'make mdlint' locally and commit the changes. Note that not all errors can be fixed automatically, if there are still errors after running 'make mdlint', look for the errors and fix manually." + docker run --rm -v ./book:/workdir ghcr.io/igorshubovych/markdownlint-cli:latest '**/*.md' --ignore node_modules --fix + exit 1 +else + echo "Exiting with exit code >1. Check for the error logs and fix them accordingly." 
+ exit 1 +fi \ No newline at end of file diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh index e13c06cdba..441e2a6357 100755 --- a/scripts/tests/doppelganger_protection.sh +++ b/scripts/tests/doppelganger_protection.sh @@ -1,101 +1,129 @@ #!/usr/bin/env bash -# Requires `lighthouse`, `lcli`, `geth`, `bootnode`, `curl`, `jq` +# Requires `docker`, `kurtosis`, `yq`, `curl`, `jq` +set -Eeuo pipefail +SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +NETWORK_PARAMS_FILE=$SCRIPT_DIR/network_params.yaml BEHAVIOR=$1 +ENCLAVE_NAME=local-testnet-$BEHAVIOR + +SECONDS_PER_SLOT=$(yq eval ".network_params.seconds_per_slot" $NETWORK_PARAMS_FILE) +KEYS_PER_NODE=$(yq eval ".network_params.num_validator_keys_per_node" $NETWORK_PARAMS_FILE) +LH_IMAGE_NAME=$(yq eval ".participants[0].cl_image" $NETWORK_PARAMS_FILE) if [[ "$BEHAVIOR" != "success" ]] && [[ "$BEHAVIOR" != "failure" ]]; then echo "Usage: doppelganger_protection.sh [success|failure]" exit 1 fi -exit_if_fails() { - echo $@ - $@ - EXIT_CODE=$? - if [[ $EXIT_CODE -eq 1 ]]; then - exit 1 - fi +function exit_and_dump_logs() { + local exit_code=$1 + echo "Shutting down..." + $SCRIPT_DIR/../local_testnet/stop_local_testnet.sh $ENCLAVE_NAME + echo "Test completed with exit code $exit_code." + exit $exit_code } -genesis_file=$2 -source ./vars.env +function get_service_status() { + local service_name=$1 + kurtosis service inspect $ENCLAVE_NAME $service_name | grep Status | cut -d':' -f2 | xargs +} -exit_if_fails ../local_testnet/clean.sh +function run_command_without_exit() { + local command=$1 + set +e + eval "$command" + local exit_code=$? 
+ set -e + echo $exit_code +} +# Start local testnet +$SCRIPT_DIR/../local_testnet/start_local_testnet.sh -e $ENCLAVE_NAME -b false -c -n $NETWORK_PARAMS_FILE -echo "Setting up local testnet" +# Immediately stop node 4 (as we only need the node 4 validator keys generated for later use) +kurtosis service stop $ENCLAVE_NAME cl-4-lighthouse-geth el-4-geth-lighthouse vc-4-geth-lighthouse > /dev/null -exit_if_fails ../local_testnet/setup.sh +# Get the http port to get the config +BN1_HTTP_ADDRESS=`kurtosis port print $ENCLAVE_NAME cl-1-lighthouse-geth http` -# Duplicate this directory so slashing protection doesn't keep us from re-using validator keys -exit_if_fails cp -R $HOME/.lighthouse/local-testnet/node_1 $HOME/.lighthouse/local-testnet/node_1_doppelganger +# Get the genesis time and genesis delay +MIN_GENESIS_TIME=`curl -s $BN1_HTTP_ADDRESS/eth/v1/config/spec | jq '.data.MIN_GENESIS_TIME|tonumber'` +GENESIS_DELAY=`curl -s $BN1_HTTP_ADDRESS/eth/v1/config/spec | jq '.data.GENESIS_DELAY|tonumber'` -echo "Starting bootnode" +CURRENT_TIME=`date +%s` +# Note: doppelganger protection can only be started post epoch 0 +echo "Waiting until next epoch before starting the next validator client..." 
+DELAY=$(( $SECONDS_PER_SLOT * 32 + $GENESIS_DELAY + $MIN_GENESIS_TIME - $CURRENT_TIME)) +sleep $DELAY -exit_if_fails ../local_testnet/bootnode.sh &> /dev/null & - -exit_if_fails ../local_testnet/el_bootnode.sh &> /dev/null & - -# wait for the bootnode to start -sleep 10 - -echo "Starting local execution nodes" - -exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir1 6000 5000 4000 $genesis_file &> geth.log & -exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir2 6100 5100 4100 $genesis_file &> /dev/null & -exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir3 6200 5200 4200 $genesis_file &> /dev/null & - -sleep 20 - -exit_if_fails ../local_testnet/beacon_node.sh -d debug $HOME/.lighthouse/local-testnet/node_1 8000 7000 9000 http://localhost:4000 $HOME/.lighthouse/local-testnet/geth_datadir1/geth/jwtsecret &> /dev/null & -exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 8100 7100 9100 http://localhost:4100 $HOME/.lighthouse/local-testnet/geth_datadir2/geth/jwtsecret &> /dev/null & -exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 8200 7200 9200 http://localhost:4200 $HOME/.lighthouse/local-testnet/geth_datadir3/geth/jwtsecret &> /dev/null & - -echo "Starting local validator clients" - -exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:9000 &> /dev/null & -exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:9100 &> /dev/null & -exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_3 http://localhost:9200 &> /dev/null & - -echo "Waiting an epoch before starting the next validator client" -sleep $(( $SECONDS_PER_SLOT * 32 )) +# Use BN2 for the next validator client +bn_2_url=$(kurtosis service inspect $ENCLAVE_NAME cl-2-lighthouse-geth | grep 'enr-address' | cut 
-d'=' -f2) +bn_2_port=4000 if [[ "$BEHAVIOR" == "failure" ]]; then - echo "Starting the doppelganger validator client" + echo "Starting the doppelganger validator client." # Use same keys as keys from VC1 and connect to BN2 # This process should not last longer than 2 epochs - timeout $(( $SECONDS_PER_SLOT * 32 * 2 )) ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1_doppelganger http://localhost:9100 - DOPPELGANGER_EXIT=$? + vc_1_range_start=0 + vc_1_range_end=$(($KEYS_PER_NODE - 1)) + vc_1_keys_artifact_id="1-lighthouse-geth-$vc_1_range_start-$vc_1_range_end-0" + service_name=vc-1-doppelganger - echo "Shutting down" + kurtosis service add \ + --files /validator_keys:$vc_1_keys_artifact_id,/testnet:el_cl_genesis_data \ + $ENCLAVE_NAME $service_name $LH_IMAGE_NAME -- lighthouse \ + vc \ + --debug-level debug \ + --testnet-dir=/testnet \ + --validators-dir=/validator_keys/keys \ + --secrets-dir=/validator_keys/secrets \ + --init-slashing-protection \ + --beacon-nodes=http://$bn_2_url:$bn_2_port \ + --enable-doppelganger-protection \ + --suggested-fee-recipient 0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990 - # Cleanup - killall geth - killall lighthouse - killall bootnode + # Check if doppelganger VC has stopped and exited. Exit code 1 means the check timed out and VC is still running. + check_exit_cmd="until [ \$(get_service_status $service_name) != 'RUNNING' ]; do sleep 1; done" + doppelganger_exit=$(run_command_without_exit "timeout $(( $SECONDS_PER_SLOT * 32 * 2 )) bash -c \"$check_exit_cmd\"") - echo "Done" - - # We expect to find a doppelganger, exit with success error code if doppelganger was found - # and failure if no doppelganger was found. - if [[ $DOPPELGANGER_EXIT -eq 1 ]]; then - exit 0 + if [[ $doppelganger_exit -eq 1 ]]; then + echo "Test failed: expected doppelganger but VC is still running. Check the logs for details." 
+ exit_and_dump_logs 1 else - exit 1 + echo "Test passed: doppelganger found and VC process stopped successfully." + exit_and_dump_logs 0 fi fi if [[ "$BEHAVIOR" == "success" ]]; then - echo "Starting the last validator client" + echo "Starting the last validator client." - ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_4 http://localhost:9100 & - DOPPELGANGER_FAILURE=0 + vc_4_range_start=$(($KEYS_PER_NODE * 3)) + vc_4_range_end=$(($KEYS_PER_NODE * 4 - 1)) + vc_4_keys_artifact_id="4-lighthouse-geth-$vc_4_range_start-$vc_4_range_end-0" + service_name=vc-4 + + kurtosis service add \ + --files /validator_keys:$vc_4_keys_artifact_id,/testnet:el_cl_genesis_data \ + $ENCLAVE_NAME $service_name $LH_IMAGE_NAME -- lighthouse \ + vc \ + --debug-level debug \ + --testnet-dir=/testnet \ + --validators-dir=/validator_keys/keys \ + --secrets-dir=/validator_keys/secrets \ + --init-slashing-protection \ + --beacon-nodes=http://$bn_2_url:$bn_2_port \ + --enable-doppelganger-protection \ + --suggested-fee-recipient 0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990 + + doppelganger_failure=0 # Sleep three epochs, then make sure all validators were active in epoch 2. Use # `is_previous_epoch_target_attester` from epoch 3 for a complete view of epoch 2 inclusion. @@ -104,20 +132,27 @@ if [[ "$BEHAVIOR" == "success" ]]; then echo "Waiting three epochs..." sleep $(( $SECONDS_PER_SLOT * 32 * 3 )) - PREVIOUS_DIR=$(pwd) - cd $HOME/.lighthouse/local-testnet/node_4/validators + # Get VC4 validator keys + keys_path=$SCRIPT_DIR/$ENCLAVE_NAME/node_4/validators + rm -rf $keys_path && mkdir -p $keys_path + kurtosis files download $ENCLAVE_NAME $vc_4_keys_artifact_id $keys_path + cd $keys_path/keys + for val in 0x*; do [[ -e $val ]] || continue - curl -s localhost:9100/lighthouse/validator_inclusion/3/$val | jq | grep -q '"is_previous_epoch_target_attester": false' - IS_ATTESTER=$? 
- if [[ $IS_ATTESTER -eq 0 ]]; then + is_attester=$(run_command_without_exit "curl -s $BN1_HTTP_ADDRESS/lighthouse/validator_inclusion/3/$val | jq | grep -q '\"is_previous_epoch_target_attester\": false'") + if [[ $is_attester -eq 0 ]]; then echo "$val did not attest in epoch 2." else echo "ERROR! $val did attest in epoch 2." - DOPPELGANGER_FAILURE=1 + doppelganger_failure=1 fi done + if [[ $doppelganger_failure -eq 1 ]]; then + exit_and_dump_logs 1 + fi + # Sleep two epochs, then make sure all validators were active in epoch 4. Use # `is_previous_epoch_target_attester` from epoch 5 for a complete view of epoch 4 inclusion. # @@ -126,30 +161,18 @@ if [[ "$BEHAVIOR" == "success" ]]; then sleep $(( $SECONDS_PER_SLOT * 32 * 2 )) for val in 0x*; do [[ -e $val ]] || continue - curl -s localhost:9100/lighthouse/validator_inclusion/5/$val | jq | grep -q '"is_previous_epoch_target_attester": true' - IS_ATTESTER=$? - if [[ $IS_ATTESTER -eq 0 ]]; then + is_attester=$(run_command_without_exit "curl -s $BN1_HTTP_ADDRESS/lighthouse/validator_inclusion/5/$val | jq | grep -q '\"is_previous_epoch_target_attester\": true'") + if [[ $is_attester -eq 0 ]]; then echo "$val attested in epoch 4." else echo "ERROR! $val did not attest in epoch 4." 
- DOPPELGANGER_FAILURE=1 + doppelganger_failure=1 fi done - echo "Shutting down" - - # Cleanup - cd $PREVIOUS_DIR - - killall geth - killall lighthouse - killall bootnode - - echo "Done" - - if [[ $DOPPELGANGER_FAILURE -eq 1 ]]; then - exit 1 + if [[ $doppelganger_failure -eq 1 ]]; then + exit_and_dump_logs 1 fi fi -exit 0 +exit_and_dump_logs 0 diff --git a/scripts/tests/genesis.json b/scripts/tests/genesis.json deleted file mode 100644 index bfbc08c81e..0000000000 --- a/scripts/tests/genesis.json +++ /dev/null @@ -1,856 +0,0 @@ -{ - "config": { - "chainId": 4242, - "homesteadBlock": 0, - "eip150Block": 0, - "eip155Block": 0, - "eip158Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0, - "istanbulBlock": 0, - "berlinBlock": 0, - "londonBlock": 0, - "mergeForkBlock": 0, - "shanghaiTime": 0, - "cancunTime": 0, - "terminalTotalDifficulty": 0, - "terminalTotalDifficultyPassed": true - }, - "alloc": { - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x6d6172697573766477000000" - }, - "0x0000000000000000000000000000000000000000": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000001": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000002": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000003": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000004": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000005": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000006": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000007": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000008": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000009": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000c": { - "balance": "1" - }, - 
"0x000000000000000000000000000000000000000d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000010": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000011": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000012": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000013": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000014": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000015": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000016": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000017": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000018": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000019": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000020": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000021": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000022": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000023": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000024": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000025": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000026": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000027": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000028": { - 
"balance": "1" - }, - "0x0000000000000000000000000000000000000029": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000030": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000031": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000032": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000033": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000034": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000035": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000036": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000037": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000038": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000039": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000040": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000041": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000042": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000043": { - "balance": "1" - }, - 
"0x0000000000000000000000000000000000000044": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000045": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000046": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000047": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000048": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000049": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000050": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000051": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000052": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000053": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000054": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000055": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000056": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000057": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000058": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000059": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005f": { - 
"balance": "1" - }, - "0x0000000000000000000000000000000000000060": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000061": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000062": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000063": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000064": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000065": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000066": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000067": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000068": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000069": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000070": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000071": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000072": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000073": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000074": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000075": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000076": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000077": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000078": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000079": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007a": { - "balance": "1" - }, - 
"0x000000000000000000000000000000000000007b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000080": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000081": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000082": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000083": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000084": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000085": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000086": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000087": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000088": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000089": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000090": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000091": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000092": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000093": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000094": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000095": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000096": { - 
"balance": "1" - }, - "0x0000000000000000000000000000000000000097": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000098": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000099": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009f": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000aa": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ab": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ac": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ad": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ae": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000af": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b1": { - "balance": "1" - }, - 
"0x00000000000000000000000000000000000000b2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ba": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000be": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bf": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ca": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cd": { - 
"balance": "1" - }, - "0x00000000000000000000000000000000000000ce": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cf": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000da": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000db": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000dc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000dd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000de": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000df": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e8": { - "balance": "1" - }, - 
"0x00000000000000000000000000000000000000e9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ea": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000eb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ec": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ed": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ee": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ef": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fa": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fe": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ff": { - "balance": "1" - }, - "0x4242424242424242424242424242424242424242": { - "balance": "0", - "code": 
"0x60806040526004361061003f5760003560e01c806301ffc9a71461004457806322895118146100a4578063621fd130146101ba578063c5f2892f14610244575b600080fd5b34801561005057600080fd5b506100906004803603602081101561006757600080fd5b50357fffffffff000000000000000000000000000000000000000000000000000000001661026b565b604080519115158252519081900360200190f35b6101b8600480360360808110156100ba57600080fd5b8101906020810181356401000000008111156100d557600080fd5b8201836020820111156100e757600080fd5b8035906020019184600183028401116401000000008311171561010957600080fd5b91939092909160208101903564010000000081111561012757600080fd5b82018360208201111561013957600080fd5b8035906020019184600183028401116401000000008311171561015b57600080fd5b91939092909160208101903564010000000081111561017957600080fd5b82018360208201111561018b57600080fd5b803590602001918460018302840111640100000000831117156101ad57600080fd5b919350915035610304565b005b3480156101c657600080fd5b506101cf6110b5565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102095781810151838201526020016101f1565b50505050905090810190601f1680156102365780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025057600080fd5b506102596110c7565b60408051918252519081900360200190f35b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a70000000000000000000000000000000000000000000000000000000014806102fe57507fffffffff0000000000000000000000000000000000000000000000000000000082167f8564090700000000000000000000000000000000000000000000000000000000145b92915050565b6030861461035d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118056026913960400191505060405180910390fd5b602084146103b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252603681526020018061179c6036913960400191505060405180910390fd5b6060821461040f576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600
4018080602001828103825260298152602001806118786029913960400191505060405180910390fd5b670de0b6b3a7640000341015610470576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118526026913960400191505060405180910390fd5b633b9aca003406156104cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260338152602001806117d26033913960400191505060405180910390fd5b633b9aca00340467ffffffffffffffff811115610535576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602781526020018061182b6027913960400191505060405180910390fd5b6060610540826114ba565b90507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c589898989858a8a6105756020546114ba565b6040805160a0808252810189905290819060208201908201606083016080840160c085018e8e80828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690910187810386528c815260200190508c8c808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690920188810386528c5181528c51602091820193918e019250908190849084905b83811015610648578181015183820152602001610630565b50505050905090810190601f1680156106755780820380516001836020036101000a031916815260200191505b5086810383528881526020018989808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018881038452895181528951602091820193918b019250908190849084905b838110156106ef5781810151838201526020016106d7565b50505050905090810190601f16801561071c5780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a1600060028a8a600060801b604051602001808484808284377fffffffffffffffffffffffffffffffff0000000000000000000000000000000090941691909301908152604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0818403018152601090920190819052815191955093508392506020850191508083835b602083106107fc57805182527ffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016107bf565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610859573d6000803e3d6000fd5b5050506040513d602081101561086e57600080fd5b5051905060006002806108846040848a8c6116fe565b6040516020018083838082843780830192505050925050506040516020818303038152906040526040518082805190602001908083835b602083106108f857805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016108bb565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610955573d6000803e3d6000fd5b5050506040513d602081101561096a57600080fd5b5051600261097b896040818d6116fe565b60405160009060200180848480828437919091019283525050604080518083038152602092830191829052805190945090925082918401908083835b602083106109f457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016109b7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610a51573d6000803e3d6000fd5b5050506040513d6020811015610a6657600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610ada57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610a9d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610b37573d6000803e3d6000fd5b5050506040513d6020811015610b4c57600080fd5b50516040805160208101858152929350600092600292839287928f928f92018383808284378083019250505093505050506040516020818303038152906040526040518082805190602001908083835b60208310610bd957805182527ffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffe09092019160209182019101610b9c565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610c36573d6000803e3d6000fd5b5050506040513d6020811015610c4b57600080fd5b50516040518651600291889160009188916020918201918291908601908083835b60208310610ca957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610c6c565b6001836020036101000a0380198251168184511680821785525050505050509050018367ffffffffffffffff191667ffffffffffffffff1916815260180182815260200193505050506040516020818303038152906040526040518082805190602001908083835b60208310610d4e57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610d11565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610dab573d6000803e3d6000fd5b5050506040513d6020811015610dc057600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610e3457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610df7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610e91573d6000803e3d6000fd5b5050506040513d6020811015610ea657600080fd5b50519050858114610f02576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260548152602001806117486054913960600191505060405180910390fd5b60205463ffffffff11610f60576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806117276021913960400191505060405180910390fd5b602080546001019081905560005b60208110156110a9578160011660011415610fa0578260008260208110610f9157fe5b0155506110ac9550505050505
0565b600260008260208110610faf57fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061102557805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610fe8565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015611082573d6000803e3d6000fd5b5050506040513d602081101561109757600080fd5b50519250600282049150600101610f6e565b50fe5b50505050505050565b60606110c26020546114ba565b905090565b6020546000908190815b60208110156112f05781600116600114156111e6576002600082602081106110f557fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061116b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161112e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156111c8573d6000803e3d6000fd5b5050506040513d60208110156111dd57600080fd5b505192506112e2565b600283602183602081106111f657fe5b015460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061126b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161122e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156112c8573d6000803e3d6000fd5b5050506040513d60208110156112dd57600080fd5b505192505b6002820491506001016110d1565b506002826112ff6020546114ba565b600060401b6040516020018084815260200183805190602001908083835b6020831061135a57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161131d565b51815160209384036101000a7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fff01801990921691161790527fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000095909516920191825250604080518083037ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8018152601890920190819052815191955093508392850191508083835b6020831061143f57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101611402565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa15801561149c573d6000803e3d6000fd5b5050506040513d60208110156114b157600080fd5b50519250505090565b60408051600880825281830190925260609160208201818036833701905050905060c082901b8060071a60f81b826000815181106114f457fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060061a60f81b8260018151811061153757fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060051a60f81b8260028151811061157a57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060041a60f81b826003815181106115bd57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060031a60f81b8260048151811061160057fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060021a60f81b8260058151811061164357fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060011a60f81b8260068151811061168657fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060001a60f81b826007815181106116c957fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050919050565b6000808585111561170d578182fd5b83861115611719578182fd5b505082019391909203915056fe4465706f736974436f6e74726163743a206d65726b6c6520747265652066756c6c4465706f736974436f6e74726163743a207265636f6e7374727563746564204465706f7369744
461746120646f6573206e6f74206d6174636820737570706c696564206465706f7369745f646174615f726f6f744465706f736974436f6e74726163743a20696e76616c6964207769746864726177616c5f63726564656e7469616c73206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c7565206e6f74206d756c7469706c65206f6620677765694465706f736974436f6e74726163743a20696e76616c6964207075626b6579206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f20686967684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f206c6f774465706f736974436f6e74726163743a20696e76616c6964207369676e6174757265206c656e677468a26469706673582212201dd26f37a621703009abf16e77e69c93dc50c79db7f6cc37543e3e0e3decdc9764736f6c634300060b0033", - "storage": { - "0x0000000000000000000000000000000000000000000000000000000000000022": "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b", - "0x0000000000000000000000000000000000000000000000000000000000000023": "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", - "0x0000000000000000000000000000000000000000000000000000000000000024": "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c", - "0x0000000000000000000000000000000000000000000000000000000000000025": "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c", - "0x0000000000000000000000000000000000000000000000000000000000000026": "0x9efde052aa15429fae05bad4d0b1d7c64da64d03d7a1854a588c2cb8430c0d30", - "0x0000000000000000000000000000000000000000000000000000000000000027": "0xd88ddfeed400a8755596b21942c1497e114c302e6118290f91e6772976041fa1", - "0x0000000000000000000000000000000000000000000000000000000000000028": "0x87eb0ddba57e35f6d286673802a4af5975e22506c7cf4c64bb6be5ee11527f2c", - "0x0000000000000000000000000000000000000000000000000000000000000029": "0x26846476fd5fc54a5d43385167c95144f2643f533cc85bb9d16b782f8d7db193", - "0x000000000000000000000000000000000000000000000000000000000000002a": 
"0x506d86582d252405b840018792cad2bf1259f1ef5aa5f887e13cb2f0094f51e1", - "0x000000000000000000000000000000000000000000000000000000000000002b": "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", - "0x000000000000000000000000000000000000000000000000000000000000002c": "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", - "0x000000000000000000000000000000000000000000000000000000000000002d": "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f", - "0x000000000000000000000000000000000000000000000000000000000000002e": "0xdf6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e", - "0x000000000000000000000000000000000000000000000000000000000000002f": "0xb58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da7293784", - "0x0000000000000000000000000000000000000000000000000000000000000030": "0xd49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bb", - "0x0000000000000000000000000000000000000000000000000000000000000031": "0x8fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb", - "0x0000000000000000000000000000000000000000000000000000000000000032": "0x8d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab", - "0x0000000000000000000000000000000000000000000000000000000000000033": "0x95eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4", - "0x0000000000000000000000000000000000000000000000000000000000000034": "0xf893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f", - "0x0000000000000000000000000000000000000000000000000000000000000035": "0xcddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa", - "0x0000000000000000000000000000000000000000000000000000000000000036": "0x8a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c", - "0x0000000000000000000000000000000000000000000000000000000000000037": "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", - "0x0000000000000000000000000000000000000000000000000000000000000038": 
"0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", - "0x0000000000000000000000000000000000000000000000000000000000000039": "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", - "0x000000000000000000000000000000000000000000000000000000000000003a": "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", - "0x000000000000000000000000000000000000000000000000000000000000003b": "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", - "0x000000000000000000000000000000000000000000000000000000000000003c": "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", - "0x000000000000000000000000000000000000000000000000000000000000003d": "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", - "0x000000000000000000000000000000000000000000000000000000000000003e": "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", - "0x000000000000000000000000000000000000000000000000000000000000003f": "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", - "0x0000000000000000000000000000000000000000000000000000000000000040": "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7" - } - }, - "0x9a4aa7d9C2F6386e5F24d790eB2FFB9fd543A170": { - "balance": "1000000000000000000000000000" - }, - "0x5E3141B900ac5f5608b0d057D10d45a0e4927cD9": { - "balance": "1000000000000000000000000000" - }, - "0x7cF5Dbc49F0904065664b5B6C0d69CaB55F33988": { - "balance": "1000000000000000000000000000" - }, - "0x8D12b071A6F3823A535D38C4a583a2FA1859e822": { - "balance": "1000000000000000000000000000" - }, - "0x3B575D3cda6b30736A38B031E0d245E646A21135": { - "balance": "1000000000000000000000000000" - }, - "0x53bDe6CF93461674F590E532006b4022dA57A724": { - "balance": "1000000000000000000000000000" - } - }, - "coinbase": "0x0000000000000000000000000000000000000000", - "difficulty": "0x01", - "extraData": "", - "gasLimit": "0x400000", - "nonce": "0x1234", - "mixhash": 
"0x0000000000000000000000000000000000000000000000000000000000000000", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "1662465600" -} diff --git a/scripts/tests/network_params.yaml b/scripts/tests/network_params.yaml new file mode 100644 index 0000000000..1725203138 --- /dev/null +++ b/scripts/tests/network_params.yaml @@ -0,0 +1,16 @@ +# Full configuration reference [here](https://github.com/kurtosis-tech/ethereum-package?tab=readme-ov-file#configuration). +participants: + - el_type: geth + el_image: ethereum/client-go:latest + cl_type: lighthouse + cl_image: lighthouse:local + cl_extra_params: + - --target-peers=3 + count: 4 +network_params: + deneb_fork_epoch: 0 + seconds_per_slot: 3 + num_validator_keys_per_node: 20 +global_log_level: debug +snooper_enabled: false +additional_services: [] diff --git a/scripts/tests/vars.env b/scripts/tests/vars.env deleted file mode 100644 index 4d8f9db64e..0000000000 --- a/scripts/tests/vars.env +++ /dev/null @@ -1,66 +0,0 @@ -# Path to the geth binary -GETH_BINARY=geth -EL_BOOTNODE_BINARY=bootnode - -# Base directories for the validator keys and secrets -DATADIR=~/.lighthouse/local-testnet - -# Directory for the eth2 config -TESTNET_DIR=$DATADIR/testnet - -EL_BOOTNODE_ENODE="enode://51ea9bb34d31efc3491a842ed13b8cab70e753af108526b57916d716978b380ed713f4336a80cdb85ec2a115d5a8c0ae9f3247bed3c84d3cb025c6bab311062c@127.0.0.1:0?discport=30301" - -# Hardcoded deposit contract -DEPOSIT_CONTRACT_ADDRESS=4242424242424242424242424242424242424242 - -GENESIS_FORK_VERSION=0x42424242 - -# Block hash generated from genesis.json in directory -ETH1_BLOCK_HASH=7a5c656343c3a66dcf75415958b500e8873f9dab0cd588e6cf0785b52a06dd34 - -VALIDATOR_COUNT=80 -GENESIS_VALIDATOR_COUNT=80 - -# Number of beacon_node instances that you intend to run -BN_COUNT=4 - -# Number of validator clients -VC_COUNT=$BN_COUNT - -# Number of seconds to delay to start genesis block. 
-# If started by a script this can be 0, if starting by hand -# use something like 180. -GENESIS_DELAY=0 - -# Port for P2P communication with bootnode -BOOTNODE_PORT=4242 - -# Network ID and Chain ID of local eth1 test network -CHAIN_ID=4242 - -# Hard fork configuration -ALTAIR_FORK_EPOCH=0 -BELLATRIX_FORK_EPOCH=0 -CAPELLA_FORK_EPOCH=0 -DENEB_FORK_EPOCH=0 -ELECTRA_FORK_EPOCH=18446744073709551615 - -TTD=0 - -# Spec version (mainnet or minimal) -SPEC_PRESET=mainnet - -# Seconds per Eth2 slot -SECONDS_PER_SLOT=3 - -# Seconds per Eth1 block -SECONDS_PER_ETH1_BLOCK=1 - -# Proposer score boost percentage -PROPOSER_SCORE_BOOST=70 - -# Command line arguments for beacon node client -BN_ARGS="" - -# Enable doppelganger detection -VC_ARGS=" --enable-doppelganger-protection " diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 90fb54cd1a..ef5cb8249e 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -25,7 +25,6 @@ rand = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true } slog = { workspace = true } -sloggers = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } types = { workspace = true } diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index f3d00fa035..fc4614f5d4 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -24,19 +24,16 @@ serde_json = { workspace = true } serde_repr = { workspace = true } serde_yaml = { workspace = true } eth2_network_config = { workspace = true } -ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } -cached_tree_hash = { workspace = true } state_processing = { workspace = true } swap_or_not_shuffle = { workspace = true } types = { workspace = true } snap = { workspace = true } fs2 = { workspace = true } beacon_chain = { workspace = true } -store = { workspace = true } 
fork_choice = { workspace = true } execution_layer = { workspace = true } logging = { workspace = true } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index f0749c3c7e..bd8cc79156 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -24,9 +24,9 @@ use std::future::Future; use std::sync::Arc; use std::time::Duration; use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlobSidecar, BlobsList, Checkpoint, - ExecutionBlockHash, Hash256, IndexedAttestation, KzgProof, ProposerPreparationData, - SignedBeaconBlock, Slot, Uint256, + Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlobSidecar, BlobsList, + BlockImportSource, Checkpoint, ExecutionBlockHash, Hash256, IndexedAttestation, KzgProof, + ProposerPreparationData, SignedBeaconBlock, Slot, Uint256, }; #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] @@ -498,6 +498,7 @@ impl Tester { block_root, block.clone(), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ))? .map(|avail: AvailabilityProcessingStatus| avail.try_into()); diff --git a/testing/eth1_test_rig/src/lib.rs b/testing/eth1_test_rig/src/lib.rs index 0063975ee1..55a7160594 100644 --- a/testing/eth1_test_rig/src/lib.rs +++ b/testing/eth1_test_rig/src/lib.rs @@ -95,7 +95,7 @@ impl DepositContract { .await .map_err(|e| { format!( - "Failed to deploy contract: {}. Is scripts/anvil_tests_node.sh running?.", + "Failed to deploy contract: {}. 
Is the RPC server running?.", e ) })?; diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 7f66658f0f..43d24cd123 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -10,7 +10,6 @@ serde_json = { workspace = true } task_executor = { workspace = true } tokio = { workspace = true } futures = { workspace = true } -environment = { workspace = true } execution_layer = { workspace = true } sensitive_url = { workspace = true } types = { workspace = true } diff --git a/testing/network_testing/README.md b/testing/network_testing/README.md index f97c3cff28..1dcf372dbd 100644 --- a/testing/network_testing/README.md +++ b/testing/network_testing/README.md @@ -50,11 +50,11 @@ $ cargo build --release --bin lighthouse --features network/disable-backfill Once built, it can run via checkpoint sync on any network, making sure we point to our mock-el -Prater testnet: +Holesky testnet: ``` -$ lighthouse --network prater bn --execution-jwt /tmp/mockel.jwt --checkpoint-sync-url -https://prater.checkpoint.sigp.io --execution-endpoint http://localhost:8551 +$ lighthouse --network holesky bn --execution-jwt /tmp/mockel.jwt --checkpoint-sync-url +https://holesky.checkpoint.sigp.io --execution-endpoint http://localhost:8551 ``` Mainnet: diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index d7ff7b3dd8..f8769b10e2 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -8,7 +8,6 @@ edition = { workspace = true } [dependencies] node_test_rig = { path = "../node_test_rig" } -eth1 = { workspace = true } execution_layer = { workspace = true } types = { workspace = true } parking_lot = { workspace = true } @@ -18,7 +17,5 @@ env_logger = { workspace = true } clap = { workspace = true } rayon = { workspace = true } sensitive_url = { path = "../../common/sensitive_url" } -ssz_types = { workspace = true } -ethereum-types = { 
workspace = true } eth2_network_config = { workspace = true } serde_json = { workspace = true } diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index 755bb71b43..f69d107e34 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -27,15 +27,32 @@ const SUGGESTED_FEE_RECIPIENT: [u8; 20] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { - let node_count = value_t!(matches, "nodes", usize).expect("Missing nodes default"); - let proposer_nodes = - value_t!(matches, "proposer-nodes", usize).expect("Missing proposer-nodes default"); - let validators_per_node = value_t!(matches, "validators-per-node", usize) - .expect("Missing validators-per-node default"); - let speed_up_factor = - value_t!(matches, "speed-up-factor", u64).expect("Missing speed-up-factor default"); - let log_level = value_t!(matches, "debug-level", String).expect("Missing default log-level"); - let continue_after_checks = matches.is_present("continue-after-checks"); + let node_count = matches + .get_one::("nodes") + .expect("missing nodes default") + .parse::() + .expect("missing nodes default"); + let proposer_nodes = matches + .get_one::("proposer-nodes") + .unwrap_or(&String::from("0")) + .parse::() + .unwrap_or(0); + println!("PROPOSER-NODES: {}", proposer_nodes); + let validators_per_node = matches + .get_one::("validators-per-node") + .expect("missing validators-per-node default") + .parse::() + .expect("missing validators-per-node default"); + let speed_up_factor = matches + .get_one::("speed-up-factor") + .expect("missing speed-up-factor default") + .parse::() + .expect("missing speed-up-factor default"); + let log_level = matches + .get_one::("debug-level") + .expect("missing debug-level"); + + let continue_after_checks = matches.get_flag("continue-after-checks"); println!("Basic Simulator:"); println!(" nodes: {}", node_count); @@ -64,7 +81,7 
@@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { .initialize_logger(LoggerConfig { path: None, debug_level: log_level.clone(), - logfile_debug_level: log_level, + logfile_debug_level: log_level.clone(), log_format: None, logfile_format: None, log_color: false, diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index 00af7e560c..a82c8b8577 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -1,12 +1,12 @@ -use clap::{App, Arg, SubCommand}; +use clap::{crate_version, Arg, ArgAction, Command}; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("simulator") +pub fn cli_app() -> Command { + Command::new("simulator") .version(crate_version!()) .author("Sigma Prime ") .about("Options for interacting with simulator") .subcommand( - SubCommand::with_name("basic-sim") + Command::new("basic-sim") .about( "Runs a Beacon Chain simulation with `n` beacon node and validator clients, \ each with `v` validators. \ @@ -16,55 +16,55 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { exit immediately.", ) .arg( - Arg::with_name("nodes") - .short("n") + Arg::new("nodes") + .short('n') .long("nodes") - .takes_value(true) + .action(ArgAction::Set) .default_value("3") .help("Number of beacon nodes"), ) .arg( - Arg::with_name("proposer-nodes") - .short("p") + Arg::new("proposer-nodes") + .short('p') .long("proposer-nodes") - .takes_value(true) + .action(ArgAction::Set) .default_value("3") .help("Number of proposer-only beacon nodes"), ) .arg( - Arg::with_name("validators-per-node") - .short("v") + Arg::new("validators-per-node") + .short('v') .long("validators-per-node") - .takes_value(true) + .action(ArgAction::Set) .default_value("20") .help("Number of validators"), ) .arg( - Arg::with_name("speed-up-factor") - .short("s") + Arg::new("speed-up-factor") + .short('s') .long("speed-up-factor") - .takes_value(true) + .action(ArgAction::Set) .default_value("3") .help("Speed up factor. 
Please use a divisor of 12."), ) .arg( - Arg::with_name("debug-level") - .short("d") + Arg::new("debug-level") + .short('d') .long("debug-level") - .takes_value(true) + .action(ArgAction::Set) .default_value("debug") .help("Set the severity level of the logs."), ) .arg( - Arg::with_name("continue-after-checks") - .short("c") + Arg::new("continue-after-checks") + .short('c') .long("continue_after_checks") - .takes_value(false) + .action(ArgAction::SetTrue) .help("Continue after checks (default false)"), ), ) .subcommand( - SubCommand::with_name("fallback-sim") + Command::new("fallback-sim") .about( "Runs a Beacon Chain simulation with `c` validator clients where each VC is \ connected to `b` beacon nodes with `v` validators. \ @@ -76,50 +76,50 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Otherwise, the simulation will exit and an error will be reported.", ) .arg( - Arg::with_name("vc-count") - .short("c") + Arg::new("vc-count") + .short('c') .long("vc-count") - .takes_value(true) + .action(ArgAction::Set) .default_value("3") .help("Number of validator clients."), ) .arg( - Arg::with_name("bns-per-vc") - .short("b") + Arg::new("bns-per-vc") + .short('b') .long("bns-per-vc") - .takes_value(true) + .action(ArgAction::Set) .default_value("2") .help("Number of beacon nodes per validator client."), ) .arg( - Arg::with_name("validators-per-vc") - .short("v") + Arg::new("validators-per-vc") + .short('v') .long("validators-per-vc") - .takes_value(true) + .action(ArgAction::Set) .default_value("20") .help("Number of validators per client."), ) .arg( - Arg::with_name("speed-up-factor") - .short("s") + Arg::new("speed-up-factor") + .short('s') .long("speed-up-factor") - .takes_value(true) + .action(ArgAction::Set) .default_value("3") .help("Speed up factor. 
Please use a divisor of 12."), ) .arg( - Arg::with_name("debug-level") - .short("d") + Arg::new("debug-level") + .short('d') .long("debug-level") - .takes_value(true) + .action(ArgAction::Set) .default_value("debug") .help("Set the severity level of the logs."), ) .arg( - Arg::with_name("continue-after-checks") - .short("c") + Arg::new("continue-after-checks") + .short('c') .long("continue_after_checks") - .takes_value(false) + .action(ArgAction::SetTrue) .help("Continue after checks (default false)"), ), ) diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index 01f7c8418e..c017cc5fdf 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -34,15 +34,36 @@ const SUGGESTED_FEE_RECIPIENT: [u8; 20] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { - let vc_count = value_t!(matches, "vc-count", usize).expect("Missing validator-count default"); - let validators_per_vc = - value_t!(matches, "validators-per-vc", usize).expect("Missing validators-per-vc default"); - let bns_per_vc = value_t!(matches, "bns-per-vc", usize).expect("Missing bns-per-vc default"); + let vc_count = matches + .get_one::("vc-count") + .expect("missing vc-count default") + .parse::() + .expect("missing vc-count default"); + + let validators_per_vc = matches + .get_one::("validators-per-vc") + .expect("missing validators-per-vc default") + .parse::() + .expect("missing validators-per-vc default"); + + let bns_per_vc = matches + .get_one::("bns-per-vc") + .expect("missing bns-per-vc default") + .parse::() + .expect("missing bns-per-vc default"); + assert!(bns_per_vc > 1); - let speed_up_factor = - value_t!(matches, "speed-up-factor", u64).expect("Missing speed-up-factor default"); - let log_level = value_t!(matches, "debug-level", String).expect("Missing default log-level"); - let continue_after_checks = 
matches.is_present("continue-after-checks"); + let speed_up_factor = matches + .get_one::("speed-up-factor") + .expect("missing speed-up-factor default") + .parse::() + .expect("missing speed-up-factor default"); + + let log_level = matches + .get_one::("debug-level") + .expect("missing debug-level default"); + + let continue_after_checks = matches.get_flag("continue-after-checks"); println!("Fallback Simulator:"); println!(" vc-count: {}", vc_count); @@ -70,7 +91,7 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { .initialize_logger(LoggerConfig { path: None, debug_level: log_level.clone(), - logfile_debug_level: log_level, + logfile_debug_level: log_level.clone(), log_format: None, logfile_format: None, log_color: false, diff --git a/testing/simulator/src/main.rs b/testing/simulator/src/main.rs index d1a2d0dc67..03ee902c77 100644 --- a/testing/simulator/src/main.rs +++ b/testing/simulator/src/main.rs @@ -11,7 +11,6 @@ //! easy-to-find files and stdout only contained info from the simulation. //! 
-#[macro_use] extern crate clap; mod basic_sim; @@ -34,14 +33,14 @@ fn main() { let matches = cli_app().get_matches(); match matches.subcommand() { - ("basic-sim", Some(matches)) => match basic_sim::run_basic_sim(matches) { + Some(("basic-sim", matches)) => match basic_sim::run_basic_sim(matches) { Ok(()) => println!("Simulation exited successfully"), Err(e) => { eprintln!("Simulation exited with error: {}", e); std::process::exit(1) } }, - ("fallback-sim", Some(matches)) => match fallback_sim::run_fallback_sim(matches) { + Some(("fallback-sim", matches)) => match fallback_sim::run_fallback_sim(matches) { Ok(()) => println!("Simulation exited successfully"), Err(e) => { eprintln!("Simulation exited with error: {}", e); diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 292e10d054..911704e751 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -901,13 +901,14 @@ mod tests { } #[tokio::test] - async fn prater_base_types() { - test_base_types("prater", 4246).await + async fn mainnet_bellatrix_types() { + test_bellatrix_types("mainnet", 4244).await } #[tokio::test] - async fn prater_altair_types() { - test_altair_types("prater", 4247).await + async fn holesky_bellatrix_types() { + // web3signer does not support forks prior to Bellatrix on Holesky + test_bellatrix_types("holesky", 4247).await } #[tokio::test] diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 011243e414..e1a82690ed 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -61,3 +61,4 @@ system_health = { path = "../common/system_health" } logging = { workspace = true } strum = { workspace = true } itertools = { workspace = true } +fdlimit = "0.3.0" diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index b497abd7dd..04554786f6 100644 --- 
a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -23,7 +23,7 @@ pub const POOL_SIZE: u32 = 1; #[cfg(not(test))] pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5); #[cfg(test)] -pub const CONNECTION_TIMEOUT: Duration = Duration::from_millis(500); +pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(1); /// Supported version of the interchange format. pub const SUPPORTED_INTERCHANGE_FORMAT_VERSION: u64 = 5; diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 9f4611ff6a..5419329927 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -1,34 +1,47 @@ -use clap::{App, Arg}; +use clap::{builder::ArgPredicate, Arg, ArgAction, Command}; +use clap_utils::{get_color_style, FLAG_HEADER}; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("validator_client") - .visible_aliases(&["v", "vc", "validator"]) - .setting(clap::AppSettings::ColoredHelp) +pub fn cli_app() -> Command { + Command::new("validator_client") + .visible_aliases(["v", "vc", "validator"]) + .styles(get_color_style()) + .display_order(0) .about( "When connected to a beacon node, performs the duties of a staked \ validator (e.g., proposing blocks and attestations).", ) .arg( - Arg::with_name("beacon-nodes") + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER) + ) + .arg( + Arg::new("beacon-nodes") .long("beacon-nodes") .value_name("NETWORK_ADDRESSES") .help("Comma-separated addresses to one or more beacon node HTTP APIs. \ Default is http://localhost:5052." ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("proposer-nodes") + Arg::new("proposer-nodes") .long("proposer-nodes") .value_name("NETWORK_ADDRESSES") .help("Comma-separated addresses to one or more beacon node HTTP APIs. 
\ These specify nodes that are used to send beacon block proposals. A failure will revert back to the standard beacon nodes specified in --beacon-nodes." ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) // TODO remove this flag in a future release .arg( - Arg::with_name("disable-run-on-all") + Arg::new("disable-run-on-all") .long("disable-run-on-all") .value_name("DISABLE_RUN_ON_ALL") .help("DEPRECATED. Use --broadcast. \ @@ -36,10 +49,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { and proposer preparation messages to all beacon nodes provided in the \ `--beacon-nodes flag`. This option changes that behaviour such that these \ api calls only go out to the first available and synced beacon node") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("broadcast") + Arg::new("broadcast") .long("broadcast") .value_name("API_TOPICS") .help("Comma-separated list of beacon API topics to broadcast to all beacon nodes. \ @@ -47,10 +62,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { sync-committee. Default (when flag is omitted) is to broadcast \ subscriptions only." ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("validators-dir") + Arg::new("validators-dir") .long("validators-dir") .alias("validator-dir") .value_name("VALIDATORS_DIR") @@ -59,11 +75,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { each validator along with the common slashing protection database \ and the validator_definitions.yml" ) - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("datadir") + .display_order(0) ) .arg( - Arg::with_name("secrets-dir") + Arg::new("secrets-dir") .long("secrets-dir") .value_name("SECRETS_DIRECTORY") .help( @@ -72,11 +89,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { name is the 0x-prefixed hex representation of the validators voting public \ key. 
Defaults to ~/.lighthouse/{network}/secrets.", ) - .takes_value(true) + .action(ArgAction::Set) + .conflicts_with("datadir") + .display_order(0) ) .arg( - Arg::with_name("init-slashing-protection") + Arg::new("init-slashing-protection") .long("init-slashing-protection") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "If present, do not require the slashing protection database to exist before \ running. You SHOULD NOT use this flag unless you're certain that a new \ @@ -84,78 +105,95 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { will have been initialized when you imported your validator keys. If you \ misplace your database and then run with this flag you risk being slashed." ) + .display_order(0) ) .arg( - Arg::with_name("disable-auto-discover") + Arg::new("disable-auto-discover") .long("disable-auto-discover") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "If present, do not attempt to discover new validators in the validators-dir. Validators \ will need to be manually added to the validator_definitions.yml file." ) + .display_order(0) ) .arg( - Arg::with_name("use-long-timeouts") + Arg::new("use-long-timeouts") .long("use-long-timeouts") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, the validator client will use longer timeouts for requests \ made to the beacon node. This flag is generally not recommended, \ longer timeouts can cause missed duties when fallbacks are used.") + .display_order(0) ) .arg( - Arg::with_name("beacon-nodes-tls-certs") + Arg::new("beacon-nodes-tls-certs") .long("beacon-nodes-tls-certs") .value_name("CERTIFICATE-FILES") - .takes_value(true) + .action(ArgAction::Set) .help("Comma-separated paths to custom TLS certificates to use when connecting \ to a beacon node (and/or proposer node). These certificates must be in PEM format and are used \ in addition to the OS trust store. 
Commas must only be used as a \ delimiter, and must not be part of the certificate path.") + .display_order(0) ) // This overwrites the graffiti configured in the beacon node. .arg( - Arg::with_name("graffiti") + Arg::new("graffiti") .long("graffiti") .help("Specify your custom graffiti to be included in blocks.") .value_name("GRAFFITI") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("graffiti-file") + Arg::new("graffiti-file") .long("graffiti-file") .help("Specify a graffiti file to load validator graffitis from.") .value_name("GRAFFITI-FILE") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("graffiti") + .display_order(0) ) .arg( - Arg::with_name("suggested-fee-recipient") + Arg::new("suggested-fee-recipient") .long("suggested-fee-recipient") .help("Once the merge has happened, this address will receive transaction fees \ from blocks proposed by this validator client. If a fee recipient is \ configured in the validator definitions it takes priority over this value.") .value_name("FEE-RECIPIENT") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("produce-block-v3") + Arg::new("produce-block-v3") .long("produce-block-v3") .help("Enable block production via the block v3 endpoint for this validator client. \ This should only be enabled when paired with a beacon node \ that has this endpoint implemented. 
This flag will be enabled by default in \ future.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("distributed") + Arg::new("distributed") .long("distributed") .help("Enables functionality required for running the validator in a distributed validator cluster.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* REST API related arguments */ .arg( - Arg::with_name("http") + Arg::new("http") .long("http") .help("Enable the RESTful HTTP API server. Disabled by default.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* * Note: The HTTP server is **not** encrypted (i.e., not HTTPS) and therefore it is @@ -165,7 +203,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { * must also be used in order to make it clear to the user that this is unsafe. */ .arg( - Arg::with_name("http-address") + Arg::new("http-address") .long("http-address") .requires("http") .value_name("ADDRESS") @@ -175,26 +213,31 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { `--unencrypted-http-transport` flag to ensure the user is aware of the \ risks involved. 
For access via the Internet, users should apply \ transport-layer security like a HTTPS reverse-proxy or SSH tunnelling.") - .requires("unencrypted-http-transport"), + .requires("unencrypted-http-transport") + .display_order(0) ) .arg( - Arg::with_name("unencrypted-http-transport") - .long("unencrypted-http-transport") - .help("This is a safety flag to ensure that the user is aware that the http \ - transport is unencrypted and using a custom HTTP address is unsafe.") - .requires("http-address"), + Arg::new("unencrypted-http-transport") + .long("unencrypted-http-transport") + .help("This is a safety flag to ensure that the user is aware that the http \ + transport is unencrypted and using a custom HTTP address is unsafe.") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .requires("http-address") + .display_order(0) ) .arg( - Arg::with_name("http-port") + Arg::new("http-port") .long("http-port") .requires("http") .value_name("PORT") .help("Set the listen TCP port for the RESTful HTTP API server.") - .default_value_if("http", None, "5062") - .takes_value(true), + .default_value_if("http", ArgPredicate::IsPresent, "5062") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-allow-origin") + Arg::new("http-allow-origin") .long("http-allow-origin") .requires("http") .value_name("ORIGIN") @@ -202,10 +245,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Use * to allow any origin (not recommended in production). \ If no value is supplied, the CORS allowed origin is set to the listen \ address of this server (e.g., http://localhost:5062).") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-allow-keystore-export") + Arg::new("http-allow-keystore-export") .long("http-allow-keystore-export") .requires("http") .help("If present, allow access to the DELETE /lighthouse/keystores HTTP \ @@ -213,44 +257,52 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { consumers who have access to the API token. 
This method is useful for \ exporting validators, however it should be used with caution since it \ exposes private key data to authorized users.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("http-store-passwords-in-secrets-dir") + Arg::new("http-store-passwords-in-secrets-dir") .long("http-store-passwords-in-secrets-dir") .requires("http") .help("If present, any validators created via the HTTP will have keystore \ passwords stored in the secrets-dir rather than the validator \ definitions file.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* Prometheus metrics HTTP server related arguments */ .arg( - Arg::with_name("metrics") + Arg::new("metrics") .long("metrics") .help("Enable the Prometheus metrics HTTP server. Disabled by default.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("metrics-address") + Arg::new("metrics-address") .long("metrics-address") .requires("metrics") .value_name("ADDRESS") .help("Set the listen address for the Prometheus metrics HTTP server.") - .default_value_if("metrics", None, "127.0.0.1") - .takes_value(true), + .default_value_if("metrics", ArgPredicate::IsPresent, "127.0.0.1") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("metrics-port") + Arg::new("metrics-port") .long("metrics-port") .requires("metrics") .value_name("PORT") .help("Set the listen TCP port for the Prometheus metrics HTTP server.") - .default_value_if("metrics", None, "5064") - .takes_value(true), + .default_value_if("metrics", ArgPredicate::IsPresent, "5064") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("metrics-allow-origin") + Arg::new("metrics-allow-origin") .long("metrics-allow-origin") .requires("metrics") .value_name("ORIGIN") @@ -258,22 +310,25 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { 
Use * to allow any origin (not recommended in production). \ If no value is supplied, the CORS allowed origin is set to the listen \ address of this server (e.g., http://localhost:5064).") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enable-high-validator-count-metrics") + Arg::new("enable-high-validator-count-metrics") .long("enable-high-validator-count-metrics") .help("Enable per validator metrics for > 64 validators. \ Note: This flag is automatically enabled for <= 64 validators. \ Enabling this metric for higher validator counts will lead to higher volume \ of prometheus metrics being collected.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* * Explorer metrics */ .arg( - Arg::with_name("monitoring-endpoint") + Arg::new("monitoring-endpoint") .long("monitoring-endpoint") .value_name("ADDRESS") .help("Enables the monitoring service for sending system metrics to a remote endpoint. \ @@ -282,19 +337,21 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Note: This will send information to a remote sever which may identify and associate your \ validators, IP address and other personal information. Always use a HTTPS connection \ and never provide an untrusted URL.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("monitoring-endpoint-period") + Arg::new("monitoring-endpoint-period") .long("monitoring-endpoint-period") .value_name("SECONDS") .help("Defines how many seconds to wait between each message sent to \ the monitoring-endpoint. 
Default: 60s") .requires("monitoring-endpoint") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enable-doppelganger-protection") + Arg::new("enable-doppelganger-protection") .long("enable-doppelganger-protection") .value_name("ENABLE_DOPPELGANGER_PROTECTION") .help("If this flag is set, Lighthouse will delay startup for three epochs and \ @@ -306,56 +363,62 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { to avoid potentially committing a slashable offense. Use this flag in order to \ ENABLE this functionality, without this flag Lighthouse will begin attesting \ immediately.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("builder-proposals") + Arg::new("builder-proposals") .long("builder-proposals") .alias("private-tx-proposals") .help("If this flag is set, Lighthouse will query the Beacon Node for only block \ headers during proposals and will sign over headers. Useful for outsourcing \ execution payload construction during proposals.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("builder-registration-timestamp-override") + Arg::new("builder-registration-timestamp-override") .long("builder-registration-timestamp-override") .alias("builder-registration-timestamp-override") .help("This flag takes a unix timestamp value that will be used to override the \ timestamp used in the builder api registration") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("gas-limit") + Arg::new("gas-limit") .long("gas-limit") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .help("The gas limit to be used in all builder proposals for all validators managed \ by this validator client. Note this will not necessarily be used if the gas limit \ set here moves too far from the previous block's gas limit. 
[default: 30,000,000]") - .requires("builder-proposals"), + .requires("builder-proposals") + .display_order(0) ) .arg( - Arg::with_name("latency-measurement-service") - .long("latency-measurement-service") - .value_name("BOOLEAN") - .help("Set to 'true' to enable a service that periodically attempts to measure latency to BNs. \ - Set to 'false' to disable.") - .default_value("true") - .takes_value(true), + Arg::new("disable-latency-measurement-service") + .long("disable-latency-measurement-service") + .help("Disables the service that periodically attempts to measure latency to BNs.") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("validator-registration-batch-size") + Arg::new("validator-registration-batch-size") .long("validator-registration-batch-size") .value_name("INTEGER") .help("Defines the number of validators per \ validator/register_validator request sent to the BN. This value \ can be reduced to avoid timeouts from builders.") .default_value("500") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder-boost-factor") + Arg::new("builder-boost-factor") .long("builder-boost-factor") .value_name("UINT64") .help("Defines the boost factor, \ @@ -363,17 +426,20 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { when choosing between a builder payload header and payload from \ the local execution node.") .conflicts_with("prefer-builder-proposals") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("prefer-builder-proposals") + Arg::new("prefer-builder-proposals") .long("prefer-builder-proposals") .help("If this flag is set, Lighthouse will always prefer blocks \ constructed by builders, regardless of payload value.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("beacon-nodes-sync-tolerances") + Arg::new("beacon-nodes-sync-tolerances") 
.long("beacon-nodes-sync-tolerances") .value_name("SYNC_TOLERANCES") .help("A comma-separated list of 3 values which sets the size of each sync distance range when \ @@ -395,10 +461,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Nodes in the `Synced` range will tie-break based on their ordering in `--beacon-nodes`. \ This ensures the primary beacon node is prioritised. \ [default: 8,8,48]") - .takes_value(true) + .action(ArgAction::Set) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("disable-slashing-protection-web3signer") + Arg::new("disable-slashing-protection-web3signer") .long("disable-slashing-protection-web3signer") .help("Disable Lighthouse's slashing protection for all web3signer keys. This can \ reduce the I/O burden on the VC but is only safe if slashing protection \ @@ -406,26 +474,30 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { THIS FLAG UNLESS YOU ARE CERTAIN THAT SLASHING PROTECTION IS ENABLED ON \ THE REMOTE SIGNER. YOU WILL GET SLASHED IF YOU USE THIS FLAG WITHOUT \ ENABLING WEB3SIGNER'S SLASHING PROTECTION.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* * Experimental/development options. */ .arg( - Arg::with_name("web3-signer-keep-alive-timeout") + Arg::new("web3-signer-keep-alive-timeout") .long("web3-signer-keep-alive-timeout") .value_name("MILLIS") .default_value("20000") .help("Keep-alive timeout for each web3signer connection. Set to 'null' to never \ timeout") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("web3-signer-max-idle-connections") + Arg::new("web3-signer-max-idle-connections") .long("web3-signer-max-idle-connections") .value_name("COUNT") .help("Maximum number of idle connections to maintain per web3signer host. 
Default \ is unlimited.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 76642c7ad8..f41835779a 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -157,15 +157,15 @@ impl Config { .unwrap_or_else(|| PathBuf::from(".")); let (mut validator_dir, mut secrets_dir) = (None, None); - if cli_args.value_of("datadir").is_some() { + if cli_args.get_one::("datadir").is_some() { let base_dir: PathBuf = parse_required(cli_args, "datadir")?; validator_dir = Some(base_dir.join(DEFAULT_VALIDATOR_DIR)); secrets_dir = Some(base_dir.join(DEFAULT_SECRET_DIR)); } - if cli_args.value_of("validators-dir").is_some() { + if cli_args.get_one::("validators-dir").is_some() { validator_dir = Some(parse_required(cli_args, "validators-dir")?); } - if cli_args.value_of("secrets-dir").is_some() { + if cli_args.get_one::("secrets-dir").is_some() { secrets_dir = Some(parse_required(cli_args, "secrets-dir")?); } @@ -201,11 +201,11 @@ impl Config { .map_err(|e| format!("Unable to parse proposer node URL: {:?}", e))?; } - config.disable_auto_discover = cli_args.is_present("disable-auto-discover"); - config.init_slashing_protection = cli_args.is_present("init-slashing-protection"); - config.use_long_timeouts = cli_args.is_present("use-long-timeouts"); + config.disable_auto_discover = cli_args.get_flag("disable-auto-discover"); + config.init_slashing_protection = cli_args.get_flag("init-slashing-protection"); + config.use_long_timeouts = cli_args.get_flag("use-long-timeouts"); - if let Some(graffiti_file_path) = cli_args.value_of("graffiti-file") { + if let Some(graffiti_file_path) = cli_args.get_one::("graffiti-file") { let mut graffiti_file = GraffitiFile::new(graffiti_file_path.into()); graffiti_file .read_graffiti_file() @@ -214,7 +214,7 @@ impl Config { info!(log, "Successfully loaded graffiti file"; "path" => graffiti_file_path); } - if let 
Some(input_graffiti) = cli_args.value_of("graffiti") { + if let Some(input_graffiti) = cli_args.get_one::("graffiti") { let graffiti_bytes = input_graffiti.as_bytes(); if graffiti_bytes.len() > GRAFFITI_BYTES_LEN { return Err(format!( @@ -243,11 +243,11 @@ impl Config { config.beacon_nodes_tls_certs = Some(tls_certs.split(',').map(PathBuf::from).collect()); } - if cli_args.is_present("distributed") { + if cli_args.get_flag("distributed") { config.distributed = true; } - if cli_args.is_present("disable-run-on-all") { + if cli_args.get_flag("disable-run-on-all") { warn!( log, "The --disable-run-on-all flag is deprecated"; @@ -255,7 +255,7 @@ impl Config { ); config.broadcast_topics = vec![]; } - if let Some(broadcast_topics) = cli_args.value_of("broadcast") { + if let Some(broadcast_topics) = cli_args.get_one::("broadcast") { config.broadcast_topics = broadcast_topics .split(',') .filter(|t| *t != "none") @@ -271,9 +271,9 @@ impl Config { * Beacon node fallback */ - config.beacon_node_fallback.disable_run_on_all = cli_args.is_present("disable-run-on-all"); + config.beacon_node_fallback.disable_run_on_all = cli_args.get_flag("disable-run-on-all"); - if let Some(sync_tolerance) = cli_args.value_of("beacon-nodes-sync-tolerances") { + if let Some(sync_tolerance) = cli_args.get_one::("beacon-nodes-sync-tolerances") { config.beacon_node_fallback.sync_tolerances = BeaconNodeSyncDistanceTiers::from_str(sync_tolerance)?; } else { @@ -300,12 +300,12 @@ impl Config { * Http API server */ - if cli_args.is_present("http") { + if cli_args.get_flag("http") { config.http_api.enabled = true; } - if let Some(address) = cli_args.value_of("http-address") { - if cli_args.is_present("unencrypted-http-transport") { + if let Some(address) = cli_args.get_one::("http-address") { + if cli_args.get_flag("unencrypted-http-transport") { config.http_api.listen_addr = address .parse::() .map_err(|_| "http-address is not a valid IP address.")?; @@ -317,13 +317,13 @@ impl Config { } } - if let 
Some(port) = cli_args.value_of("http-port") { + if let Some(port) = cli_args.get_one::("http-port") { config.http_api.listen_port = port .parse::() .map_err(|_| "http-port is not a valid u16.")?; } - if let Some(allow_origin) = cli_args.value_of("http-allow-origin") { + if let Some(allow_origin) = cli_args.get_one::("http-allow-origin") { // Pre-validate the config value to give feedback to the user on node startup, instead of // as late as when the first API response is produced. hyper::header::HeaderValue::from_str(allow_origin) @@ -332,11 +332,11 @@ impl Config { config.http_api.allow_origin = Some(allow_origin.to_string()); } - if cli_args.is_present("http-allow-keystore-export") { + if cli_args.get_flag("http-allow-keystore-export") { config.http_api.allow_keystore_export = true; } - if cli_args.is_present("http-store-passwords-in-secrets-dir") { + if cli_args.get_flag("http-store-passwords-in-secrets-dir") { config.http_api.store_passwords_in_secrets_dir = true; } @@ -344,27 +344,27 @@ impl Config { * Prometheus metrics HTTP server */ - if cli_args.is_present("metrics") { + if cli_args.get_flag("metrics") { config.http_metrics.enabled = true; } - if cli_args.is_present("enable-high-validator-count-metrics") { + if cli_args.get_flag("enable-high-validator-count-metrics") { config.enable_high_validator_count_metrics = true; } - if let Some(address) = cli_args.value_of("metrics-address") { + if let Some(address) = cli_args.get_one::("metrics-address") { config.http_metrics.listen_addr = address .parse::() .map_err(|_| "metrics-address is not a valid IP address.")?; } - if let Some(port) = cli_args.value_of("metrics-port") { + if let Some(port) = cli_args.get_one::("metrics-port") { config.http_metrics.listen_port = port .parse::() .map_err(|_| "metrics-port is not a valid u16.")?; } - if let Some(allow_origin) = cli_args.value_of("metrics-allow-origin") { + if let Some(allow_origin) = cli_args.get_one::("metrics-allow-origin") { // Pre-validate the config value 
to give feedback to the user on node startup, instead of // as late as when the first API response is produced. hyper::header::HeaderValue::from_str(allow_origin) @@ -373,14 +373,14 @@ impl Config { config.http_metrics.allow_origin = Some(allow_origin.to_string()); } - if cli_args.is_present(DISABLE_MALLOC_TUNING_FLAG) { + if cli_args.get_flag(DISABLE_MALLOC_TUNING_FLAG) { config.http_metrics.allocator_metrics_enabled = false; } /* * Explorer metrics */ - if let Some(monitoring_endpoint) = cli_args.value_of("monitoring-endpoint") { + if let Some(monitoring_endpoint) = cli_args.get_one::("monitoring-endpoint") { let update_period_secs = clap_utils::parse_optional(cli_args, "monitoring-endpoint-period")?; config.monitoring_api = Some(monitoring_api::Config { @@ -391,24 +391,24 @@ impl Config { }); } - if cli_args.is_present("enable-doppelganger-protection") { + if cli_args.get_flag("enable-doppelganger-protection") { config.enable_doppelganger_protection = true; } - if cli_args.is_present("builder-proposals") { + if cli_args.get_flag("builder-proposals") { config.builder_proposals = true; } - if cli_args.is_present("produce-block-v3") { + if cli_args.get_flag("produce-block-v3") { config.produce_block_v3 = true; } - if cli_args.is_present("prefer-builder-proposals") { + if cli_args.get_flag("prefer-builder-proposals") { config.prefer_builder_proposals = true; } config.gas_limit = cli_args - .value_of("gas-limit") + .get_one::("gas-limit") .map(|gas_limit| { gas_limit .parse::() @@ -417,7 +417,7 @@ impl Config { .transpose()?; if let Some(registration_timestamp_override) = - cli_args.value_of("builder-registration-timestamp-override") + cli_args.get_one::("builder-registration-timestamp-override") { config.builder_registration_timestamp_override = Some( registration_timestamp_override @@ -429,7 +429,7 @@ impl Config { config.builder_boost_factor = parse_optional(cli_args, "builder-boost-factor")?; config.enable_latency_measurement_service = - parse_optional(cli_args, 
"latency-measurement-service")?.unwrap_or(true); + !cli_args.get_flag("disable-latency-measurement-service"); config.validator_registration_batch_size = parse_required(cli_args, "validator-registration-batch-size")?; @@ -438,7 +438,7 @@ impl Config { } config.enable_web3signer_slashing_protection = - if cli_args.is_present("disable-slashing-protection-web3signer") { + if cli_args.get_flag("disable-slashing-protection-web3signer") { warn!( log, "Slashing protection for remote keys disabled"; diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 2ffc9b9adc..f20922e3fe 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -88,14 +88,15 @@ const _: () = assert!({ /// bringing in the entire crate. const _: () = assert!(ATTESTATION_SUBSCRIPTION_OFFSETS[0] > 2); +// The info in the enum variants is displayed in logging, clippy thinks it's dead code. #[derive(Debug)] pub enum Error { UnableToReadSlotClock, - FailedToDownloadAttesters(String), - FailedToProduceSelectionProof(ValidatorStoreError), - InvalidModulo(ArithError), - Arith(ArithError), - SyncDutiesNotFound(u64), + FailedToDownloadAttesters(#[allow(dead_code)] String), + FailedToProduceSelectionProof(#[allow(dead_code)] ValidatorStoreError), + InvalidModulo(#[allow(dead_code)] ArithError), + Arith(#[allow(dead_code)] ArithError), + SyncDutiesNotFound(#[allow(dead_code)] u64), } impl From for Error { @@ -214,6 +215,8 @@ pub struct DutiesService { pub sync_duties: SyncDutiesMap, /// Provides the canonical list of locally-managed validators. pub validator_store: Arc>, + /// Maps unknown validator pubkeys to the next slot time when a poll should be conducted again. + pub unknown_validator_next_poll_slots: RwLock>, /// Tracks the current slot. pub slot_clock: T, /// Provides HTTP access to remote beacon nodes. 
@@ -488,6 +491,24 @@ async fn poll_validator_indices( .is_some(); if !is_known { + let current_slot_opt = duties_service.slot_clock.now(); + + if let Some(current_slot) = current_slot_opt { + let is_first_slot_of_epoch = current_slot % E::slots_per_epoch() == 0; + + // Query an unknown validator later if it was queried within the last epoch, or if + // the current slot is the first slot of an epoch. + let poll_later = duties_service + .unknown_validator_next_poll_slots + .read() + .get(&pubkey) + .map(|&poll_slot| poll_slot > current_slot || is_first_slot_of_epoch) + .unwrap_or(false); + if poll_later { + continue; + } + } + // Query the remote BN to resolve a pubkey to a validator index. let download_result = duties_service .beacon_nodes @@ -528,10 +549,23 @@ async fn poll_validator_indices( .initialized_validators() .write() .set_index(&pubkey, response.data.index); + + duties_service + .unknown_validator_next_poll_slots + .write() + .remove(&pubkey); } // This is not necessarily an error, it just means the validator is not yet known to // the beacon chain. 
Ok(None) => { + if let Some(current_slot) = current_slot_opt { + let next_poll_slot = current_slot.saturating_add(E::slots_per_epoch()); + duties_service + .unknown_validator_next_poll_slots + .write() + .insert(pubkey, next_poll_slot); + } + debug!( log, "Validator without index"; @@ -887,7 +921,7 @@ async fn poll_beacon_attesters_for_epoch( "Attester duties re-org"; "prior_dependent_root" => %prior_dependent_root, "dependent_root" => %dependent_root, - "msg" => "this may happen from time to time" + "note" => "this may happen from time to time" ) } *mut_value = (dependent_root, duty_and_proof); diff --git a/validator_client/src/http_metrics/mod.rs b/validator_client/src/http_metrics/mod.rs index de6c06437b..67cab2bdc3 100644 --- a/validator_client/src/http_metrics/mod.rs +++ b/validator_client/src/http_metrics/mod.rs @@ -17,8 +17,8 @@ use warp::{http::Response, Filter}; #[derive(Debug)] pub enum Error { - Warp(warp::Error), - Other(String), + Warp(#[allow(dead_code)] warp::Error), + Other(#[allow(dead_code)] String), } impl From for Error { diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 14ac15b9e9..246e10cfe2 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -48,7 +48,7 @@ use notifier::spawn_notifier; use parking_lot::RwLock; use preparation_service::{PreparationService, PreparationServiceBuilder}; use reqwest::Certificate; -use slog::{error, info, warn, Logger}; +use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use slot_clock::SystemTimeSlotClock; use std::fs::File; @@ -110,7 +110,7 @@ impl ProductionValidatorClient { /// and attestation production. 
pub async fn new_from_cli( context: RuntimeContext, - cli_args: &ArgMatches<'_>, + cli_args: &ArgMatches, ) -> Result { let config = Config::from_cli(cli_args, context.log()) .map_err(|e| format!("Unable to initialize config: {}", e))?; @@ -122,6 +122,27 @@ impl ProductionValidatorClient { pub async fn new(context: RuntimeContext, config: Config) -> Result { let log = context.log().clone(); + // Attempt to raise soft fd limit. The behavior is OS specific: + // `linux` - raise soft fd limit to hard + // `macos` - raise soft fd limit to `min(kernel limit, hard fd limit)` + // `windows` & rest - noop + match fdlimit::raise_fd_limit().map_err(|e| format!("Unable to raise fd limit: {}", e))? { + fdlimit::Outcome::LimitRaised { from, to } => { + debug!( + log, + "Raised soft open file descriptor resource limit"; + "old_limit" => from, + "new_limit" => to + ); + } + fdlimit::Outcome::Unsupported => { + debug!( + log, + "Raising soft open file descriptor resource limit is not supported" + ); + } + }; + info!( log, "Starting validator client"; @@ -464,6 +485,7 @@ impl ProductionValidatorClient { slot_clock: slot_clock.clone(), beacon_nodes: beacon_nodes.clone(), validator_store: validator_store.clone(), + unknown_validator_next_poll_slots: <_>::default(), spec: context.eth2_config.spec.clone(), context: duties_context, enable_high_validator_count_metrics: config.enable_high_validator_count_metrics, diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index fe520e11f5..8c11027b8d 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -7,7 +7,7 @@ use crate::http_metrics::metrics; use eth2_keystore::Keystore; use lockfile::Lockfile; use parking_lot::Mutex; -use reqwest::Client; +use reqwest::{header::ACCEPT, Client}; use std::path::PathBuf; use std::sync::Arc; use task_executor::TaskExecutor; @@ -243,6 +243,7 @@ impl SigningMethod { // Request a signature from the Web3Signer instance via 
HTTP(S). let response: SigningResponse = http_client .post(signing_url.clone()) + .header(ACCEPT, "application/json") .json(&request) .send() .await diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml index 35af2b1ce7..ebcde6a828 100644 --- a/validator_manager/Cargo.toml +++ b/validator_manager/Cargo.toml @@ -6,14 +6,12 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -bls = { workspace = true } clap = { workspace = true } types = { workspace = true } environment = { workspace = true } eth2_network_config = { workspace = true } clap_utils = { workspace = true } eth2_wallet = { workspace = true } -eth2_keystore = { workspace = true } account_utils = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index cd19bd0ae3..d53e92deb3 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -1,7 +1,8 @@ use super::common::*; use crate::DumpConfig; use account_utils::{random_password_string, read_mnemonic_from_cli, read_password_from_user}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use eth2::{ lighthouse_vc::std_types::KeystoreJsonStr, types::{StateId, ValidatorId}, @@ -35,8 +36,8 @@ pub const DEPOSITS_FILENAME: &str = "deposits.json"; const BEACON_NODE_HTTP_TIMEOUT: Duration = Duration::from_secs(2); -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Creates new validators from BIP-39 mnemonic. A JSON file will be created which \ contains all the validator keystores and other validator data. 
This file can then \ @@ -45,7 +46,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { deposits in the same format as the \"ethereum/staking-deposit-cli\" tool.", ) .arg( - Arg::with_name(OUTPUT_PATH_FLAG) + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) + .arg( + Arg::new(OUTPUT_PATH_FLAG) .long(OUTPUT_PATH_FLAG) .value_name("DIRECTORY") .help( @@ -53,10 +63,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { files will be created. The directory will be created if it does not exist.", ) .required(true) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(DEPOSIT_GWEI_FLAG) + Arg::new(DEPOSIT_GWEI_FLAG) .long(DEPOSIT_GWEI_FLAG) .value_name("DEPOSIT_GWEI") .help( @@ -64,51 +75,60 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { required for an active validator (MAX_EFFECTIVE_BALANCE)", ) .conflicts_with(DISABLE_DEPOSITS_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(FIRST_INDEX_FLAG) + Arg::new(FIRST_INDEX_FLAG) .long(FIRST_INDEX_FLAG) .value_name("FIRST_INDEX") .help("The first of consecutive key indexes you wish to create.") - .takes_value(true) + .action(ArgAction::Set) .required(false) - .default_value("0"), + .default_value("0") + .display_order(0), ) .arg( - Arg::with_name(COUNT_FLAG) + Arg::new(COUNT_FLAG) .long(COUNT_FLAG) .value_name("VALIDATOR_COUNT") .help("The number of validators to create, regardless of how many already exist") .conflicts_with("at-most") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(MNEMONIC_FLAG) + Arg::new(MNEMONIC_FLAG) .long(MNEMONIC_FLAG) .value_name("MNEMONIC_PATH") .help("If present, the mnemonic will be read in from this file.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - 
.hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0) + .help_heading(FLAG_HEADER), ) .arg( - Arg::with_name(DISABLE_DEPOSITS_FLAG) + Arg::new(DISABLE_DEPOSITS_FLAG) .long(DISABLE_DEPOSITS_FLAG) .help( "When provided don't generate the deposits JSON file that is \ commonly used for submitting validator deposits via a web UI. \ Using this flag will save several seconds per validator if the \ user has an alternate strategy for submitting deposits.", - ), + ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0), ) .arg( - Arg::with_name(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG) + Arg::new(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG) .long(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG) .help( "If present, the user will be prompted to enter the voting keystore \ @@ -116,10 +136,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { flag is not provided, a random password will be used. 
It is not \ necessary to keep backups of voting keystore passwords if the \ mnemonic is safely backed up.", - ), + ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0), ) .arg( - Arg::with_name(ETH1_WITHDRAWAL_ADDRESS_FLAG) + Arg::new(ETH1_WITHDRAWAL_ADDRESS_FLAG) .long(ETH1_WITHDRAWAL_ADDRESS_FLAG) .value_name("ETH1_ADDRESS") .help( @@ -128,10 +151,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { with the mnemonic-derived withdrawal public key in EIP-2334 format.", ) .conflicts_with(DISABLE_DEPOSITS_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(GAS_LIMIT_FLAG) + Arg::new(GAS_LIMIT_FLAG) .long(GAS_LIMIT_FLAG) .value_name("UINT64") .help( @@ -139,10 +163,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { to leave this as the default value by not specifying this flag.", ) .required(false) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(FEE_RECIPIENT_FLAG) + Arg::new(FEE_RECIPIENT_FLAG) .long(FEE_RECIPIENT_FLAG) .value_name("ETH1_ADDRESS") .help( @@ -150,21 +175,23 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { fee recipient. 
Omit this flag to use the default value from the VC.", ) .required(false) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(BUILDER_PROPOSALS_FLAG) + Arg::new(BUILDER_PROPOSALS_FLAG) .long(BUILDER_PROPOSALS_FLAG) .help( "When provided, all created validators will attempt to create \ blocks via builder rather than the local EL.", ) .required(false) - .possible_values(&["true", "false"]) - .takes_value(true), + .value_parser(["true", "false"]) + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(BEACON_NODE_FLAG) + Arg::new(BEACON_NODE_FLAG) .long(BEACON_NODE_FLAG) .value_name("HTTP_ADDRESS") .help( @@ -174,21 +201,24 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { prevent the same validator being created twice and therefore slashable \ conditions.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(FORCE_BLS_WITHDRAWAL_CREDENTIALS) - .takes_value(false) + Arg::new(FORCE_BLS_WITHDRAWAL_CREDENTIALS) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long(FORCE_BLS_WITHDRAWAL_CREDENTIALS) .help( "If present, allows BLS withdrawal credentials rather than an execution \ address. 
This is not recommended.", - ), + ) + .display_order(0), ) .arg( - Arg::with_name(BUILDER_BOOST_FACTOR_FLAG) + Arg::new(BUILDER_BOOST_FACTOR_FLAG) .long(BUILDER_BOOST_FACTOR_FLAG) - .takes_value(true) + .action(ArgAction::Set) .value_name("UINT64") .required(false) .help( @@ -196,18 +226,20 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { a percentage multiplier to apply to the builder's payload value \ when choosing between a builder payload header and payload from \ the local execution node.", - ), + ) + .display_order(0), ) .arg( - Arg::with_name(PREFER_BUILDER_PROPOSALS_FLAG) + Arg::new(PREFER_BUILDER_PROPOSALS_FLAG) .long(PREFER_BUILDER_PROPOSALS_FLAG) .help( "If this flag is set, Lighthouse will always prefer blocks \ constructed by builders, regardless of payload value.", ) .required(false) - .possible_values(&["true", "false"]) - .takes_value(true), + .value_parser(["true", "false"]) + .action(ArgAction::Set) + .display_order(0), ) } @@ -242,10 +274,10 @@ impl CreateConfig { first_index: clap_utils::parse_required(matches, FIRST_INDEX_FLAG)?, count: clap_utils::parse_required(matches, COUNT_FLAG)?, mnemonic_path: clap_utils::parse_optional(matches, MNEMONIC_FLAG)?, - stdin_inputs: cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG), - disable_deposits: matches.is_present(DISABLE_DEPOSITS_FLAG), + stdin_inputs: cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG), + disable_deposits: matches.get_flag(DISABLE_DEPOSITS_FLAG), specify_voting_keystore_password: matches - .is_present(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG), + .get_flag(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG), eth1_withdrawal_address: clap_utils::parse_optional( matches, ETH1_WITHDRAWAL_ADDRESS_FLAG, @@ -259,7 +291,7 @@ impl CreateConfig { fee_recipient: clap_utils::parse_optional(matches, FEE_RECIPIENT_FLAG)?, gas_limit: clap_utils::parse_optional(matches, GAS_LIMIT_FLAG)?, bn_url: clap_utils::parse_optional(matches, BEACON_NODE_FLAG)?, - force_bls_withdrawal_credentials: 
matches.is_present(FORCE_BLS_WITHDRAWAL_CREDENTIALS), + force_bls_withdrawal_credentials: matches.get_flag(FORCE_BLS_WITHDRAWAL_CREDENTIALS), }) } } @@ -516,8 +548,8 @@ impl ValidatorsAndDeposits { } } -pub async fn cli_run<'a, E: EthSpec>( - matches: &'a ArgMatches<'a>, +pub async fn cli_run( + matches: &ArgMatches, spec: &ChainSpec, dump_config: DumpConfig, ) -> Result<(), String> { @@ -581,7 +613,7 @@ pub mod tests { type E = MainnetEthSpec; - const TEST_VECTOR_DEPOSIT_CLI_VERSION: &str = "2.3.0"; + const TEST_VECTOR_DEPOSIT_CLI_VERSION: &str = "2.7.0"; fn junk_execution_address() -> Option
{ Some(Address::from_str("0x0f51bb10119727a7e5ea3538074fb341f56b09ad").unwrap()) @@ -933,12 +965,6 @@ pub mod tests { for deposit in &mut deposits { // Ensures we can match test vectors. deposit.deposit_cli_version = TEST_VECTOR_DEPOSIT_CLI_VERSION.to_string(); - - // We use "prater" and the vectors use "goerli" now. The two names refer to the same - // network so there should be no issue here. - if deposit.network_name == "prater" { - deposit.network_name = "goerli".to_string(); - } } deposits }; diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index 4b924189f2..f193e8d0fb 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -1,6 +1,7 @@ use super::common::*; use crate::DumpConfig; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use eth2::{lighthouse_vc::std_types::ImportKeystoreStatus, SensitiveUrl}; use serde::{Deserialize, Serialize}; use std::fs; @@ -13,15 +14,24 @@ pub const VC_TOKEN_FLAG: &str = "vc-token"; pub const DETECTED_DUPLICATE_MESSAGE: &str = "Duplicate validator detected!"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Uploads validators to a validator client using the HTTP API. 
The validators \ are defined in a JSON file which can be generated using the \"create-validators\" \ command.", ) .arg( - Arg::with_name(VALIDATORS_FILE_FLAG) + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) + .arg( + Arg::new(VALIDATORS_FILE_FLAG) .long(VALIDATORS_FILE_FLAG) .value_name("PATH_TO_JSON_FILE") .help( @@ -30,10 +40,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { \"validators.json\".", ) .required(true) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(VC_URL_FLAG) + Arg::new(VC_URL_FLAG) .long(VC_URL_FLAG) .value_name("HTTP_ADDRESS") .help( @@ -43,18 +54,21 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .default_value("http://localhost:5062") .requires(VC_TOKEN_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(VC_TOKEN_FLAG) + Arg::new(VC_TOKEN_FLAG) .long(VC_TOKEN_FLAG) .value_name("PATH") .help("The file containing a token required by the validator client.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(IGNORE_DUPLICATES_FLAG) - .takes_value(false) + Arg::new(IGNORE_DUPLICATES_FLAG) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long(IGNORE_DUPLICATES_FLAG) .help( "If present, ignore any validators which already exist on the VC. \ @@ -63,7 +77,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { slashable conditions, it might be an indicator that something is amiss. 
\ Users should also be careful to avoid submitting duplicate deposits for \ validators that already exist on the VC.", - ), + ) + .display_order(0), ) } @@ -81,15 +96,12 @@ impl ImportConfig { validators_file_path: clap_utils::parse_required(matches, VALIDATORS_FILE_FLAG)?, vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, - ignore_duplicates: matches.is_present(IGNORE_DUPLICATES_FLAG), + ignore_duplicates: matches.get_flag(IGNORE_DUPLICATES_FLAG), }) } } -pub async fn cli_run<'a>( - matches: &'a ArgMatches<'a>, - dump_config: DumpConfig, -) -> Result<(), String> { +pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<(), String> { let config = ImportConfig::from_cli(matches)?; if dump_config.should_exit_early(&config)? { Ok(()) diff --git a/validator_manager/src/lib.rs b/validator_manager/src/lib.rs index a9991d3272..101d6d2136 100644 --- a/validator_manager/src/lib.rs +++ b/validator_manager/src/lib.rs @@ -1,5 +1,5 @@ -use clap::App; -use clap::ArgMatches; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::{get_color_style, FLAG_HEADER}; use common::write_to_json_file; use environment::Environment; use serde::Serialize; @@ -38,17 +38,28 @@ impl DumpConfig { } } -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) - .visible_aliases(&["vm", "validator-manager", CMD]) +pub fn cli_app() -> Command { + Command::new(CMD) + .visible_aliases(["vm", "validator-manager", CMD]) + .display_order(0) + .styles(get_color_style()) .about("Utilities for managing a Lighthouse validator client via the HTTP API.") + .arg( + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) .subcommand(create_validators::cli_app()) .subcommand(import_validators::cli_app()) .subcommand(move_validators::cli_app()) } /// Run the account manager, returning an error 
if the operation did not succeed. -pub fn run<'a, E: EthSpec>(matches: &'a ArgMatches<'a>, env: Environment) -> Result<(), String> { +pub fn run(matches: &ArgMatches, env: Environment) -> Result<(), String> { let context = env.core_context(); let spec = context.eth2_config.spec; let dump_config = clap_utils::parse_optional(matches, DUMP_CONFIGS_FLAG)? @@ -63,20 +74,20 @@ pub fn run<'a, E: EthSpec>(matches: &'a ArgMatches<'a>, env: Environment) -> .block_on_dangerous( async { match matches.subcommand() { - (create_validators::CMD, Some(matches)) => { + Some((create_validators::CMD, matches)) => { create_validators::cli_run::(matches, &spec, dump_config).await } - (import_validators::CMD, Some(matches)) => { + Some((import_validators::CMD, matches)) => { import_validators::cli_run(matches, dump_config).await } - (move_validators::CMD, Some(matches)) => { + Some((move_validators::CMD, matches)) => { move_validators::cli_run(matches, dump_config).await } - ("", _) => Err("No command supplied. See --help.".to_string()), - (unknown, _) => Err(format!( + Some((unknown, _)) => Err(format!( "{} is not a valid {} command. See --help.", unknown, CMD )), + _ => Err("No command supplied. See --help.".to_string()), } }, "validator_manager", diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs index 5826f2756b..d2149d742c 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -1,7 +1,8 @@ use super::common::*; use crate::DumpConfig; use account_utils::{read_password_from_user, ZeroizeString}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use eth2::{ lighthouse_vc::{ std_types::{ @@ -66,8 +67,8 @@ impl PasswordSource { } } -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Uploads validators to a validator client using the HTTP API. 
The validators \ are defined in a JSON file which can be generated using the \"create-validators\" \ @@ -75,7 +76,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { file system (i.e., not Web3Signer validators).", ) .arg( - Arg::with_name(SRC_VC_URL_FLAG) + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) + .arg( + Arg::new(SRC_VC_URL_FLAG) .long(SRC_VC_URL_FLAG) .value_name("HTTP_ADDRESS") .help( @@ -85,17 +95,19 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .required(true) .requires(SRC_VC_TOKEN_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(SRC_VC_TOKEN_FLAG) + Arg::new(SRC_VC_TOKEN_FLAG) .long(SRC_VC_TOKEN_FLAG) .value_name("PATH") .help("The file containing a token required by the source validator client.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(DEST_VC_URL_FLAG) + Arg::new(DEST_VC_URL_FLAG) .long(DEST_VC_URL_FLAG) .value_name("HTTP_ADDRESS") .help( @@ -105,35 +117,39 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .required(true) .requires(DEST_VC_TOKEN_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(DEST_VC_TOKEN_FLAG) + Arg::new(DEST_VC_TOKEN_FLAG) .long(DEST_VC_TOKEN_FLAG) .value_name("PATH") .help("The file containing a token required by the destination validator client.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(VALIDATORS_FLAG) + Arg::new(VALIDATORS_FLAG) .long(VALIDATORS_FLAG) .value_name("STRING") .help( "The validators to be moved. 
Either a list of 0x-prefixed \ validator pubkeys or the keyword \"all\".", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(COUNT_FLAG) + Arg::new(COUNT_FLAG) .long(COUNT_FLAG) .value_name("VALIDATOR_COUNT") .help("The number of validators to move.") .conflicts_with(VALIDATORS_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(GAS_LIMIT_FLAG) + Arg::new(GAS_LIMIT_FLAG) .long(GAS_LIMIT_FLAG) .value_name("UINT64") .help( @@ -141,10 +157,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { to leave this as the default value by not specifying this flag.", ) .required(false) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(FEE_RECIPIENT_FLAG) + Arg::new(FEE_RECIPIENT_FLAG) .long(FEE_RECIPIENT_FLAG) .value_name("ETH1_ADDRESS") .help( @@ -152,30 +169,33 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { fee recipient. Omit this flag to use the default value from the VC.", ) .required(false) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(BUILDER_PROPOSALS_FLAG) + Arg::new(BUILDER_PROPOSALS_FLAG) .long(BUILDER_PROPOSALS_FLAG) .help( "When provided, all created validators will attempt to create \ blocks via builder rather than the local EL.", ) .required(false) - .possible_values(&["true", "false"]) - .takes_value(true), + .value_parser(["true", "false"]) + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0), ) .arg( - Arg::with_name(BUILDER_BOOST_FACTOR_FLAG) + Arg::new(BUILDER_BOOST_FACTOR_FLAG) .long(BUILDER_BOOST_FACTOR_FLAG) - .takes_value(true) + 
.action(ArgAction::Set) .value_name("UINT64") .required(false) .help( @@ -183,18 +203,20 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { a percentage multiplier to apply to the builder's payload value \ when choosing between a builder payload header and payload from \ the local execution node.", - ), + ) + .display_order(0), ) .arg( - Arg::with_name(PREFER_BUILDER_PROPOSALS_FLAG) + Arg::new(PREFER_BUILDER_PROPOSALS_FLAG) .long(PREFER_BUILDER_PROPOSALS_FLAG) .help( "If this flag is set, Lighthouse will always prefer blocks \ constructed by builders, regardless of payload value.", ) .required(false) - .possible_values(&["true", "false"]) - .takes_value(true), + .value_parser(["true", "false"]) + .action(ArgAction::Set) + .display_order(0), ) } @@ -223,10 +245,10 @@ pub struct MoveConfig { impl MoveConfig { fn from_cli(matches: &ArgMatches) -> Result { let count_flag = clap_utils::parse_optional(matches, COUNT_FLAG)?; - let validators_flag = matches.value_of(VALIDATORS_FLAG); + let validators_flag = matches.get_one::(VALIDATORS_FLAG); let validators = match (count_flag, validators_flag) { (Some(count), None) => Validators::Count(count), - (None, Some(string)) => match string { + (None, Some(string)) => match string.as_str() { "all" => Validators::All, pubkeys => pubkeys .split(',') @@ -257,16 +279,13 @@ impl MoveConfig { fee_recipient: clap_utils::parse_optional(matches, FEE_RECIPIENT_FLAG)?, gas_limit: clap_utils::parse_optional(matches, GAS_LIMIT_FLAG)?, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG), + stdin_inputs: cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG), }, }) } } -pub async fn cli_run<'a>( - matches: &'a ArgMatches<'a>, - dump_config: DumpConfig, -) -> Result<(), String> { +pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<(), String> { let config = MoveConfig::from_cli(matches)?; if dump_config.should_exit_early(&config)? 
{ Ok(()) diff --git a/validator_manager/test_vectors/generate.py b/validator_manager/test_vectors/generate.py index 722414de73..8bf7f5f52d 100644 --- a/validator_manager/test_vectors/generate.py +++ b/validator_manager/test_vectors/generate.py @@ -1,10 +1,13 @@ # This script uses the `ethereum/staking-deposit-cli` tool to generate # deposit data files which are then used for testing by Lighthouse. # -# To generate vectors, simply run this Python script: +# To generate vectors, run this Python script: # # `python generate.py` # +# This script was last run on Linux using Python v3.10.4. Python v3.11.0 was not working at time +# of writing due to dependency issues in `staking-deposit-cli`. You should probably use `pyenv` and +# `virtualenv`. import os import sys import shutil @@ -89,8 +92,7 @@ def sdc_generate(network, first_index, count, eth1_withdrawal_address=None): os.mkdir(output_dir) command = [ - '/bin/sh', - 'deposit.sh', + './deposit.sh', '--language', 'english', '--non_interactive', 'existing-mnemonic', @@ -114,10 +116,10 @@ def test_network(network): sdc_generate(network, first_index=99, count=2) sdc_generate(network, first_index=1024, count=3) sdc_generate(network, first_index=0, count=2, - eth1_withdrawal_address="0x0f51bb10119727a7e5ea3538074fb341f56b09ad") + eth1_withdrawal_address="0x0f51bb10119727a7e5eA3538074fb341F56B09Ad") setup() test_network("mainnet") -test_network("prater") +test_network("holesky") cleanup() diff --git a/validator_manager/test_vectors/vectors/holesky_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584111.json b/validator_manager/test_vectors/vectors/holesky_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584111.json new file mode 100644 index 0000000000..6b343d087a --- /dev/null +++ b/validator_manager/test_vectors/vectors/holesky_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584111.json @@ -0,0 +1 @@ +[{"pubkey": 
"88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "846c83b1ec80038974ded0ef5b89d86c862a7bd4559c10528cd4bb6a48e71987f17a963bc6165a6f51c8b87474e64b450b549ce2d14a25bea3c86c241f3740f3d3edc3dc36fddbeadb1ec8969d7193da602270fea8dd31d3e64674aa2090b73d", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "cdfe14518026e99b9dfa8a029054349e37d4632ee2bbed7c2f5af19a01912368", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584114.json b/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584114.json new file mode 100644 index 0000000000..f70410746b --- /dev/null +++ b/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584114.json @@ -0,0 +1 @@ +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "846c83b1ec80038974ded0ef5b89d86c862a7bd4559c10528cd4bb6a48e71987f17a963bc6165a6f51c8b87474e64b450b549ce2d14a25bea3c86c241f3740f3d3edc3dc36fddbeadb1ec8969d7193da602270fea8dd31d3e64674aa2090b73d", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "cdfe14518026e99b9dfa8a029054349e37d4632ee2bbed7c2f5af19a01912368", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", 
"withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "997cff67c1675ecd2467ac050850ddec8b0488995abf363cee40cbe1461043acf4e68422e9731340437d566542e010cd186031dc0de30b2f56d19f3bb866e0fa9be31dd49ea27777f25ad786cc8587fb745598e5870647b6deeaab77fba4a9e4", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "8787f86d699426783983d03945a8ebe45b349118d28e8af528b9695887f98fac", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584129.json b/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584129.json new file mode 100644 index 0000000000..9b2678651f --- /dev/null +++ b/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584129.json @@ -0,0 +1 @@ +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "a8eed5bb34dec5fdee4a3e68a774143072af0ebdae26a9b24ea0601d516a5eeb18aa2ec804be3f05f8475f2e472ce91809d93b7586c3a90fc8a7bbb63ad1f762eee3df0dc0ea3d33dd8ba782e48de495b3bc76e280658c1406e11d07db659e69", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "74ead0279baa86ed7106268e4806484eaae26a8f1c42f693e4b3cb626c724b63", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 
32000000000, "signature": "8d87cdd627ed169114c00653fd3167e2afc917010071bbbbddd60e331ed0d0d7273cb4a887efe63e7b840bac713420d907e9dac20df56e50e7346b59e3acfe56753234a34c7ab3d8c40ea00b447db005b4b780701a0a2416c4fdadbdb18bf174", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "978b04b76d0a56ff28beb8eb1859792e0967d0b51e4a31485d2078b8390954d2", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584124.json b/validator_manager/test_vectors/vectors/holesky_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584124.json new file mode 100644 index 0000000000..997260bb87 --- /dev/null +++ b/validator_manager/test_vectors/vectors/holesky_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584124.json @@ -0,0 +1 @@ +[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "818141f1f2fdba651f6a3de4ed43c774974b6cec82b3e6c3fa00569b6b67a88c37742d0033275dc98b4bbaac875e48b416b89cebfd1fe9996e2a29c0a2c512d1cedff558420a1a2b50cf5c743a622d85d941b896b00520b3e9a3eaf1f5eff12c", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "9c9f6ed171b93a08f4e1bc46c0a7feace6466e3e213c6c2d567428c73e22e242", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": 
"b62103a32290ec8c710d48f3147895a2dddb25231c9ae38b8ca12bcaf30770a9fc632f4da6b3c5b7a43cfa6a9f096f5e13d26b2c68a42c1c86385aea268dcd2ad3cf766b3f01ee2ba19379ddae9c15830aac8acbef20accc82c734f4c40e5ffd", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "37b75d75086f4b980c85c021ca22343008d445061714cff41d63aea4dca49a5f", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "af2dc295084b4a3eff01a52fe5d42aa931509c24328d5304e59026d0957b55bc35e64802a8d64fdb4a9700bf12e1d6bb184eba01682d8413d86b737e63d3d79a16243d9c8e00115a202efc889ef7129861d8aa32bf8ec9ef5305eecce87b2eda", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "fd0c081818d2ce1bc54b7979e9b348bbbdb8fe5904694143bf4b355dcbbde692", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584117.json b/validator_manager/test_vectors/vectors/holesky_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584117.json new file mode 100644 index 0000000000..4fa3724c59 --- /dev/null +++ b/validator_manager/test_vectors/vectors/holesky_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584117.json @@ -0,0 +1 @@ +[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": 
"b687aa7d55752f00a060c21fa9287485bab94c841d96b3516263fb384a812c92e60ef9fa2e09add9f55db71961fc051e0bb83d214b6f31d04ee59eaba3b43e27eadd2a64884c5d4125a1f5bd6e1d930e5a1e420c278c697d4af6ed3fcdac16cf", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "54dc56d2838ca70bac89ca92ae1f8d04945d3305ce8507b390756b646163387a", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584120.json b/validator_manager/test_vectors/vectors/holesky_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584120.json new file mode 100644 index 0000000000..7436b53f24 --- /dev/null +++ b/validator_manager/test_vectors/vectors/holesky_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584120.json @@ -0,0 +1 @@ +[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": "a59a2c510c5ce378b514f62550a7115cd6cfebaf73a5ba20c2cf21456a2d2c11d6e117b91d23743fc0361794cf7e5405030eb296926b526e8a2d68aa87569358e69d3884563a23770714730b6fab6ba639977d725a5ed4f29abe3ccc34575610", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "149a5dfbba87109dac65142cc067aed97c9579730488cfe16625be3ce4f753a6", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": 
"966ae45b81402f1155ff313e48ca3a5346264dcc4bc9ee9e69994ee74368852d9d27c1684752735feba6c21042ad366b13f12c6e772c453518900435d87e2d743e1818e7471cf3574598e3b085c4527f643efe679841ddf8a480cac12b2c6e08", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "f44dac412ae36929a84f64d5f7f91cada908a8f9e837fc70628f58804591798d", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584089.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json rename to validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584089.json index 31c00c57f2..d9ba926d1c 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584089.json @@ -1 +1 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", 
"deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584092.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json rename to validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584092.json index 2880b7724c..f1ea4c6ad3 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584092.json @@ -1 +1 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": 
"8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "84b9fc8f260a1488c4c9a438f875edfa2bac964d651b2bc886d8442829b13f89752e807c8ca9bae9d50b1b506d3a64730015dd7f91e271ff9c1757d1996dcf6082fe5205cf6329fa2b6be303c21b66d75be608757a123da6ee4a4f14c01716d7", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "cd991ea8ff32e6b3940aed43b476c720fc1abd3040893b77a8a3efb306320d4c", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": 
"a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "84b9fc8f260a1488c4c9a438f875edfa2bac964d651b2bc886d8442829b13f89752e807c8ca9bae9d50b1b506d3a64730015dd7f91e271ff9c1757d1996dcf6082fe5205cf6329fa2b6be303c21b66d75be608757a123da6ee4a4f14c01716d7", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "cd991ea8ff32e6b3940aed43b476c720fc1abd3040893b77a8a3efb306320d4c", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584107.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json rename to validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584107.json index da92a1d0d9..5741f23d8f 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584107.json @@ -1 +1 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "a8461b58a5a5a0573c4af37da6ee4ba63e35894cffad6797d4a2c80f8f2c79d2c30c0de0299d8edde76e0c3f3e6d4f1e03cc377969f56d8760717d6e86f9316da9375573ce7bb87a8520daedb13c49284377f7a4f64a70aa2ca44b1581d47e20", 
"deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "d26d642a880ff8a109260fe69681840f6e1868c8c1cd2163a1db5a094e8db03a", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "93a398c09143203beb94c9223c7e18f36e5ea36090875284b222c2fcb16982e6f2e26f27ca9d30e3c6f6b5ad44857fc50f531925f4736810712f68a9d7a9c0eb664a851180f3b7d2e44a35717d43b3d3e4fd555354fa1dfa92f451870f36084d", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "7c7617a2c11870ec49e975b3691b9f822d63938df38555161e23aa245b150c66", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "a8461b58a5a5a0573c4af37da6ee4ba63e35894cffad6797d4a2c80f8f2c79d2c30c0de0299d8edde76e0c3f3e6d4f1e03cc377969f56d8760717d6e86f9316da9375573ce7bb87a8520daedb13c49284377f7a4f64a70aa2ca44b1581d47e20", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "d26d642a880ff8a109260fe69681840f6e1868c8c1cd2163a1db5a094e8db03a", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": 
"93a398c09143203beb94c9223c7e18f36e5ea36090875284b222c2fcb16982e6f2e26f27ca9d30e3c6f6b5ad44857fc50f531925f4736810712f68a9d7a9c0eb664a851180f3b7d2e44a35717d43b3d3e4fd555354fa1dfa92f451870f36084d", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "7c7617a2c11870ec49e975b3691b9f822d63938df38555161e23aa245b150c66", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json b/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584103.json similarity index 93% rename from validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json rename to validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584103.json index 9cc01dc0df..9b9556cf9d 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584103.json @@ -1 +1 @@ -[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "a0a96851892b257c032284928641021e58e0bcd277c3da5a2c41bcce6633d144781e4761261138277b5a8cf0ead59cce073e5a3bbc4704a37abf8cd1e290dc52e56cb0c334303945ebbb79be453c8177937e44e08f980679f1a2997fe58d2d86", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "2bedaf48f8315d8631defc97c1c4c05a8152e2dc3fe779fc8e800dd67bd839a2", "fork_version": "00000000", 
"network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "b469179ad8ba9d6ad71b99a3c7ae662d9b77cca3ee53b20ab2eb20beee31874ad47224e94e75578fa6ecd30c1d40a0b300053817f934169d84425691edf13216445fbc6dd9b0953ad3af20c834fba63c1f50c0b0f92dd8bf383cd2cc8e0431f1", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "69862477671957ab0b3f1167c5cd550c107132a0079eb70eaa4bc5c5fe06b5a0", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "a8b05626657ce5b1801e0824aaeb21de2e1a11bc16cad6100ac911bcb873aaf7e7282f1f8465df4aaea998a1a4e1645f075e7e65f8c6b8688b0162f86be2128541f91fc9feb628bcab3b4afec1f7aeccaba04aaa54dc17c738233d360f94b97e", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "34ef32901d793cd9a0a3d93e7ee40e7be9abe6fb26f0b49a86b8ff29dc649930", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "a0a96851892b257c032284928641021e58e0bcd277c3da5a2c41bcce6633d144781e4761261138277b5a8cf0ead59cce073e5a3bbc4704a37abf8cd1e290dc52e56cb0c334303945ebbb79be453c8177937e44e08f980679f1a2997fe58d2d86", "deposit_message_root": 
"5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "2bedaf48f8315d8631defc97c1c4c05a8152e2dc3fe779fc8e800dd67bd839a2", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "b469179ad8ba9d6ad71b99a3c7ae662d9b77cca3ee53b20ab2eb20beee31874ad47224e94e75578fa6ecd30c1d40a0b300053817f934169d84425691edf13216445fbc6dd9b0953ad3af20c834fba63c1f50c0b0f92dd8bf383cd2cc8e0431f1", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "69862477671957ab0b3f1167c5cd550c107132a0079eb70eaa4bc5c5fe06b5a0", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "a8b05626657ce5b1801e0824aaeb21de2e1a11bc16cad6100ac911bcb873aaf7e7282f1f8465df4aaea998a1a4e1645f075e7e65f8c6b8688b0162f86be2128541f91fc9feb628bcab3b4afec1f7aeccaba04aaa54dc17c738233d360f94b97e", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "34ef32901d793cd9a0a3d93e7ee40e7be9abe6fb26f0b49a86b8ff29dc649930", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json b/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584095.json similarity index 90% rename from 
validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json rename to validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584095.json index 3a971d0959..84140f53fe 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584095.json @@ -1 +1 @@ -[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "a57299cde3c2ea8dc17ad3ce5a38a5f6de69d198599150dc4df02624ba1d8672440d02c0d27c3dc3b8c9f86c679571ab14c798426acd9b059895f1f5887bdee805fb4e31bd8f93ec9e78403c23d7924f23eae6af056154f35fee03bf9ffe0e98", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "246619823b45d80f53a30404542ec4be447d4e268cc0afcdf480e6a846d58411", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "a57299cde3c2ea8dc17ad3ce5a38a5f6de69d198599150dc4df02624ba1d8672440d02c0d27c3dc3b8c9f86c679571ab14c798426acd9b059895f1f5887bdee805fb4e31bd8f93ec9e78403c23d7924f23eae6af056154f35fee03bf9ffe0e98", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "246619823b45d80f53a30404542ec4be447d4e268cc0afcdf480e6a846d58411", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No 
newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json b/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584098.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json rename to validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584098.json index 2efa5c4ec8..3205390a43 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584098.json @@ -1 +1 @@ -[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": "8ca8a6f30b4346d7b9912e3dcd820652bc472511f89d91fd102acfb0c8df1cfc7a2629f44170727e126e88f2847fe5c9081b13fb0838a2b2343a95cabf16f57708fc0cf846bc5307209ae976c34500cc826ff48ab64169d8bebec99dded5dd1d", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "c0c6cd40b43ea0fe7fcc284de9acd9c1bd001bb88c059c155393af22a6c85d46", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "8c0784645c611b4f514a6519b737f2d02df3eba0e04cd30efebffcca769af8cc599ce28e4421cefe665ec31d3c34e44c174e0cca4891d8196796085e712459b45e411efecd07cf3258f1d6309a07a6dd52a0ae186e6184d37bf11cee36ec84e8", 
"deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "c57790b77ef97318d4ec7b97ea07ea458d08209ba372bfe76171e2ece22d6130", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": "8ca8a6f30b4346d7b9912e3dcd820652bc472511f89d91fd102acfb0c8df1cfc7a2629f44170727e126e88f2847fe5c9081b13fb0838a2b2343a95cabf16f57708fc0cf846bc5307209ae976c34500cc826ff48ab64169d8bebec99dded5dd1d", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "c0c6cd40b43ea0fe7fcc284de9acd9c1bd001bb88c059c155393af22a6c85d46", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "8c0784645c611b4f514a6519b737f2d02df3eba0e04cd30efebffcca769af8cc599ce28e4421cefe665ec31d3c34e44c174e0cca4891d8196796085e712459b45e411efecd07cf3258f1d6309a07a6dd52a0ae186e6184d37bf11cee36ec84e8", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "c57790b77ef97318d4ec7b97ea07ea458d08209ba372bfe76171e2ece22d6130", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json b/validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json deleted 
file mode 100644 index c736d75b7e..0000000000 --- a/validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "a940e0142ad9b56a1310326137347d1ada275b31b3748af4accc63bd189573376615be8e8ae047766c6d10864e54b2e7098177598edf3a043eb560bbdf1a1c12588375a054d1323a0900e2286d0993cde9675e5b74523e6e8e03715cc96b3ce5", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "28484efb20c961a1354689a556d4c352fe9deb24684efdb32d22e1af17e2a45d", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json b/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json deleted file mode 100644 index e86500d14f..0000000000 --- a/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "a940e0142ad9b56a1310326137347d1ada275b31b3748af4accc63bd189573376615be8e8ae047766c6d10864e54b2e7098177598edf3a043eb560bbdf1a1c12588375a054d1323a0900e2286d0993cde9675e5b74523e6e8e03715cc96b3ce5", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": 
"28484efb20c961a1354689a556d4c352fe9deb24684efdb32d22e1af17e2a45d", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "87b4b4e9c923aa9e1687219e9df0e838956ee6e15b7ab18142467430d00940dc7aa243c9996e85125dfe72d9dbdb00a30a36e16a2003ee0c86f29c9f5d74f12bfe5b7f62693dbf5187a093555ae8d6b48acd075788549c4b6a249b397af24cd0", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "ea80b639356a03f6f58e4acbe881fabefc9d8b93375a6aa7e530c77d7e45d3e4", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json b/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json deleted file mode 100644 index c79ae5a4fc..0000000000 --- a/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "ab32595d8201c2b4e8173aece9151fdc15f4d2ad36008462d0416598ddbf0f37ed0877f06d284a9669e73dbc0885bd2207fe64385e95a4488dc2bcb2c324d5c20da3248a6244463583dfbba8db20805765421e59cb56b0bc3ee6d24a9218216d", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "b4df3a3a26dd5f6eb32999d8a7051a7d1a8573a16553d4b45ee706a0d59c1066", "fork_version": "00001020", 
"network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "9655e195eda5517efe6f36bcebd45250c889a4177d7bf5fcd59598d2d03f37f038b5ee2ec079a30a8382ea42f351943f08a6f006bab9c2130db2742bd7315c8ad5aa1f03a0801c26d4c9efdef71c4c59c449c7f9b21fa62600ab8f5f1e2b938a", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "7661474fba11bfb453274f62df022cab3c0b6f4a58af4400f6bce83c9cb5fcb8", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json b/validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json deleted file mode 100644 index 136dc38554..0000000000 --- a/validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "b5dae79ce8f3d7326b46f93182981c5f3d64257a457f038caa78ec8e5cc25a9fdac52c7beb221ab2a3205404131366ad18e1e13801393b3d486819e8cca96128bf1244884a91d05dced092c74bc1e7259788f30dd3432df15f3d2f629645f345", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "94213d76aba9e6a434589d4939dd3764e0832df78f66d30db22a760c14ba1b89", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": 
"86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "816f38a321c4f84ad5187eda58f6d9c1fd1e81c860ed1722bdb76b920fdd430a1e814b9bb893837ae3b38ad738684fbf1795fa687f617c52121472b1ac8d2e34e5c1127186233a8833ffb54c509d9e52cb7242c6c6a65b5e496296b3caa90d89", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "7ad1d059d69794680a1deef5e72c33827f0c449a5f0917095821c0343572789d", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "95d20c35484dea6b2a0bd7c2da2d2e810d7829e14c03657b2524adfc2111aa5ed95908ecb975ff75ff742c68ce8df417016c048959b0f807675430f6d981478e26d48e594e0830a0406da9817f8a1ecb94bd8be1f9281eeb5e952a82173c72bb", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "83abfb2a166f7af708526a9bdd2767c4be3cd231c9bc4e2f047a80df88a2860c", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json b/validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json deleted file mode 100644 index ccd2ece069..0000000000 --- a/validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", 
"withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "8f75836ceb390dd4fc8c16bc4be52ca09b9c5aa0ab5bc16dcfdb344787b29ddfd76d877b0a2330bc8e904b233397c6bd124845d1b868e4951cb6daacea023c986bdf0c6ac28d73f65681d941ea96623bc23acc7c84dcfc1304686240d9171cfc", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "3011f5cac32f13e86ecc061e89ed6675c27a46ab6ecb1ec6f6e5f133ae1d0287", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json b/validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json deleted file mode 100644 index 2ab5908307..0000000000 --- a/validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": "a7706e102bfb0b986a5c8050044f7e221919463149771a92c3ca46ff7d4564867db48eaf89b5237fed8db2cdb9c9c057099d0982bbdb3fbfcbe0ab7259ad3f31f7713692b78ee25e6251982e7081d049804632b70b8a24d8c3e59b624a0bd221", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "8a26fbee0c3a99fe090af1fce68afc525b4e7efa70df72abaa91f29148b2f672", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", 
"amount": 32000000000, "signature": "8b7aa5b0e97d15ec8c2281b919fde9e064f6ac064b163445ea99441ab063f9d10534bfde861b5606021ae46614ff075e0c2305ce5a6cbcc9f0bc8e7df1a177c4d969a5ed4ac062b0ea959bdac963fe206b73565a1a3937adcca736c6117c15f0", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "d38575167a94b516455c5b7e36d24310a612fa0f4580446c5f9d45e4e94f0642", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/watch/Cargo.toml b/watch/Cargo.toml index aaaf50aa40..9e8da3b293 100644 --- a/watch/Cargo.toml +++ b/watch/Cargo.toml @@ -15,6 +15,7 @@ path = "src/main.rs" [dependencies] clap = { workspace = true } +clap_utils = { workspace = true } log = { workspace = true } env_logger = { workspace = true } types = { workspace = true } @@ -30,9 +31,7 @@ url = { workspace = true } rand = { workspace = true } diesel = { version = "2.0.2", features = ["postgres", "r2d2"] } diesel_migrations = { version = "2.0.0", features = ["postgres"] } -byteorder = { workspace = true } bls = { workspace = true } -hex = { workspace = true } r2d2 = { workspace = true } serde_yaml = { workspace = true } diff --git a/watch/src/cli.rs b/watch/src/cli.rs index 97dc217293..b7179efe5d 100644 --- a/watch/src/cli.rs +++ b/watch/src/cli.rs @@ -1,28 +1,29 @@ use crate::{config::Config, logger, server, updater}; -use clap::{App, Arg}; +use clap::{Arg, ArgAction, Command}; +use clap_utils::get_color_style; pub const SERVE: &str = "serve"; pub const RUN_UPDATER: &str = "run-updater"; pub const CONFIG: &str = "config"; -fn run_updater<'a, 'b>() -> App<'a, 'b> { - App::new(RUN_UPDATER).setting(clap::AppSettings::ColoredHelp) +fn run_updater() -> Command { + Command::new(RUN_UPDATER).styles(get_color_style()) } -fn serve<'a, 'b>() -> App<'a, 'b> { - App::new(SERVE).setting(clap::AppSettings::ColoredHelp) +fn serve() -> Command { + Command::new(SERVE).styles(get_color_style()) } 
-pub fn app<'a, 'b>() -> App<'a, 'b> { - App::new("beacon_watch_daemon") +pub fn app() -> Command { + Command::new("beacon_watch_daemon") .author("Sigma Prime ") - .setting(clap::AppSettings::ColoredHelp) + .styles(get_color_style()) .arg( - Arg::with_name(CONFIG) + Arg::new(CONFIG) .long(CONFIG) .value_name("PATH_TO_CONFIG") .help("Path to configuration file") - .takes_value(true) + .action(ArgAction::Set) .global(true), ) .subcommand(run_updater()) @@ -32,7 +33,7 @@ pub fn app<'a, 'b>() -> App<'a, 'b> { pub async fn run() -> Result<(), String> { let matches = app().get_matches(); - let config = match matches.value_of(CONFIG) { + let config = match matches.get_one::(CONFIG) { Some(path) => Config::load_from_file(path.to_string())?, None => Config::default(), }; @@ -40,10 +41,10 @@ pub async fn run() -> Result<(), String> { logger::init_logger(&config.log_level); match matches.subcommand() { - (RUN_UPDATER, Some(_)) => updater::run_updater(config) + Some((RUN_UPDATER, _)) => updater::run_updater(config) .await .map_err(|e| format!("Failure: {:?}", e)), - (SERVE, Some(_)) => server::serve(config) + Some((SERVE, _)) => server::serve(config) .await .map_err(|e| format!("Failure: {:?}", e)), _ => Err("Unsupported subcommand. 
See --help".into()), diff --git a/watch/src/server/mod.rs b/watch/src/server/mod.rs index 25dd242aab..08036db951 100644 --- a/watch/src/server/mod.rs +++ b/watch/src/server/mod.rs @@ -31,7 +31,7 @@ pub async fn serve(config: FullConfig) -> Result<(), Error> { ) })?; - let server = start_server(&config, slots_per_epoch as u64, db)?; + let (_addr, server) = start_server(&config, slots_per_epoch as u64, db)?; server.await?; @@ -58,7 +58,13 @@ pub fn start_server( config: &FullConfig, slots_per_epoch: u64, pool: PgPool, -) -> Result> + 'static, Error> { +) -> Result< + ( + SocketAddr, + impl Future> + 'static, + ), + Error, +> { let mut routes = Router::new() .route("/v1/slots", get(handler::get_slots_by_range)) .route("/v1/slots/:slot", get(handler::get_slot)) @@ -106,11 +112,15 @@ pub fn start_server( let addr = SocketAddr::new(config.server.listen_addr, config.server.listen_port); let listener = TcpListener::bind(addr)?; listener.set_nonblocking(true)?; + + // Read the socket address (it may be different from `addr` if listening on port 0). + let socket_addr = listener.local_addr()?; + let serve = axum::serve(tokio::net::TcpListener::from_std(listener)?, app); info!("HTTP server listening on {}", addr); - Ok(serve.into_future()) + Ok((socket_addr, serve.into_future())) } // The default route indicating that no available routes matched the request. 
diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs index 0e29e7f0cd..5461508edd 100644 --- a/watch/tests/tests.rs +++ b/watch/tests/tests.rs @@ -14,7 +14,6 @@ use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use std::collections::HashMap; use std::env; -use std::net::SocketAddr; use std::time::Duration; use testcontainers::{clients::Cli, core::WaitFor, Image, RunnableImage}; use tokio::{runtime, task::JoinHandle}; @@ -154,7 +153,7 @@ impl TesterBuilder { * Create a watch configuration */ let database_port = unused_tcp4_port().expect("Unable to find unused port."); - let server_port = unused_tcp4_port().expect("Unable to find unused port."); + let server_port = 0; let config = Config { database: DatabaseConfig { dbname: random_dbname(), @@ -187,14 +186,9 @@ impl TesterBuilder { /* * Spawn a Watch HTTP API. */ - let watch_server = start_server(&self.config, SLOTS_PER_EPOCH, pool).unwrap(); + let (addr, watch_server) = start_server(&self.config, SLOTS_PER_EPOCH, pool).unwrap(); tokio::spawn(watch_server); - let addr = SocketAddr::new( - self.config.server.listen_addr, - self.config.server.listen_port, - ); - /* * Create a HTTP client to talk to the watch HTTP API. */